From 9b868cea17b511492995e9920bbd7d6be93a7e11 Mon Sep 17 00:00:00 2001
From: mleku
Date: Sat, 12 Jul 2025 22:14:40 +0100
Subject: [PATCH] migrate to mostly working old realy codebase, preparing to
 integrate new database and event codec

---
 .idea/workspace.xml | 398 ++-
 LICENSE | 137 +-
 addresstag/addresstag.go | 21 +
 app/main.go | 81 +
 app/resources.go | 30 +
 apputil/doc.go | 2 +
 atomic/.codecov.yml | 19 +
 atomic/CHANGELOG.md | 130 +
 atomic/LICENSE | 19 +
 atomic/Makefile | 79 +
 atomic/README.md | 33 +
 atomic/assert_test.go | 45 +
 atomic/bool.go | 88 +
 atomic/bool_ext.go | 53 +
 atomic/bool_test.go | 150 +
 atomic/doc.go | 23 +
 atomic/duration.go | 89 +
 atomic/duration_ext.go | 40 +
 atomic/duration_test.go | 73 +
 atomic/error.go | 72 +
 atomic/error_ext.go | 39 +
 atomic/error_test.go | 136 +
 atomic/example_test.go | 43 +
 atomic/float32.go | 77 +
 atomic/float32_ext.go | 76 +
 atomic/float32_test.go | 73 +
 atomic/float64.go | 77 +
 atomic/float64_ext.go | 76 +
 atomic/float64_test.go | 73 +
 atomic/gen.go | 27 +
 atomic/int32.go | 109 +
 atomic/int32_test.go | 82 +
 atomic/int64.go | 109 +
 atomic/int64_test.go | 82 +
 atomic/internal/gen-atomicint/main.go | 116 +
 atomic/internal/gen-atomicint/wrapper.tmpl | 117 +
 atomic/internal/gen-atomicwrapper/main.go | 203 ++
 .../internal/gen-atomicwrapper/wrapper.tmpl | 120 +
 atomic/nocmp.go | 35 +
 atomic/nocmp_test.go | 164 +
 atomic/pointer_test.go | 100 +
 atomic/stress_test.go | 289 ++
 atomic/string.go | 72 +
 atomic/string_ext.go | 54 +
 atomic/string_test.go | 170 +
 atomic/time.go | 55 +
 atomic/time_ext.go | 36 +
 atomic/time_test.go | 86 +
 atomic/tools/tools.go | 30 +
 atomic/uint32.go | 109 +
 atomic/uint32_test.go | 77 +
 atomic/uint64.go | 109 +
 atomic/uint64_test.go | 77 +
 atomic/uintptr.go | 109 +
 atomic/uintptr_test.go | 80 +
 atomic/unsafe_pointer.go | 65 +
 atomic/unsafe_pointer_test.go | 83 +
 atomic/value.go | 31 +
 atomic/value_test.go | 40 +
 auth/nip42.go | 18 +-
 auth/nip42_test.go | 2 +-
 bech32encoding/keys.go | 4 +-
 bech32encoding/keys_test.go | 2 +-
 bech32encoding/nip19.go | 10 +-
 bech32encoding/nip19_test.go | 4 +-
 bech32encoding/tlv/tlv.go | 8 +-
 bin/binary.go | 40 +
 cmd/doc.go | 2 +
 cmd/lerproxy/LICENSE | 22 +
 cmd/lerproxy/README.md | 125 +
 cmd/lerproxy/buf/bufpool.go | 16 +
 cmd/lerproxy/hsts/proxy.go | 15 +
 cmd/lerproxy/main.go | 403 +++
 cmd/lerproxy/reverse/proxy.go | 35 +
 cmd/lerproxy/tcpkeepalive/listener.go | 40 +
 cmd/lerproxy/timeout/conn.go | 33 +
 cmd/lerproxy/util/u.go | 26 +
 cmd/nauth/main.go | 89 +
 cmd/nurl/main.go | 196 ++
 cmd/vainstr/LICENSE | 121 +
 cmd/vainstr/README.md | 16 +
 cmd/vainstr/main.go | 233 ++
 codec/codec.go | 42 +
 database/fetch-event-by-serial_test.go | 31 +-
 database/get-serial-by-id_test.go | 14 +-
 database/indexes/keys_test.go | 2 +
 database/indexes/types/identhash_test.go | 3 +
 database/query-events.go | 4 +-
 database/query-for-ids.go | 4 +-
 database/save-event.go | 8 +-
 dns/nip05.go | 157 +
 dns/nip05_test.go | 71 +
 ec/base58/base58_test.go | 3 +-
 ec/base58/base58check.go | 2 +-
 ec/base58/base58check_test.go | 3 +-
 ec/base58/example_test.go | 3 +-
 ec/base58/genalphabet.go | 6 +-
 ec/base58/util_test.go | 9 +
 ec/bech32/bech32.go | 22 +-
 ec/bech32/bech32_test.go | 544 ++--
 ec/bech32/example_test.go | 8 +-
 ec/bench_test.go | 5 +-
 ec/btcec_test.go | 139 +-
 ec/chaincfg/deployment_time_frame.go | 7 +-
 ec/chaincfg/params.go | 3 +-
 ec/chainhash/hash.go | 16 +-
 ec/chainhash/hash_test.go | 82 +-
 ec/chainhash/hashfuncs.go | 2 +-
 ec/ciphering_test.go | 12 +-
 ec/curve.go | 3 +-
 ec/ecdsa/bench_test.go | 7 +-
 ec/ecdsa/example_test.go |
10 +- ec/ecdsa/signature_test.go | 12 +- ec/ecdsa/util_test.go | 9 + ec/field_test.go | 4 +- ec/fuzz_test.go | 7 +- ec/musig2/bench_test.go | 23 +- ec/musig2/context.go | 12 +- ec/musig2/keys.go | 3 +- ec/musig2/keys_test.go | 5 +- ec/musig2/musig2_test.go | 52 +- ec/musig2/nonces.go | 20 +- ec/musig2/nonces_test.go | 5 +- ec/musig2/sign.go | 22 +- ec/musig2/sign_test.go | 5 +- ec/pubkey_test.go | 96 +- ec/schnorr/bench_test.go | 12 +- ec/schnorr/signature.go | 8 +- ec/schnorr/signature_test.go | 22 +- ec/secp256k1/bench_test.go | 4 +- ec/secp256k1/curve.go | 5 +- ec/secp256k1/curve_test.go | 3 +- ec/secp256k1/ecdh_test.go | 12 +- ec/secp256k1/ellipticadaptor_test.go | 3 +- ec/secp256k1/example_test.go | 22 +- ec/secp256k1/field_test.go | 2 +- ec/secp256k1/loadprecomputed.go | 6 +- ec/secp256k1/modnscalar_test.go | 2 +- ec/secp256k1/nonce.go | 2 +- ec/secp256k1/nonce_test.go | 6 +- ec/secp256k1/precomps/genprecomps.go | 10 +- ec/secp256k1/pubkey_test.go | 662 ++-- ec/secp256k1/seckey.go | 1 - ec/secp256k1/seckey_bench_test.go | 4 +- ec/secp256k1/seckey_test.go | 84 +- ec/secp256k1/util_test.go | 9 + ec/taproot/taproot.go | 2 +- ec/util_test.go | 9 + encryption/README.md | 1 + encryption/doc.go | 3 + encryption/nip4.go | 116 + encryption/nip44.go | 241 ++ encryption/nip44_test.go | 1368 ++++++++ env/config.go | 4 +- envelopes/authenvelope/authenvelope.go | 10 +- envelopes/authenvelope/authenvelope_test.go | 6 +- envelopes/closedenvelope/closedenvelope.go | 4 +- .../closedenvelope/closedenvelope_test.go | 4 +- envelopes/closeenvelope/closeenvelope.go | 4 +- envelopes/closeenvelope/closeenvelope_test.go | 4 +- envelopes/countenvelope/countenvelope.go | 6 +- envelopes/countenvelope/countenvelope_test.go | 4 +- envelopes/eid/eid.go | 5 - envelopes/eoseenvelope/eoseenvelope.go | 6 +- envelopes/eoseenvelope/eoseenvelope_test.go | 4 +- envelopes/eventenvelope/eventenvelope.go | 10 +- envelopes/eventenvelope/eventenvelope_test.go | 8 +- envelopes/identify.go | 11 +- envelopes/noticeenvelope/noticeenvelope.go | 4 +- .../noticeenvelope/noticeenvelope_test.go | 4 +- envelopes/okenvelope/okenvelope.go | 17 +- envelopes/okenvelope/okenvelope_test.go | 4 +- envelopes/reqenvelope/reqenvelope.go | 6 +- envelopes/reqenvelope/reqenvelope_test.go | 4 +- event/binary.go | 6 +- event/json.go | 2 +- eventid/eventid.go | 6 +- filter/filter.go | 131 +- filter/filter_test.go | 3 +- filter/simple.go | 18 +- filters/filters.go | 2 +- filters/filters_test.go | 3 +- go.mod | 42 +- go.sum | 87 +- helpers/helpers.go | 49 - hex/aliases.go | 7 +- httpauth/nip98auth.go | 4 +- httpauth/validate.go | 20 +- interfaces/store/store_interface.go | 5 + interrupt/main.go | 17 +- interrupt/restart.go | 3 +- ints/gen/pregen.go | 3 +- ints/ints.go | 5 +- ints/ints_test.go | 3 +- json/base64.go | 2 +- json/bech32.go | 4 +- json/examples_test.go | 2 +- json/keyvalue.go | 4 +- json/signed.go | 2 +- json/unsigned.go | 2 +- keys/keys.go | 82 + kind/kind.go | 2 +- kind/kind_test.go | 3 +- kinds/kinds_test.go | 2 +- layer2/badgerbadger/badgerbadger.go | 67 + layer2/badgerbadger/tester/badgerbadger.go | 216 ++ layer2/layer2.go | 279 ++ list/list.go | 3 - lol/README.md | 43 +- lol/log.go | 113 +- lol/log_test.go | 158 +- main.go | 82 +- normalize/normalize.go | 50 +- nwc/doc.go | 4 + nwc/error.go | 6 + nwc/get_balance.go | 19 + nwc/get_info.go | 29 + nwc/lightning.go | 18 + nwc/list_transactions.go | 21 + nwc/lookup_invoice.go | 26 + nwc/make_invoice_response.go | 29 + nwc/multi_pay_invoice.go | 19 + nwc/multi_pay_keysend.go | 18 + 
nwc/names.go | 130 + nwc/payKeysend.go | 1 + nwc/pay_invoice.go | 91 + nwc/pay_invoice_test.go | 25 + nwc/pay_keysend.go | 33 + nwc/protocols.go | 101 + openapi/common.go | 12 + openapi/http-configuration.go | 94 + openapi/http-disconnect.go | 51 + openapi/http-event.go | 249 ++ openapi/http-events.go | 124 + openapi/http-export.go | 68 + openapi/http-filter.go | 235 ++ openapi/http-import.go | 71 + openapi/http-nuke.go | 71 + openapi/http-relay.go | 95 + openapi/http-rescan.go | 58 + openapi/http-shutdown.go | 51 + openapi/http-subscribe.go | 158 + openapi/huma.go | 49 + openapi/publisher-openapi.go | 96 + openapi/serveMux.go | 21 + p256k/btcec.go | 1 - p256k/btcec/btcec.go | 16 +- p256k/btcec/btcec_test.go | 99 +- p256k/btcec/util_test.go | 9 + p256k/doc.go | 2 +- p256k/p256k.go | 33 +- p256k/p256k_test.go | 123 +- p256k/secp256k1.go | 10 +- p256k/secp256k1_test.go | 75 + p256k/util_test.go | 9 + qu/README.adoc | 60 + qu/qu.go | 242 ++ ratel/close.go | 26 + ratel/compact.go | 34 + ratel/configuration.go | 47 + ratel/countevents.go | 135 + ratel/create-a-tag.go | 94 + ratel/del/del.go | 13 + ratel/deleteevent.go | 120 + ratel/export.go | 209 ++ ratel/fetch-ids.go | 79 + ratel/garbagecollector.go | 66 + ratel/gccount.go | 203 ++ ratel/gcmark.go | 63 + ratel/gcsweep.go | 124 + ratel/getecounterkey.go | 10 + ratel/getindexkeysforevent.go | 115 + ratel/gettagkeyprefix.go | 56 + ratel/import.go | 49 + ratel/init.go | 114 + ratel/keys/arb/arb.go | 94 + ratel/keys/arb/arb_test.go | 22 + ratel/keys/count/count.go | 47 + ratel/keys/createdat/createdat.go | 49 + ratel/keys/createdat/createdat_test.go | 26 + ratel/keys/fullid/fullid.go | 48 + ratel/keys/fullid/fullid_test.go | 25 + ratel/keys/fullpubkey/fullpubkey.go | 47 + ratel/keys/fullpubkey/fullpubkey_test.go | 23 + ratel/keys/id/id.go | 72 + ratel/keys/id/id_test.go | 24 + ratel/keys/index/index.go | 52 + ratel/keys/index/prefixes.go | 32 + ratel/keys/keys.go | 44 + ratel/keys/keys_test.go | 142 + ratel/keys/kinder/kind.go | 45 + ratel/keys/kinder/kind_test.go | 21 + ratel/keys/pubkey/pubkey.go | 75 + ratel/keys/pubkey/pubkey_test.go | 29 + ratel/keys/serial/serial.go | 85 + ratel/keys/serial/serial_test.go | 23 + ratel/keys/tombstone/tombstone.go | 49 + ratel/keys/tombstone/tombstone_test.go | 23 + ratel/keys/util_test.go | 9 + ratel/log.go | 69 + ratel/main.go | 169 + ratel/nuke.go | 19 + ratel/prefixes/index_test.go | 21 + ratel/prefixes/prefixes.go | 180 + ratel/preparequeries.go | 202 ++ ratel/queryevents.go | 293 ++ ratel/queryforids.go | 209 ++ ratel/rescan.go | 82 + ratel/saveevent.go | 155 + readme.adoc | 125 + readme.md | 5 - orly.png => realy.png | Bin realy.service | 16 + realy/addEvent.go | 75 + realy/auth.go | 32 + realy/config/config.go | 217 ++ realy/disconnect.go | 10 + realy/doc.go | 3 + realy/handleRelayinfo.go | 63 + realy/handleWebsocket.go | 13 + realy/helpers/helpers.go | 34 + realy/interfaces/interfaces.go | 38 + realy/options/options.go | 33 + {pointers => realy/pointers}/pointers.go | 0 {publish => realy/publish}/publisher.go | 20 +- realy/publish/publisher/interface.go | 17 + realy/server-impl.go | 65 + realy/server-publish.go | 155 + realy/server.go | 186 ++ realy/server_test.go | 92 + realy/testrelay.go | 153 + reason/reason.go | 54 - relay/interface.go | 121 + relayinfo/fetch.go | 4 +- relayinfo/types.go | 6 +- reload.sh | 7 + scripts/runtests.sh | 2 + servemux/serveMux.go | 26 - server/add-event.go | 24 - server/handle-relayinfo.go | 54 - server/server.go | 79 - sha256/LICENSE | 202 ++ sha256/README.md | 137 + 
sha256/cpuid_other.go | 52 + sha256/doc.go | 6 + sha256/sha256.go | 468 +++ sha256/sha256_test.go | 2886 +++++++++++++++++ sha256/sha256blockAvx512_amd64.asm | 686 ++++ sha256/sha256blockAvx512_amd64.go | 508 +++ sha256/sha256blockAvx512_amd64.s | 267 ++ sha256/sha256blockAvx512_amd64_test.go | 443 +++ sha256/sha256block_amd64.go | 31 + sha256/sha256block_amd64.s | 266 ++ sha256/sha256block_amd64_test.go | 78 + sha256/sha256block_arm64.go | 38 + sha256/sha256block_arm64.s | 192 ++ sha256/sha256block_other.go | 29 + sha256/test-architectures.sh | 15 + signer/signer.go | 27 +- socketapi/challenge.go | 39 + socketapi/handleAuth.go | 52 + socketapi/handleClose.go | 17 +- socketapi/handleEvent.go | 482 +-- socketapi/handleMessage.go | 30 +- socketapi/handleReq.go | 146 +- socketapi/ok.go | 110 - socketapi/pinger.go | 38 + socketapi/publisher.go | 92 +- socketapi/socketapi.go | 175 +- socketapi/upgrader.go | 12 + subscription/subscriptionid.go | 4 +- subscription/subscriptionid_test.go | 3 +- tag/atag/atag.go | 2 +- tag/atag/atag_test.go | 4 +- tag/tag.go | 23 +- tag/tag_test.go | 5 +- tags/tags.go | 8 +- tags/tags_test.go | 4 +- tests/generate.go | 36 + text/escape_test.go | 4 +- text/helpers.go | 4 +- text/helpers_test.go | 4 +- timestamp/timestamp.go | 10 +- ubuntu_install_libsecp256k1.sh | 2 +- version/doc.go | 2 + version/version | 2 +- version/version.go | 10 +- ws/client.go | 555 ++++ ws/client_test.go | 271 ++ ws/connection.go | 6 +- ws/doc.go | 3 + ws/listener.go | 94 +- ws/pool.go | 432 +++ ws/subscription.go | 189 ++ ws/subscription_test.go | 130 + 395 files changed, 27364 insertions(+), 2835 deletions(-) create mode 100644 addresstag/addresstag.go create mode 100644 app/main.go create mode 100644 app/resources.go create mode 100644 apputil/doc.go create mode 100644 atomic/.codecov.yml create mode 100644 atomic/CHANGELOG.md create mode 100644 atomic/LICENSE create mode 100644 atomic/Makefile create mode 100644 atomic/README.md create mode 100644 atomic/assert_test.go create mode 100644 atomic/bool.go create mode 100644 atomic/bool_ext.go create mode 100644 atomic/bool_test.go create mode 100644 atomic/doc.go create mode 100644 atomic/duration.go create mode 100644 atomic/duration_ext.go create mode 100644 atomic/duration_test.go create mode 100644 atomic/error.go create mode 100644 atomic/error_ext.go create mode 100644 atomic/error_test.go create mode 100644 atomic/example_test.go create mode 100644 atomic/float32.go create mode 100644 atomic/float32_ext.go create mode 100644 atomic/float32_test.go create mode 100644 atomic/float64.go create mode 100644 atomic/float64_ext.go create mode 100644 atomic/float64_test.go create mode 100644 atomic/gen.go create mode 100644 atomic/int32.go create mode 100644 atomic/int32_test.go create mode 100644 atomic/int64.go create mode 100644 atomic/int64_test.go create mode 100644 atomic/internal/gen-atomicint/main.go create mode 100644 atomic/internal/gen-atomicint/wrapper.tmpl create mode 100644 atomic/internal/gen-atomicwrapper/main.go create mode 100644 atomic/internal/gen-atomicwrapper/wrapper.tmpl create mode 100644 atomic/nocmp.go create mode 100644 atomic/nocmp_test.go create mode 100644 atomic/pointer_test.go create mode 100644 atomic/stress_test.go create mode 100644 atomic/string.go create mode 100644 atomic/string_ext.go create mode 100644 atomic/string_test.go create mode 100644 atomic/time.go create mode 100644 atomic/time_ext.go create mode 100644 atomic/time_test.go create mode 100644 atomic/tools/tools.go create mode 100644 
atomic/uint32.go create mode 100644 atomic/uint32_test.go create mode 100644 atomic/uint64.go create mode 100644 atomic/uint64_test.go create mode 100644 atomic/uintptr.go create mode 100644 atomic/uintptr_test.go create mode 100644 atomic/unsafe_pointer.go create mode 100644 atomic/unsafe_pointer_test.go create mode 100644 atomic/value.go create mode 100644 atomic/value_test.go create mode 100644 bin/binary.go create mode 100644 cmd/doc.go create mode 100644 cmd/lerproxy/LICENSE create mode 100644 cmd/lerproxy/README.md create mode 100644 cmd/lerproxy/buf/bufpool.go create mode 100644 cmd/lerproxy/hsts/proxy.go create mode 100644 cmd/lerproxy/main.go create mode 100644 cmd/lerproxy/reverse/proxy.go create mode 100644 cmd/lerproxy/tcpkeepalive/listener.go create mode 100644 cmd/lerproxy/timeout/conn.go create mode 100644 cmd/lerproxy/util/u.go create mode 100644 cmd/nauth/main.go create mode 100644 cmd/nurl/main.go create mode 100644 cmd/vainstr/LICENSE create mode 100644 cmd/vainstr/README.md create mode 100644 cmd/vainstr/main.go create mode 100644 codec/codec.go create mode 100644 dns/nip05.go create mode 100644 dns/nip05_test.go create mode 100644 ec/base58/util_test.go create mode 100644 ec/ecdsa/util_test.go create mode 100644 ec/secp256k1/util_test.go create mode 100644 ec/util_test.go create mode 100644 encryption/README.md create mode 100644 encryption/doc.go create mode 100644 encryption/nip4.go create mode 100644 encryption/nip44.go create mode 100644 encryption/nip44_test.go delete mode 100644 envelopes/eid/eid.go delete mode 100644 helpers/helpers.go create mode 100644 keys/keys.go create mode 100644 layer2/badgerbadger/badgerbadger.go create mode 100644 layer2/badgerbadger/tester/badgerbadger.go create mode 100644 layer2/layer2.go delete mode 100644 list/list.go create mode 100644 nwc/doc.go create mode 100644 nwc/error.go create mode 100644 nwc/get_balance.go create mode 100644 nwc/get_info.go create mode 100644 nwc/lightning.go create mode 100644 nwc/list_transactions.go create mode 100644 nwc/lookup_invoice.go create mode 100644 nwc/make_invoice_response.go create mode 100644 nwc/multi_pay_invoice.go create mode 100644 nwc/multi_pay_keysend.go create mode 100644 nwc/names.go create mode 100644 nwc/payKeysend.go create mode 100644 nwc/pay_invoice.go create mode 100644 nwc/pay_invoice_test.go create mode 100644 nwc/pay_keysend.go create mode 100644 nwc/protocols.go create mode 100644 openapi/common.go create mode 100644 openapi/http-configuration.go create mode 100644 openapi/http-disconnect.go create mode 100644 openapi/http-event.go create mode 100644 openapi/http-events.go create mode 100644 openapi/http-export.go create mode 100644 openapi/http-filter.go create mode 100644 openapi/http-import.go create mode 100644 openapi/http-nuke.go create mode 100644 openapi/http-relay.go create mode 100644 openapi/http-rescan.go create mode 100644 openapi/http-shutdown.go create mode 100644 openapi/http-subscribe.go create mode 100644 openapi/huma.go create mode 100644 openapi/publisher-openapi.go create mode 100644 openapi/serveMux.go create mode 100644 p256k/btcec/util_test.go create mode 100644 p256k/util_test.go create mode 100644 qu/README.adoc create mode 100644 qu/qu.go create mode 100644 ratel/close.go create mode 100644 ratel/compact.go create mode 100644 ratel/configuration.go create mode 100644 ratel/countevents.go create mode 100644 ratel/create-a-tag.go create mode 100644 ratel/del/del.go create mode 100644 ratel/deleteevent.go create mode 100644 ratel/export.go create 
mode 100644 ratel/fetch-ids.go create mode 100644 ratel/garbagecollector.go create mode 100644 ratel/gccount.go create mode 100644 ratel/gcmark.go create mode 100644 ratel/gcsweep.go create mode 100644 ratel/getecounterkey.go create mode 100644 ratel/getindexkeysforevent.go create mode 100644 ratel/gettagkeyprefix.go create mode 100644 ratel/import.go create mode 100644 ratel/init.go create mode 100644 ratel/keys/arb/arb.go create mode 100644 ratel/keys/arb/arb_test.go create mode 100644 ratel/keys/count/count.go create mode 100644 ratel/keys/createdat/createdat.go create mode 100644 ratel/keys/createdat/createdat_test.go create mode 100644 ratel/keys/fullid/fullid.go create mode 100644 ratel/keys/fullid/fullid_test.go create mode 100644 ratel/keys/fullpubkey/fullpubkey.go create mode 100644 ratel/keys/fullpubkey/fullpubkey_test.go create mode 100644 ratel/keys/id/id.go create mode 100644 ratel/keys/id/id_test.go create mode 100644 ratel/keys/index/index.go create mode 100644 ratel/keys/index/prefixes.go create mode 100644 ratel/keys/keys.go create mode 100644 ratel/keys/keys_test.go create mode 100644 ratel/keys/kinder/kind.go create mode 100644 ratel/keys/kinder/kind_test.go create mode 100644 ratel/keys/pubkey/pubkey.go create mode 100644 ratel/keys/pubkey/pubkey_test.go create mode 100644 ratel/keys/serial/serial.go create mode 100644 ratel/keys/serial/serial_test.go create mode 100644 ratel/keys/tombstone/tombstone.go create mode 100644 ratel/keys/tombstone/tombstone_test.go create mode 100644 ratel/keys/util_test.go create mode 100644 ratel/log.go create mode 100644 ratel/main.go create mode 100644 ratel/nuke.go create mode 100644 ratel/prefixes/index_test.go create mode 100644 ratel/prefixes/prefixes.go create mode 100644 ratel/preparequeries.go create mode 100644 ratel/queryevents.go create mode 100644 ratel/queryforids.go create mode 100644 ratel/rescan.go create mode 100644 ratel/saveevent.go create mode 100644 readme.adoc delete mode 100644 readme.md rename orly.png => realy.png (100%) create mode 100644 realy.service create mode 100644 realy/addEvent.go create mode 100644 realy/auth.go create mode 100644 realy/config/config.go create mode 100644 realy/disconnect.go create mode 100644 realy/doc.go create mode 100644 realy/handleRelayinfo.go create mode 100644 realy/handleWebsocket.go create mode 100644 realy/helpers/helpers.go create mode 100644 realy/interfaces/interfaces.go create mode 100644 realy/options/options.go rename {pointers => realy/pointers}/pointers.go (100%) rename {publish => realy/publish}/publisher.go (66%) create mode 100644 realy/publish/publisher/interface.go create mode 100644 realy/server-impl.go create mode 100644 realy/server-publish.go create mode 100644 realy/server.go create mode 100644 realy/server_test.go create mode 100644 realy/testrelay.go delete mode 100644 reason/reason.go create mode 100644 relay/interface.go create mode 100755 reload.sh create mode 100644 scripts/runtests.sh delete mode 100644 servemux/serveMux.go delete mode 100644 server/add-event.go delete mode 100644 server/handle-relayinfo.go delete mode 100644 server/server.go create mode 100644 sha256/LICENSE create mode 100644 sha256/README.md create mode 100644 sha256/cpuid_other.go create mode 100644 sha256/doc.go create mode 100644 sha256/sha256.go create mode 100644 sha256/sha256_test.go create mode 100644 sha256/sha256blockAvx512_amd64.asm create mode 100644 sha256/sha256blockAvx512_amd64.go create mode 100644 sha256/sha256blockAvx512_amd64.s create mode 100644 
sha256/sha256blockAvx512_amd64_test.go create mode 100644 sha256/sha256block_amd64.go create mode 100644 sha256/sha256block_amd64.s create mode 100644 sha256/sha256block_amd64_test.go create mode 100644 sha256/sha256block_arm64.go create mode 100644 sha256/sha256block_arm64.s create mode 100644 sha256/sha256block_other.go create mode 100644 sha256/test-architectures.sh create mode 100644 socketapi/challenge.go create mode 100644 socketapi/handleAuth.go delete mode 100644 socketapi/ok.go create mode 100644 socketapi/pinger.go create mode 100644 socketapi/upgrader.go create mode 100644 tests/generate.go create mode 100644 version/doc.go create mode 100644 ws/client.go create mode 100644 ws/client_test.go create mode 100644 ws/doc.go create mode 100644 ws/pool.go create mode 100644 ws/subscription.go create mode 100644 ws/subscription_test.go diff --git a/.idea/workspace.xml b/.idea/workspace.xml index 9932710..8595069 100644 --- a/.idea/workspace.xml +++ b/.idea/workspace.xml @@ -19,10 +19,401 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/LICENSE b/LICENSE index 0e259d4..fdddb29 100644 --- a/LICENSE +++ b/LICENSE @@ -1,121 +1,24 @@ -Creative Commons Legal Code +This is free and unencumbered software released into the public domain. -CC0 1.0 Universal +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. -Statement of Purpose +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. 
- Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. +For more information, please refer to diff --git a/addresstag/addresstag.go b/addresstag/addresstag.go new file mode 100644 index 0000000..66d36bf --- /dev/null +++ b/addresstag/addresstag.go @@ -0,0 +1,21 @@ +package addresstag + +import ( + "strconv" + "strings" + + "orly.dev/hex" +) + +// DecodeAddressTag unpacks the contents of an `a` tag. +func DecodeAddressTag(tagValue string) (k uint16, pkb []byte, d string) { + split := strings.Split(tagValue, ":") + if len(split) == 3 { + if pkb, _ = hex.Dec(split[1]); len(pkb) == 32 { + if key, err := strconv.ParseUint(split[0], 10, 16); err == nil { + return uint16(key), pkb, split[2] + } + } + } + return +} diff --git a/app/main.go b/app/main.go new file mode 100644 index 0000000..2f88bc8 --- /dev/null +++ b/app/main.go @@ -0,0 +1,81 @@ +// Package app implements the realy nostr relay with a simple follow/mute list authentication scheme and the new HTTP REST based protocol. +package app + +import ( + "net/http" + "sync" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/filter" + "orly.dev/filters" + "orly.dev/interfaces/store" + "orly.dev/realy/config" +) + +type List map[string]struct{} + +type Relay struct { + sync.Mutex + *config.C + Store store.I +} + +func (r *Relay) Name() string { return r.C.AppName } + +func (r *Relay) Storage() store.I { return r.Store } + +func (r *Relay) Init() (err error) { + // for _, src := range r.C.Owners { + // if len(src) < 1 { + // continue + // } + // dst := make([]byte, len(src)/2) + // if _, err = hex.DecBytes(dst, []byte(src)); chk.E(err) { + // if dst, err = bech32encoding.NpubToBytes([]byte(src)); chk.E(err) { + // continue + // } + // } + // r.owners = append(r.owners, dst) + // } + // if len(r.owners) > 0 { + // log.F.C(func() string { + // ownerIds := make([]string, len(r.owners)) + // for i, npub := range r.owners { + // ownerIds[i] = hex.Enc(npub) + // } + // owners := strings.Join(ownerIds, ",") + // return fmt.Sprintf("owners %s", owners) + // }) + // r.ZeroLists() + // r.CheckOwnerLists(context.Bg()) + // } + return nil +} + +func (r *Relay) AcceptEvent( + c context.T, evt *event.E, hr *http.Request, + origin string, authedPubkey []byte, +) (accept bool, notice string, afterSave func()) { + accept = true + return +} + +func (r *Relay) AcceptFilter( + c context.T, hr *http.Request, f *filter.S, + authedPubkey []byte, +) (allowed *filter.S, ok bool, modified bool) { + allowed = f + ok = true + return +} + +func (r *Relay) AcceptReq( + c context.T, hr *http.Request, id []byte, + ff *filters.T, authedPubkey []byte, +) (allowed *filters.T, ok bool, modified bool) { + + allowed = ff + ok = true + return +} diff --git a/app/resources.go b/app/resources.go new file mode 100644 index 0000000..39b41d2 --- /dev/null +++ b/app/resources.go @@ -0,0 +1,30 @@ +package app + +import ( + "orly.dev/log" + "os" + "runtime" + "time" + + "orly.dev/context" +) + +func MonitorResources(c context.T) { + tick := time.NewTicker(time.Minute * 15) + log.I.Ln("running process", os.Args[0], os.Getpid()) + // memStats := &runtime.MemStats{} + for { + select { + case <-c.Done(): + log.D.Ln("shutting down resource monitor") + return + case <-tick.C: + // runtime.ReadMemStats(memStats) + log.D.Ln( + "# 
goroutines", runtime.NumGoroutine(), "# cgo calls", + runtime.NumCgoCall(), + ) + // log.D.S(memStats) + } + } +} diff --git a/apputil/doc.go b/apputil/doc.go new file mode 100644 index 0000000..0cde52a --- /dev/null +++ b/apputil/doc.go @@ -0,0 +1,2 @@ +// Package apputil provides some simple filesystem functions +package apputil diff --git a/atomic/.codecov.yml b/atomic/.codecov.yml new file mode 100644 index 0000000..571116c --- /dev/null +++ b/atomic/.codecov.yml @@ -0,0 +1,19 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + +# Also update COVER_IGNORE_PKGS in the Makefile. +ignore: + - /internal/gen-atomicint/ + - /internal/gen-valuewrapper/ diff --git a/atomic/CHANGELOG.md b/atomic/CHANGELOG.md new file mode 100644 index 0000000..71db542 --- /dev/null +++ b/atomic/CHANGELOG.md @@ -0,0 +1,130 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## Unreleased +- No changes yet. + +## [1.11.0] - 2023-05-02 +### Fixed +- Fix `Swap` and `CompareAndSwap` for `Value` wrappers without initialization. + +### Added +- Add `String` method to `atomic.Pointer[T]` type allowing users to safely print +underlying values of pointers. + +[1.11.0]: https://github.com/uber-go/atomic/compare/v1.10.0...v1.11.0 + +## [1.10.0] - 2022-08-11 +### Added +- Add `atomic.Float32` type for atomic operations on `float32`. +- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`, + and `atomic.Value`. +- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any + type. This is present only for Go 1.18 or higher, and is a drop-in for + replacement for the standard library's `sync/atomic.Pointer` type. + +### Changed +- Deprecate `CAS` methods on all types in favor of corresponding + `CompareAndSwap` methods. + +Thanks to @eNV25 and @icpd for their contributions to this release. + +[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0 + +## [1.9.0] - 2021-07-15 +### Added +- Add `Float64.Swap` to match int atomic operations. +- Add `atomic.Time` type for atomic operations on `time.Time` values. + +[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0 + +## [1.8.0] - 2021-06-09 +### Added +- Add `atomic.Uintptr` type for atomic operations on `uintptr` values. +- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values. + +[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0 + +## [1.7.0] - 2020-09-14 +### Added +- Support JSON serialization and deserialization of primitive atomic types. +- Support Text marshalling and unmarshalling for string atomics. + +### Changed +- Disallow incorrect comparison of atomic values in a non-atomic way. + +### Removed +- Remove dependency on `golang.org/x/{lint, tools}`. 
+ +[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 + +## [1.6.0] - 2020-02-24 +### Changed +- Drop library dependency on `golang.org/x/{lint, tools}`. + +[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 + +## [1.5.1] - 2019-11-19 +- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together + causing `CAS` to fail even though the old value matches. + +[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 + +## [1.5.0] - 2019-10-29 +### Changed +- With Go modules, only the `go.uber.org/atomic` import path is supported now. + If you need to use the old import path, please add a `replace` directive to + your `go.mod`. + +[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 + +## [1.4.0] - 2019-05-01 +### Added + - Add `atomic.Error` type for atomic operations on `error` values. + +[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 + +## [1.3.2] - 2018-05-02 +### Added +- Add `atomic.Duration` type for atomic operations on `time.Duration` values. + +[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 + +## [1.3.1] - 2017-11-14 +### Fixed +- Revert optimization for `atomic.String.Store("")` which caused data races. + +[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 + +## [1.3.0] - 2017-11-13 +### Added +- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. + +### Changed +- Optimize `atomic.String.Store("")` by avoiding an allocation. + +[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 + +## [1.2.0] - 2017-04-12 +### Added +- Shadow `atomic.Value` from `sync/atomic`. + +[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 + +## [1.1.0] - 2017-03-10 +### Added +- Add atomic `Float64` type. + +### Changed +- Support new `go.uber.org/atomic` import path. + +[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 + +## [1.0.0] - 2016-07-18 + +- Initial release. + +[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/atomic/LICENSE b/atomic/LICENSE new file mode 100644 index 0000000..8765c9f --- /dev/null +++ b/atomic/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/atomic/Makefile b/atomic/Makefile new file mode 100644 index 0000000..53432ab --- /dev/null +++ b/atomic/Makefile @@ -0,0 +1,79 @@ +# Directory to place `go install`ed binaries into. 
+export GOBIN ?= $(shell pwd)/bin + +GOLINT = $(GOBIN)/golint +GEN_ATOMICINT = $(GOBIN)/gen-atomicint +GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper +STATICCHECK = $(GOBIN)/staticcheck + +GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) + +# Also update ignore section in .codecov.yml. +COVER_IGNORE_PKGS = \ + github.com/p9ds/atomic/internal/gen-atomicint \ + github.com/p9ds/atomic/internal/gen-atomicwrapper + +.PHONY: build +build: + go build ./... + +.PHONY: test +test: + go test -race ./... + +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) + +$(GOLINT): + cd tools && go install golang.org/x/lint/golint + +$(STATICCHECK): + cd tools && go install honnef.co/go/tools/cmd/staticcheck + +$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) + go build -o $@ ./internal/gen-atomicwrapper + +$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) + go build -o $@ ./internal/gen-atomicint + +.PHONY: golint +golint: $(GOLINT) + $(GOLINT) ./... + +.PHONY: staticcheck +staticcheck: $(STATICCHECK) + $(STATICCHECK) ./... + +.PHONY: lint +lint: gofmt golint staticcheck generatenodirty + +# comma separated list of packages to consider for code coverage. +COVER_PKG = $(shell \ + go list -find ./... | \ + grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ + paste -sd, -) + +.PHONY: cover +cover: + go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... + go tool cover -html=cover.out -o cover.html + +.PHONY: generate +generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) + go generate ./... + +.PHONY: generatenodirty +generatenodirty: + @[ -z "$$(git status --porcelain)" ] || ( \ + echo "Working tree is dirty. Commit your changes first."; \ + git status; \ + exit 1 ) + @make generate + @status=$$(git status --porcelain); \ + [ -z "$$status" ] || ( \ + echo "Working tree is dirty after `make generate`:"; \ + echo "$$status"; \ + echo "Please ensure that the generated code is up-to-date." ) diff --git a/atomic/README.md b/atomic/README.md new file mode 100644 index 0000000..3eed44a --- /dev/null +++ b/atomic/README.md @@ -0,0 +1,33 @@ +# atomic + +Simple wrappers for primitive types to enforce atomic access. + +## Installation + +```shell +$ go get -u github.com/mleku/nodl/pkg/atomic@latest +``` + +## Usage + +The standard library's `sync/atomic` is powerful, but it's easy to forget which +variables must be accessed atomically. `github.com/mleku/nodl/pkg/atomic` preserves all the +functionality of the standard library, but wraps the primitive types to +provide a safer, more convenient API. + +```go +var atom atomic.Uint32 +atom.Store(42) +atom.Sub(2) +atom.CompareAndSwap(40, 11) +``` + +See the [documentation][doc] for a complete API specification. + +## Development Status + +Stable. + +--- + +Released under the [MIT License](LICENSE.txt). \ No newline at end of file diff --git a/atomic/assert_test.go b/atomic/assert_test.go new file mode 100644 index 0000000..47cfbf2 --- /dev/null +++ b/atomic/assert_test.go @@ -0,0 +1,45 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +// Marks the test as failed if the error cannot be cast into the provided type +// with errors.As. +// +// assertErrorAsType(t, err, new(ErrFoo)) +func assertErrorAsType(t *testing.T, err error, typ interface{}, msgAndArgs ...interface{}) bool { + t.Helper() + + return assert.True(t, errors.As(err, typ), msgAndArgs...) +} + +func assertErrorJSONUnmarshalType(t *testing.T, err error, msgAndArgs ...interface{}) bool { + t.Helper() + + return assertErrorAsType(t, err, new(*json.UnmarshalTypeError), msgAndArgs...) +} diff --git a/atomic/bool.go b/atomic/bool.go new file mode 100644 index 0000000..f0a2ddd --- /dev/null +++ b/atomic/bool.go @@ -0,0 +1,88 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" +) + +// Bool is an atomic type-safe wrapper for bool values. +type Bool struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroBool bool + +// NewBool creates a new Bool. +func NewBool(val bool) *Bool { + x := &Bool{} + if val != _zeroBool { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped bool. 
+func (x *Bool) Load() bool { + return truthy(x.v.Load()) +} + +// Store atomically stores the passed bool. +func (x *Bool) Store(val bool) { + x.v.Store(boolToInt(val)) +} + +// CAS is an atomic compare-and-swap for bool values. +// +// Deprecated: Use CompareAndSwap. +func (x *Bool) CAS(old, new bool) (swapped bool) { + return x.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for bool values. +func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) { + return x.v.CompareAndSwap(boolToInt(old), boolToInt(new)) +} + +// Swap atomically stores the given bool and returns the old +// value. +func (x *Bool) Swap(val bool) (old bool) { + return truthy(x.v.Swap(boolToInt(val))) +} + +// MarshalJSON encodes the wrapped bool into JSON. +func (x *Bool) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a bool from JSON. +func (x *Bool) UnmarshalJSON(b []byte) error { + var v bool + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/atomic/bool_ext.go b/atomic/bool_ext.go new file mode 100644 index 0000000..a2e60e9 --- /dev/null +++ b/atomic/bool_ext.go @@ -0,0 +1,53 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go + +func truthy(n uint32) bool { + return n == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Toggle atomically negates the Boolean and returns the previous value. +func (b *Bool) Toggle() (old bool) { + for { + old := b.Load() + if b.CAS(old, !old) { + return old + } + } +} + +// String encodes the wrapped value as a string. +func (b *Bool) String() string { + return strconv.FormatBool(b.Load()) +} diff --git a/atomic/bool_test.go b/atomic/bool_test.go new file mode 100644 index 0000000..6753ebd --- /dev/null +++ b/atomic/bool_test.go @@ -0,0 +1,150 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBool(t *testing.T) { + atom := NewBool(false) + require.False(t, atom.Toggle(), "Expected Toggle to return previous value.") + require.True(t, atom.Toggle(), "Expected Toggle to return previous value.") + require.False(t, atom.Toggle(), "Expected Toggle to return previous value.") + require.True(t, atom.Load(), "Unexpected state after swap.") + + require.True(t, atom.CAS(true, true), "CAS should swap when old matches") + require.True(t, atom.Load(), "CAS should have no effect") + require.True(t, atom.CAS(true, false), "CAS should swap when old matches") + require.False(t, atom.Load(), "CAS should have modified the value") + require.False(t, atom.CAS(true, false), "CAS should fail on old mismatch") + require.False(t, atom.Load(), "CAS should not have modified the value") + + atom.Store(false) + require.False(t, atom.Load(), "Unexpected state after store.") + + prev := atom.Swap(false) + require.False(t, prev, "Expected Swap to return previous value.") + + prev = atom.Swap(true) + require.False(t, prev, "Expected Swap to return previous value.") + + t.Run("JSON/Marshal", func(t *testing.T) { + atom.Store(true) + bytes, err := json.Marshal(atom) + require.NoError(t, err, "json.Marshal errored unexpectedly.") + require.Equal(t, []byte("true"), bytes, "json.Marshal encoded the wrong bytes.") + }) + + t.Run("JSON/Unmarshal", func(t *testing.T) { + err := json.Unmarshal([]byte("false"), &atom) + require.NoError(t, err, "json.Unmarshal errored unexpectedly.") + require.False(t, atom.Load(), "json.Unmarshal didn't set the correct value.") + }) + + t.Run("JSON/Unmarshal/Error", func(t *testing.T) { + err := json.Unmarshal([]byte("42"), &atom) + require.Error(t, err, "json.Unmarshal didn't error as expected.") + assertErrorJSONUnmarshalType(t, err, + "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err) + }) + + t.Run("String", func(t *testing.T) { + t.Run("true", func(t *testing.T) { + assert.Equal(t, "true", NewBool(true).String(), + "String() returned an unexpected value.") + }) + + t.Run("false", func(t *testing.T) { + var b Bool + assert.Equal(t, "false", b.String(), + "String() returned an unexpected value.") + }) + }) +} + +func TestBool_InitializeDefaults(t *testing.T) { + tests := []struct { + msg string + newBool func() *Bool + }{ + { + 
msg: "Uninitialized", + newBool: func() *Bool { + var b Bool + return &b + }, + }, + { + msg: "NewBool with default", + newBool: func() *Bool { + return NewBool(false) + }, + }, + { + msg: "Bool swapped with default", + newBool: func() *Bool { + b := NewBool(true) + b.Swap(false) + return b + }, + }, + { + msg: "Bool CAS'd with default", + newBool: func() *Bool { + b := NewBool(true) + b.CompareAndSwap(true, false) + return b + }, + }, + } + + for _, tt := range tests { + t.Run(tt.msg, func(t *testing.T) { + t.Run("Marshal", func(t *testing.T) { + b := tt.newBool() + marshalled, err := b.MarshalJSON() + require.NoError(t, err) + assert.Equal(t, "false", string(marshalled)) + }) + + t.Run("String", func(t *testing.T) { + b := tt.newBool() + assert.Equal(t, "false", b.String()) + }) + + t.Run("CompareAndSwap", func(t *testing.T) { + b := tt.newBool() + require.True(t, b.CompareAndSwap(false, true)) + assert.Equal(t, true, b.Load()) + }) + + t.Run("Swap", func(t *testing.T) { + b := tt.newBool() + assert.Equal(t, false, b.Swap(true)) + }) + }) + } +} diff --git a/atomic/doc.go b/atomic/doc.go new file mode 100644 index 0000000..ae7390e --- /dev/null +++ b/atomic/doc.go @@ -0,0 +1,23 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. +package atomic diff --git a/atomic/duration.go b/atomic/duration.go new file mode 100644 index 0000000..7c23868 --- /dev/null +++ b/atomic/duration.go @@ -0,0 +1,89 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "time" +) + +// Duration is an atomic type-safe wrapper for time.Duration values. +type Duration struct { + _ nocmp // disallow non-atomic comparison + + v Int64 +} + +var _zeroDuration time.Duration + +// NewDuration creates a new Duration. +func NewDuration(val time.Duration) *Duration { + x := &Duration{} + if val != _zeroDuration { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped time.Duration. +func (x *Duration) Load() time.Duration { + return time.Duration(x.v.Load()) +} + +// Store atomically stores the passed time.Duration. +func (x *Duration) Store(val time.Duration) { + x.v.Store(int64(val)) +} + +// CAS is an atomic compare-and-swap for time.Duration values. +// +// Deprecated: Use CompareAndSwap. +func (x *Duration) CAS(old, new time.Duration) (swapped bool) { + return x.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for time.Duration values. +func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) { + return x.v.CompareAndSwap(int64(old), int64(new)) +} + +// Swap atomically stores the given time.Duration and returns the old +// value. +func (x *Duration) Swap(val time.Duration) (old time.Duration) { + return time.Duration(x.v.Swap(int64(val))) +} + +// MarshalJSON encodes the wrapped time.Duration into JSON. +func (x *Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a time.Duration from JSON. +func (x *Duration) UnmarshalJSON(b []byte) error { + var v time.Duration + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/atomic/duration_ext.go b/atomic/duration_ext.go new file mode 100644 index 0000000..62a45b3 --- /dev/null +++ b/atomic/duration_ext.go @@ -0,0 +1,40 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go + +// Add atomically adds to the wrapped time.Duration and returns the new value. 
+func (x *Duration) Add(delta time.Duration) time.Duration { + return time.Duration(x.v.Add(int64(delta))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (x *Duration) Sub(delta time.Duration) time.Duration { + return time.Duration(x.v.Sub(int64(delta))) +} + +// String encodes the wrapped value as a string. +func (x *Duration) String() string { + return x.Load().String() +} diff --git a/atomic/duration_test.go b/atomic/duration_test.go new file mode 100644 index 0000000..f5779fe --- /dev/null +++ b/atomic/duration_test.go @@ -0,0 +1,73 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDuration(t *testing.T) { + atom := NewDuration(5 * time.Minute) + + require.Equal(t, 5*time.Minute, atom.Load(), "Load didn't work.") + require.Equal(t, 6*time.Minute, atom.Add(time.Minute), "Add didn't work.") + require.Equal(t, 4*time.Minute, atom.Sub(2*time.Minute), "Sub didn't work.") + + require.True(t, atom.CAS(4*time.Minute, time.Minute), "CAS didn't report a swap.") + require.Equal(t, time.Minute, atom.Load(), "CAS didn't set the correct value.") + + require.Equal(t, time.Minute, atom.Swap(2*time.Minute), "Swap didn't return the old value.") + require.Equal(t, 2*time.Minute, atom.Load(), "Swap didn't set the correct value.") + + atom.Store(10 * time.Minute) + require.Equal(t, 10*time.Minute, atom.Load(), "Store didn't set the correct value.") + + t.Run("JSON/Marshal", func(t *testing.T) { + atom.Store(time.Second) + bytes, err := json.Marshal(atom) + require.NoError(t, err, "json.Marshal errored unexpectedly.") + require.Equal(t, []byte("1000000000"), bytes, "json.Marshal encoded the wrong bytes.") + }) + + t.Run("JSON/Unmarshal", func(t *testing.T) { + err := json.Unmarshal([]byte("1000000000"), &atom) + require.NoError(t, err, "json.Unmarshal errored unexpectedly.") + require.Equal(t, time.Second, atom.Load(), + "json.Unmarshal didn't set the correct value.") + }) + + t.Run("JSON/Unmarshal/Error", func(t *testing.T) { + err := json.Unmarshal([]byte("\"1000000000\""), &atom) + require.Error(t, err, "json.Unmarshal didn't error as expected.") + assertErrorJSONUnmarshalType(t, err, + "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err) + }) + + t.Run("String", func(t *testing.T) { + 
assert.Equal(t, "42s", NewDuration(42*time.Second).String(), + "String() returned an unexpected value.") + }) +} diff --git a/atomic/error.go b/atomic/error.go new file mode 100644 index 0000000..b7e3f12 --- /dev/null +++ b/atomic/error.go @@ -0,0 +1,72 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper for error values. +type Error struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroError error + +// NewError creates a new Error. +func NewError(val error) *Error { + x := &Error{} + if val != _zeroError { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped error. +func (x *Error) Load() error { + return unpackError(x.v.Load()) +} + +// Store atomically stores the passed error. +func (x *Error) Store(val error) { + x.v.Store(packError(val)) +} + +// CompareAndSwap is an atomic compare-and-swap for error values. +func (x *Error) CompareAndSwap(old, new error) (swapped bool) { + if x.v.CompareAndSwap(packError(old), packError(new)) { + return true + } + + if old == _zeroError { + // If the old value is the empty value, then it's possible the + // underlying Value hasn't been set and is nil, so retry with nil. + return x.v.CompareAndSwap(nil, packError(new)) + } + + return false +} + +// Swap atomically stores the given error and returns the old +// value. +func (x *Error) Swap(val error) (old error) { + return unpackError(x.v.Swap(packError(val))) +} diff --git a/atomic/error_ext.go b/atomic/error_ext.go new file mode 100644 index 0000000..d31fb63 --- /dev/null +++ b/atomic/error_ext.go @@ -0,0 +1,39 @@ +// Copyright (c) 2020-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// atomic.Value panics on nil inputs, or if the underlying type changes. +// Stabilize by always storing a custom struct that we control. + +//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go + +type packedError struct{ Value error } + +func packError(v error) interface{} { + return packedError{v} +} + +func unpackError(v interface{}) error { + if err, ok := v.(packedError); ok { + return err.Value + } + return nil +} diff --git a/atomic/error_test.go b/atomic/error_test.go new file mode 100644 index 0000000..1f02e6d --- /dev/null +++ b/atomic/error_test.go @@ -0,0 +1,136 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
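[Editorial note, not part of the patch: a minimal standalone sketch of why Error packs values before handing them to an atomic.Value, as the error_ext.go comment above describes. sync/atomic.Value panics when it is given nil or when the stored concrete type changes, so wrapping every value in a single struct type keeps Store(nil) and mixed error types safe. The `packed` struct and the demo program are illustrative only.]

// Why the Error wrapper stores a packedError instead of the error itself:
// atomic.Value rejects nil and inconsistently typed values, so a single
// wrapper struct type is stored on every call.
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

type packed struct{ err error } // one concrete type for every Store

func main() {
	var v atomic.Value

	// Storing the wrapper is always legal, even for a nil error.
	v.Store(packed{nil})
	v.Store(packed{errors.New("boom")})

	if p, ok := v.Load().(packed); ok {
		fmt.Println(p.err) // boom
	}

	// Storing a bare nil would panic instead:
	// v.Store(nil) // panic: sync/atomic: store of nil value into Value
}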
+ +package atomic + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestErrorByValue(t *testing.T) { + err := &Error{} + require.Nil(t, err.Load(), "Initial value shall be nil") +} + +func TestNewErrorWithNilArgument(t *testing.T) { + err := NewError(nil) + require.Nil(t, err.Load(), "Initial value shall be nil") +} + +func TestErrorCanStoreNil(t *testing.T) { + err := NewError(errors.New("hello")) + err.Store(nil) + require.Nil(t, err.Load(), "Stored value shall be nil") +} + +func TestNewErrorWithError(t *testing.T) { + err1 := errors.New("hello1") + err2 := errors.New("hello2") + + atom := NewError(err1) + require.Equal(t, err1, atom.Load(), "Expected Load to return initialized value") + + atom.Store(err2) + require.Equal(t, err2, atom.Load(), "Expected Load to return overridden value") +} + +func TestErrorSwap(t *testing.T) { + err1 := errors.New("hello1") + err2 := errors.New("hello2") + + atom := NewError(err1) + require.Equal(t, err1, atom.Load(), "Expected Load to return initialized value") + + old := atom.Swap(err2) + require.Equal(t, err2, atom.Load(), "Expected Load to return overridden value") + require.Equal(t, err1, old, "Expected old to be initial value") +} + +func TestErrorCompareAndSwap(t *testing.T) { + err1 := errors.New("hello1") + err2 := errors.New("hello2") + + atom := NewError(err1) + require.Equal(t, err1, atom.Load(), "Expected Load to return initialized value") + + swapped := atom.CompareAndSwap(err2, err2) + require.False(t, swapped, "Expected swapped to be false") + require.Equal(t, err1, atom.Load(), "Expected Load to return initial value") + + swapped = atom.CompareAndSwap(err1, err2) + require.True(t, swapped, "Expected swapped to be true") + require.Equal(t, err2, atom.Load(), "Expected Load to return overridden value") +} + +func TestError_InitializeDefaults(t *testing.T) { + tests := []struct { + msg string + newError func() *Error + }{ + { + msg: "Uninitialized", + newError: func() *Error { + var e Error + return &e + }, + }, + { + msg: "NewError with default", + newError: func() *Error { + return NewError(nil) + }, + }, + { + msg: "Error swapped with default", + newError: func() *Error { + e := NewError(assert.AnError) + e.Swap(nil) + return e + }, + }, + { + msg: "Error CAS'd with default", + newError: func() *Error { + e := NewError(assert.AnError) + e.CompareAndSwap(assert.AnError, nil) + return e + }, + }, + } + + for _, tt := range tests { + t.Run(tt.msg, func(t *testing.T) { + t.Run("CompareAndSwap", func(t *testing.T) { + e := tt.newError() + require.True(t, e.CompareAndSwap(nil, assert.AnError)) + assert.Equal(t, assert.AnError, e.Load()) + }) + + t.Run("Swap", func(t *testing.T) { + e := tt.newError() + assert.Equal(t, nil, e.Swap(assert.AnError)) + }) + }) + } +} diff --git a/atomic/example_test.go b/atomic/example_test.go new file mode 100644 index 0000000..9b344eb --- /dev/null +++ b/atomic/example_test.go @@ -0,0 +1,43 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic_test + +import ( + "fmt" + + "orly.dev/atomic" +) + +func Example() { + // Uint32 is a thin wrapper around the primitive uint32 type. + var atom atomic.Uint32 + + // The wrapper ensures that all operations are atomic. + atom.Store(42) + fmt.Println(atom.Inc()) + fmt.Println(atom.CompareAndSwap(43, 0)) + fmt.Println(atom.Load()) + + // Output: + // 43 + // true + // 0 +} diff --git a/atomic/float32.go b/atomic/float32.go new file mode 100644 index 0000000..62c3633 --- /dev/null +++ b/atomic/float32.go @@ -0,0 +1,77 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float32 is an atomic type-safe wrapper for float32 values. +type Float32 struct { + _ nocmp // disallow non-atomic comparison + + v Uint32 +} + +var _zeroFloat32 float32 + +// NewFloat32 creates a new Float32. +func NewFloat32(val float32) *Float32 { + x := &Float32{} + if val != _zeroFloat32 { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped float32. +func (x *Float32) Load() float32 { + return math.Float32frombits(x.v.Load()) +} + +// Store atomically stores the passed float32. 
+func (x *Float32) Store(val float32) { + x.v.Store(math.Float32bits(val)) +} + +// Swap atomically stores the given float32 and returns the old +// value. +func (x *Float32) Swap(val float32) (old float32) { + return math.Float32frombits(x.v.Swap(math.Float32bits(val))) +} + +// MarshalJSON encodes the wrapped float32 into JSON. +func (x *Float32) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float32 from JSON. +func (x *Float32) UnmarshalJSON(b []byte) error { + var v float32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/atomic/float32_ext.go b/atomic/float32_ext.go new file mode 100644 index 0000000..b0cd8d9 --- /dev/null +++ b/atomic/float32_ext.go @@ -0,0 +1,76 @@ +// Copyright (c) 2020-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "math" + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go + +// Add atomically adds to the wrapped float32 and returns the new value. +func (f *Float32) Add(delta float32) float32 { + for { + old := f.Load() + new := old + delta + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float32 and returns the new value. +func (f *Float32) Sub(delta float32) float32 { + return f.Add(-delta) +} + +// CAS is an atomic compare-and-swap for float32 values. +// +// Deprecated: Use CompareAndSwap +func (f *Float32) CAS(old, new float32) (swapped bool) { + return f.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for float32 values. +// +// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators +// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN. +// This avoids typical CompareAndSwap loops from blocking forever, e.g., +// +// for { +// old := atom.Load() +// new = f(old) +// if atom.CompareAndSwap(old, new) { +// break +// } +// } +// +// If CompareAndSwap did not match NaN to match, then the above would loop forever. +func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) { + return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new)) +} + +// String encodes the wrapped value as a string. +func (f *Float32) String() string { + // 'g' is the behavior for floats with %v. 
+ return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32) +} diff --git a/atomic/float32_test.go b/atomic/float32_test.go new file mode 100644 index 0000000..5b7fd51 --- /dev/null +++ b/atomic/float32_test.go @@ -0,0 +1,73 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFloat32(t *testing.T) { + atom := NewFloat32(4.2) + + require.Equal(t, float32(4.2), atom.Load(), "Load didn't work.") + + require.True(t, atom.CAS(4.2, 0.5), "CAS didn't report a swap.") + require.Equal(t, float32(0.5), atom.Load(), "CAS didn't set the correct value.") + require.False(t, atom.CAS(0.0, 1.5), "CAS reported a swap.") + + atom.Store(42.0) + require.Equal(t, float32(42.0), atom.Load(), "Store didn't set the correct value.") + require.Equal(t, float32(42.5), atom.Add(0.5), "Add didn't work.") + require.Equal(t, float32(42.0), atom.Sub(0.5), "Sub didn't work.") + + require.Equal(t, float32(42.0), atom.Swap(45.0), "Swap didn't return the old value.") + require.Equal(t, float32(45.0), atom.Load(), "Swap didn't set the correct value.") + + t.Run("JSON/Marshal", func(t *testing.T) { + atom.Store(42.5) + bytes, err := json.Marshal(atom) + require.NoError(t, err, "json.Marshal errored unexpectedly.") + require.Equal(t, []byte("42.5"), bytes, "json.Marshal encoded the wrong bytes.") + }) + + t.Run("JSON/Unmarshal", func(t *testing.T) { + err := json.Unmarshal([]byte("40.5"), &atom) + require.NoError(t, err, "json.Unmarshal errored unexpectedly.") + require.Equal(t, float32(40.5), atom.Load(), + "json.Unmarshal didn't set the correct value.") + }) + + t.Run("JSON/Unmarshal/Error", func(t *testing.T) { + err := json.Unmarshal([]byte("\"40.5\""), &atom) + require.Error(t, err, "json.Unmarshal didn't error as expected.") + assertErrorJSONUnmarshalType(t, err, + "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err) + }) + + t.Run("String", func(t *testing.T) { + assert.Equal(t, "42.5", NewFloat32(42.5).String(), + "String() returned an unexpected value.") + }) +} diff --git a/atomic/float64.go b/atomic/float64.go new file mode 100644 index 0000000..5bc11ca --- /dev/null +++ b/atomic/float64.go @@ -0,0 +1,77 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" +) + +// Float64 is an atomic type-safe wrapper for float64 values. +type Float64 struct { + _ nocmp // disallow non-atomic comparison + + v Uint64 +} + +var _zeroFloat64 float64 + +// NewFloat64 creates a new Float64. +func NewFloat64(val float64) *Float64 { + x := &Float64{} + if val != _zeroFloat64 { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped float64. +func (x *Float64) Load() float64 { + return math.Float64frombits(x.v.Load()) +} + +// Store atomically stores the passed float64. +func (x *Float64) Store(val float64) { + x.v.Store(math.Float64bits(val)) +} + +// Swap atomically stores the given float64 and returns the old +// value. +func (x *Float64) Swap(val float64) (old float64) { + return math.Float64frombits(x.v.Swap(math.Float64bits(val))) +} + +// MarshalJSON encodes the wrapped float64 into JSON. +func (x *Float64) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) +} + +// UnmarshalJSON decodes a float64 from JSON. +func (x *Float64) UnmarshalJSON(b []byte) error { + var v float64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil +} diff --git a/atomic/float64_ext.go b/atomic/float64_ext.go new file mode 100644 index 0000000..48c52b0 --- /dev/null +++ b/atomic/float64_ext.go @@ -0,0 +1,76 @@ +// Copyright (c) 2020-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "math" + "strconv" +) + +//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(delta float64) float64 { + for { + old := f.Load() + new := old + delta + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(delta float64) float64 { + return f.Add(-delta) +} + +// CAS is an atomic compare-and-swap for float64 values. +// +// Deprecated: Use CompareAndSwap +func (f *Float64) CAS(old, new float64) (swapped bool) { + return f.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap for float64 values. +// +// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators +// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN. +// This avoids typical CompareAndSwap loops from blocking forever, e.g., +// +// for { +// old := atom.Load() +// new = f(old) +// if atom.CompareAndSwap(old, new) { +// break +// } +// } +// +// If CompareAndSwap did not match NaN to match, then the above would loop forever. +func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) { + return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new)) +} + +// String encodes the wrapped value as a string. +func (f *Float64) String() string { + // 'g' is the behavior for floats with %v. + return strconv.FormatFloat(f.Load(), 'g', -1, 64) +} diff --git a/atomic/float64_test.go b/atomic/float64_test.go new file mode 100644 index 0000000..32fbc58 --- /dev/null +++ b/atomic/float64_test.go @@ -0,0 +1,73 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
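[Editorial note, not part of the patch: a sketch of the same compare-and-swap retry loop that Float64.Add above relies on, written directly against sync/atomic. The float is kept as its IEEE-754 bit pattern in a uint64, and Add retries until no other goroutine has changed the value in between. The `add` helper and demo are illustrative assumptions, not code from this patch.]

package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

// add applies delta atomically by looping until CompareAndSwap succeeds,
// converting between float64 and its bit pattern on each attempt.
func add(bits *uint64, delta float64) float64 {
	for {
		old := atomic.LoadUint64(bits)
		new := math.Float64bits(math.Float64frombits(old) + delta)
		if atomic.CompareAndSwapUint64(bits, old, new) {
			return math.Float64frombits(new)
		}
	}
}

func main() {
	var bits uint64 // holds the float64 as raw bits
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); add(&bits, 0.5) }()
	}
	wg.Wait()
	fmt.Println(math.Float64frombits(atomic.LoadUint64(&bits))) // 50
}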
+ +package atomic + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFloat64(t *testing.T) { + atom := NewFloat64(4.2) + + require.Equal(t, float64(4.2), atom.Load(), "Load didn't work.") + + require.True(t, atom.CAS(4.2, 0.5), "CAS didn't report a swap.") + require.Equal(t, float64(0.5), atom.Load(), "CAS didn't set the correct value.") + require.False(t, atom.CAS(0.0, 1.5), "CAS reported a swap.") + + atom.Store(42.0) + require.Equal(t, float64(42.0), atom.Load(), "Store didn't set the correct value.") + require.Equal(t, float64(42.5), atom.Add(0.5), "Add didn't work.") + require.Equal(t, float64(42.0), atom.Sub(0.5), "Sub didn't work.") + + require.Equal(t, float64(42.0), atom.Swap(45.0), "Swap didn't return the old value.") + require.Equal(t, float64(45.0), atom.Load(), "Swap didn't set the correct value.") + + t.Run("JSON/Marshal", func(t *testing.T) { + atom.Store(42.5) + bytes, err := json.Marshal(atom) + require.NoError(t, err, "json.Marshal errored unexpectedly.") + require.Equal(t, []byte("42.5"), bytes, "json.Marshal encoded the wrong bytes.") + }) + + t.Run("JSON/Unmarshal", func(t *testing.T) { + err := json.Unmarshal([]byte("40.5"), &atom) + require.NoError(t, err, "json.Unmarshal errored unexpectedly.") + require.Equal(t, float64(40.5), atom.Load(), + "json.Unmarshal didn't set the correct value.") + }) + + t.Run("JSON/Unmarshal/Error", func(t *testing.T) { + err := json.Unmarshal([]byte("\"40.5\""), &atom) + require.Error(t, err, "json.Unmarshal didn't error as expected.") + assertErrorJSONUnmarshalType(t, err, + "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err) + }) + + t.Run("String", func(t *testing.T) { + assert.Equal(t, "42.5", NewFloat64(42.5).String(), + "String() returned an unexpected value.") + }) +} diff --git a/atomic/gen.go b/atomic/gen.go new file mode 100644 index 0000000..1e9ef4f --- /dev/null +++ b/atomic/gen.go @@ -0,0 +1,27 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
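[Editorial note, not part of the patch: a short usage sketch of the JSON behaviour the Float64 tests above assert. Because the wrappers implement MarshalJSON/UnmarshalJSON in terms of the plain value, a struct field of type *atomic.Float64 encodes exactly like a float64 field. The import path matches the one used in example_test.go in this patch; the Config type is an illustrative assumption.]

package main

import (
	"encoding/json"
	"fmt"

	"orly.dev/atomic"
)

// Config shows an atomic field participating in ordinary JSON encoding.
type Config struct {
	Rate *atomic.Float64 `json:"rate"`
}

func main() {
	cfg := Config{Rate: atomic.NewFloat64(42.5)}

	out, _ := json.Marshal(cfg)
	fmt.Println(string(out)) // {"rate":42.5}

	var in Config
	in.Rate = atomic.NewFloat64(0) // pointer must be non-nil before Unmarshal
	_ = json.Unmarshal([]byte(`{"rate":40.5}`), &in)
	fmt.Println(in.Rate.Load()) // 40.5
}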
+ +package atomic + +//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go +//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go +//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go +//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go +//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go diff --git a/atomic/int32.go b/atomic/int32.go new file mode 100644 index 0000000..5320eac --- /dev/null +++ b/atomic/int32.go @@ -0,0 +1,109 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int32 is an atomic wrapper around int32. +type Int32 struct { + _ nocmp // disallow non-atomic comparison + + v int32 +} + +// NewInt32 creates a new Int32. +func NewInt32(val int32) *Int32 { + return &Int32{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(delta int32) int32 { + return atomic.AddInt32(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(delta int32) int32 { + return atomic.AddInt32(&i.v, -delta) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. +func (i *Int32) CAS(old, new int32) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(val int32) { + atomic.StoreInt32(&i.v, val) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(val int32) (old int32) { + return atomic.SwapInt32(&i.v, val) +} + +// MarshalJSON encodes the wrapped int32 into JSON. 
+func (i *Int32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int32. +func (i *Int32) UnmarshalJSON(b []byte) error { + var v int32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int32) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/atomic/int32_test.go b/atomic/int32_test.go new file mode 100644 index 0000000..9992251 --- /dev/null +++ b/atomic/int32_test.go @@ -0,0 +1,82 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInt32(t *testing.T) { + atom := NewInt32(42) + + require.Equal(t, int32(42), atom.Load(), "Load didn't work.") + require.Equal(t, int32(46), atom.Add(4), "Add didn't work.") + require.Equal(t, int32(44), atom.Sub(2), "Sub didn't work.") + require.Equal(t, int32(45), atom.Inc(), "Inc didn't work.") + require.Equal(t, int32(44), atom.Dec(), "Dec didn't work.") + + require.True(t, atom.CAS(44, 0), "CAS didn't report a swap.") + require.Equal(t, int32(0), atom.Load(), "CAS didn't set the correct value.") + + require.Equal(t, int32(0), atom.Swap(1), "Swap didn't return the old value.") + require.Equal(t, int32(1), atom.Load(), "Swap didn't set the correct value.") + + atom.Store(42) + require.Equal(t, int32(42), atom.Load(), "Store didn't set the correct value.") + + t.Run("JSON/Marshal", func(t *testing.T) { + bytes, err := json.Marshal(atom) + require.NoError(t, err, "json.Marshal errored unexpectedly.") + require.Equal(t, []byte("42"), bytes, "json.Marshal encoded the wrong bytes.") + }) + + t.Run("JSON/Unmarshal", func(t *testing.T) { + err := json.Unmarshal([]byte("40"), &atom) + require.NoError(t, err, "json.Unmarshal errored unexpectedly.") + require.Equal(t, int32(40), atom.Load(), "json.Unmarshal didn't set the correct value.") + }) + + t.Run("JSON/Unmarshal/Error", func(t *testing.T) { + err := json.Unmarshal([]byte(`"40"`), &atom) + require.Error(t, err, "json.Unmarshal didn't error as expected.") + assertErrorJSONUnmarshalType(t, err, + "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err) + }) + + t.Run("String", func(t *testing.T) { + t.Run("positive", func(t *testing.T) { + atom := 
NewInt32(math.MaxInt32) + assert.Equal(t, "2147483647", atom.String(), + "String() returned an unexpected value.") + }) + + t.Run("negative", func(t *testing.T) { + atom := NewInt32(math.MinInt32) + assert.Equal(t, "-2147483648", atom.String(), + "String() returned an unexpected value.") + }) + }) +} diff --git a/atomic/int64.go b/atomic/int64.go new file mode 100644 index 0000000..460821d --- /dev/null +++ b/atomic/int64.go @@ -0,0 +1,109 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Int64 is an atomic wrapper around int64. +type Int64 struct { + _ nocmp // disallow non-atomic comparison + + v int64 +} + +// NewInt64 creates a new Int64. +func NewInt64(val int64) *Int64 { + return &Int64{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(delta int64) int64 { + return atomic.AddInt64(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(delta int64) int64 { + return atomic.AddInt64(&i.v, -delta) +} + +// Inc atomically increments the wrapped int64 and returns the new value. +func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. +func (i *Int64) CAS(old, new int64) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(val int64) { + atomic.StoreInt64(&i.v, val) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(val int64) (old int64) { + return atomic.SwapInt64(&i.v, val) +} + +// MarshalJSON encodes the wrapped int64 into JSON. +func (i *Int64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped int64. 
+func (i *Int64) UnmarshalJSON(b []byte) error { + var v int64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Int64) String() string { + v := i.Load() + return strconv.FormatInt(int64(v), 10) +} diff --git a/atomic/int64_test.go b/atomic/int64_test.go new file mode 100644 index 0000000..ed5a104 --- /dev/null +++ b/atomic/int64_test.go @@ -0,0 +1,82 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInt64(t *testing.T) { + atom := NewInt64(42) + + require.Equal(t, int64(42), atom.Load(), "Load didn't work.") + require.Equal(t, int64(46), atom.Add(4), "Add didn't work.") + require.Equal(t, int64(44), atom.Sub(2), "Sub didn't work.") + require.Equal(t, int64(45), atom.Inc(), "Inc didn't work.") + require.Equal(t, int64(44), atom.Dec(), "Dec didn't work.") + + require.True(t, atom.CAS(44, 0), "CAS didn't report a swap.") + require.Equal(t, int64(0), atom.Load(), "CAS didn't set the correct value.") + + require.Equal(t, int64(0), atom.Swap(1), "Swap didn't return the old value.") + require.Equal(t, int64(1), atom.Load(), "Swap didn't set the correct value.") + + atom.Store(42) + require.Equal(t, int64(42), atom.Load(), "Store didn't set the correct value.") + + t.Run("JSON/Marshal", func(t *testing.T) { + bytes, err := json.Marshal(atom) + require.NoError(t, err, "json.Marshal errored unexpectedly.") + require.Equal(t, []byte("42"), bytes, "json.Marshal encoded the wrong bytes.") + }) + + t.Run("JSON/Unmarshal", func(t *testing.T) { + err := json.Unmarshal([]byte("40"), &atom) + require.NoError(t, err, "json.Unmarshal errored unexpectedly.") + require.Equal(t, int64(40), atom.Load(), "json.Unmarshal didn't set the correct value.") + }) + + t.Run("JSON/Unmarshal/Error", func(t *testing.T) { + err := json.Unmarshal([]byte(`"40"`), &atom) + require.Error(t, err, "json.Unmarshal didn't error as expected.") + assertErrorJSONUnmarshalType(t, err, + "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err) + }) + + t.Run("String", func(t *testing.T) { + t.Run("positive", func(t *testing.T) { + atom := NewInt64(math.MaxInt64) + assert.Equal(t, "9223372036854775807", atom.String(), + "String() returned an unexpected value.") + }) + + 
t.Run("negative", func(t *testing.T) { + atom := NewInt64(math.MinInt64) + assert.Equal(t, "-9223372036854775808", atom.String(), + "String() returned an unexpected value.") + }) + }) +} diff --git a/atomic/internal/gen-atomicint/main.go b/atomic/internal/gen-atomicint/main.go new file mode 100644 index 0000000..719fe9c --- /dev/null +++ b/atomic/internal/gen-atomicint/main.go @@ -0,0 +1,116 @@ +// Copyright (c) 2020-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// gen-atomicint generates an atomic wrapper around an integer type. +// +// gen-atomicint -name Int32 -wrapped int32 -file out.go +// +// The generated wrapper will use the functions in the sync/atomic package +// named after the generated type. +package main + +import ( + "bytes" + "embed" + "errors" + "flag" + "fmt" + "go/format" + "io" + "log" + "os" + "text/template" + "time" +) + +func main() { + log.SetFlags(0) + if err := run(os.Args[1:]); err != nil { + log.Fatalf("%+v", err) + } +} + +func run(args []string) error { + var opts struct { + Name string + Wrapped string + File string + Unsigned bool + } + + flag := flag.NewFlagSet("gen-atomicint", flag.ContinueOnError) + + flag.StringVar(&opts.Name, "name", "", "name of the generated type (e.g. Int32)") + flag.StringVar(&opts.Wrapped, "wrapped", "", "name of the wrapped type (e.g. 
int32)") + flag.StringVar(&opts.File, "file", "", "output file path (default: stdout)") + flag.BoolVar(&opts.Unsigned, "unsigned", false, "whether the type is unsigned") + + if err := flag.Parse(args); err != nil { + return err + } + + if len(opts.Name) == 0 || len(opts.Wrapped) == 0 { + return errors.New("flags -name and -wrapped are required") + } + + var w io.Writer = os.Stdout + if file := opts.File; len(file) > 0 { + f, err := os.Create(file) + if err != nil { + return fmt.Errorf("create %q: %v", file, err) + } + defer f.Close() + + w = f + } + + data := struct { + Name string + Wrapped string + Unsigned bool + ToYear int + }{ + Name: opts.Name, + Wrapped: opts.Wrapped, + Unsigned: opts.Unsigned, + ToYear: time.Now().Year(), + } + + var buff bytes.Buffer + if err := _tmpl.ExecuteTemplate(&buff, "wrapper.tmpl", data); err != nil { + return fmt.Errorf("render template: %v", err) + } + + bs, err := format.Source(buff.Bytes()) + if err != nil { + return fmt.Errorf("reformat source: %v", err) + } + + io.WriteString(w, "// @generated Code generated by gen-atomicint.\n\n") + _, err = w.Write(bs) + return err +} + +var ( + //go:embed *.tmpl + _tmplFS embed.FS + + _tmpl = template.Must(template.New("atomicint").ParseFS(_tmplFS, "*.tmpl")) +) diff --git a/atomic/internal/gen-atomicint/wrapper.tmpl b/atomic/internal/gen-atomicint/wrapper.tmpl new file mode 100644 index 0000000..502fadc --- /dev/null +++ b/atomic/internal/gen-atomicint/wrapper.tmpl @@ -0,0 +1,117 @@ +// Copyright (c) 2020-{{.ToYear}} Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// {{ .Name }} is an atomic wrapper around {{ .Wrapped }}. +type {{ .Name }} struct { + _ nocmp // disallow non-atomic comparison + + v {{ .Wrapped }} +} + +// New{{ .Name }} creates a new {{ .Name }}. +func New{{ .Name }}(val {{ .Wrapped }}) *{{ .Name }} { + return &{{ .Name }}{v: val} +} + +// Load atomically loads the wrapped value. +func (i *{{ .Name }}) Load() {{ .Wrapped }} { + return atomic.Load{{ .Name }}(&i.v) +} + +// Add atomically adds to the wrapped {{ .Wrapped }} and returns the new value. +func (i *{{ .Name }}) Add(delta {{ .Wrapped }}) {{ .Wrapped }} { + return atomic.Add{{ .Name }}(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped {{ .Wrapped }} and returns the new value. 
+func (i *{{ .Name }}) Sub(delta {{ .Wrapped }}) {{ .Wrapped }} { + return atomic.Add{{ .Name }}(&i.v, + {{- if .Unsigned -}} + ^(delta - 1) + {{- else -}} + -delta + {{- end -}} + ) +} + +// Inc atomically increments the wrapped {{ .Wrapped }} and returns the new value. +func (i *{{ .Name }}) Inc() {{ .Wrapped }} { + return i.Add(1) +} + +// Dec atomically decrements the wrapped {{ .Wrapped }} and returns the new value. +func (i *{{ .Name }}) Dec() {{ .Wrapped }} { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. +func (i *{{ .Name }}) CAS(old, new {{ .Wrapped }}) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *{{ .Name }}) CompareAndSwap(old, new {{ .Wrapped }}) (swapped bool) { + return atomic.CompareAndSwap{{ .Name }}(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *{{ .Name }}) Store(val {{ .Wrapped }}) { + atomic.Store{{ .Name }}(&i.v, val) +} + +// Swap atomically swaps the wrapped {{ .Wrapped }} and returns the old value. +func (i *{{ .Name }}) Swap(val {{ .Wrapped }}) (old {{ .Wrapped }}) { + return atomic.Swap{{ .Name }}(&i.v, val) +} + +// MarshalJSON encodes the wrapped {{ .Wrapped }} into JSON. +func (i *{{ .Name }}) MarshalJSON() (by, er) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped {{ .Wrapped }}. +func (i *{{ .Name }}) UnmarshalJSON(b by) er { + var v {{ .Wrapped }} + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *{{ .Name }}) String() string { + v := i.Load() + {{ if .Unsigned -}} + return strconv.FormatUint(uint64(v), 10) + {{- else -}} + return strconv.FormatInt(int64(v), 10) + {{- end }} +} diff --git a/atomic/internal/gen-atomicwrapper/main.go b/atomic/internal/gen-atomicwrapper/main.go new file mode 100644 index 0000000..26683cd --- /dev/null +++ b/atomic/internal/gen-atomicwrapper/main.go @@ -0,0 +1,203 @@ +// Copyright (c) 2020-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// gen-atomicwrapper generates wrapper types around other atomic types. +// +// It supports plugging in functions which convert the value inside the atomic +// type to the user-facing value. 
For example, +// +// Given, atomic.Value and the functions, +// +// func packString(string) interface{} +// func unpackString(interface{}) string +// +// We can run the following command: +// +// gen-atomicwrapper -name String -wrapped Value \ +// -type string -pack packString -unpack unpackString +// +// This will generate approximately, +// +// type String struct{ v Value } +// +// func (s *String) Load() string { +// return unpackString(s.v.Load()) +// } +// +// func (s *String) Store(val string) { +// s.v.Store(packString(val)) +// } +// +// The packing/unpacking logic allows the stored value to be different from +// the user-facing value. +package main + +import ( + "bytes" + "embed" + "errors" + "flag" + "fmt" + "go/format" + "io" + "log" + "os" + "sort" + "strings" + "text/template" + "time" +) + +func main() { + log.SetFlags(0) + if err := run(os.Args[1:]); err != nil { + log.Fatalf("%+v", err) + } +} + +type stringList []string + +func (sl *stringList) String() string { + return strings.Join(*sl, ",") +} + +func (sl *stringList) Set(s string) error { + for _, i := range strings.Split(s, ",") { + *sl = append(*sl, strings.TrimSpace(i)) + } + return nil +} + +func run(args []string) error { + var opts struct { + Name string + Wrapped string + Type string + + Imports stringList + Pack, Unpack string + + CAS bool + CompareAndSwap bool + Swap bool + JSON bool + + File string + ToYear int + } + + opts.ToYear = time.Now().Year() + + fl := flag.NewFlagSet("gen-atomicwrapper", flag.ContinueOnError) + + // Required flags + fl.StringVar(&opts.Name, "name", "", + "name of the generated type (e.g. Duration)") + fl.StringVar(&opts.Wrapped, "wrapped", "", + "name of the wrapped atomic (e.g. Int64)") + fl.StringVar(&opts.Type, "type", "", + "name of the type exposed by the atomic (e.g. time.Duration)") + + // Optional flags + fl.Var(&opts.Imports, "imports", + "comma separated list of imports to add") + fl.StringVar(&opts.Pack, "pack", "", + "function to transform values with before storage") + fl.StringVar(&opts.Unpack, "unpack", "", + "function to reverse packing on loading") + fl.StringVar(&opts.File, "file", "", + "output file path (default: stdout)") + + // Switches for individual methods. Underlying atomics must support + // these. + fl.BoolVar(&opts.CAS, "cas", false, + "generate a deprecated `CAS(old, new) bool` method; requires -pack") + fl.BoolVar(&opts.CompareAndSwap, "compareandswap", false, + "generate a `CompareAndSwap(old, new) bool` method; requires -pack") + fl.BoolVar(&opts.Swap, "swap", false, + "generate a `Swap(new) old` method; requires -pack and -unpack") + fl.BoolVar(&opts.JSON, "json", false, + "generate `MarshalJSON/UnmarshalJSON` methods") + + if err := fl.Parse(args); err != nil { + return err + } + + if len(opts.Name) == 0 || + len(opts.Wrapped) == 0 || + len(opts.Type) == 0 || + len(opts.Pack) == 0 || + len(opts.Unpack) == 0 { + return errors.New("flags -name, -wrapped, -pack, -unpack and -type are required") + } + + if opts.CAS { + opts.CompareAndSwap = true + } + + var w io.Writer = os.Stdout + if file := opts.File; len(file) > 0 { + f, err := os.Create(file) + if err != nil { + return fmt.Errorf("create %q: %v", file, err) + } + defer f.Close() + + w = f + } + + // Import encoding/json if needed.
+ if opts.JSON { + found := false + for _, imp := range opts.Imports { + if imp == "encoding/json" { + found = true + break + } + } + + if !found { + opts.Imports = append(opts.Imports, "encoding/json") + } + } + + sort.Strings(opts.Imports) + + var buff bytes.Buffer + if err := _tmpl.ExecuteTemplate(&buff, "wrapper.tmpl", opts); err != nil { + return fmt.Errorf("render template: %v", err) + } + + bs, err := format.Source(buff.Bytes()) + if err != nil { + return fmt.Errorf("reformat source: %v", err) + } + + io.WriteString(w, "// @generated Code generated by gen-atomicwrapper.\n\n") + _, err = w.Write(bs) + return err +} + +var ( + //go:embed *.tmpl + _tmplFS embed.FS + + _tmpl = template.Must(template.New("atomicwrapper").ParseFS(_tmplFS, "*.tmpl")) +) diff --git a/atomic/internal/gen-atomicwrapper/wrapper.tmpl b/atomic/internal/gen-atomicwrapper/wrapper.tmpl new file mode 100644 index 0000000..6ed6a9e --- /dev/null +++ b/atomic/internal/gen-atomicwrapper/wrapper.tmpl @@ -0,0 +1,120 @@ +// Copyright (c) 2020-{{.ToYear}} Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +{{ with .Imports }} +import ( + {{ range . -}} + {{ printf "%q" . }} + {{ end }} +) +{{ end }} + +// {{ .Name }} is an atomic type-safe wrapper for {{ .Type }} values. +type {{ .Name }} struct{ + _ nocmp // disallow non-atomic comparison + + v {{ .Wrapped }} +} + +var _zero{{ .Name }} {{ .Type }} + + +// New{{ .Name }} creates a new {{ .Name }}. +func New{{ .Name }}(val {{ .Type }}) *{{ .Name }} { + x := &{{ .Name }}{} + if val != _zero{{ .Name }} { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped {{ .Type }}. +func (x *{{ .Name }}) Load() {{ .Type }} { + {{ if .Unpack -}} + return {{ .Unpack }}(x.v.Load()) + {{- else -}} + if v := x.v.Load(); v != nil { + return v.({{ .Type }}) + } + return _zero{{ .Name }} + {{- end }} +} + +// Store atomically stores the passed {{ .Type }}. +func (x *{{ .Name }}) Store(val {{ .Type }}) { + x.v.Store({{ .Pack }}(val)) +} + +{{ if .CAS -}} + // CAS is an atomic compare-and-swap for {{ .Type }} values. + // + // Deprecated: Use CompareAndSwap. + func (x *{{ .Name }}) CAS(old, new {{ .Type }}) (swapped bool) { + return x.CompareAndSwap(old, new) + } +{{- end }} + +{{ if .CompareAndSwap -}} + // CompareAndSwap is an atomic compare-and-swap for {{ .Type }} values. 
+ func (x *{{ .Name }}) CompareAndSwap(old, new {{ .Type }}) (swapped bool) { + {{ if eq .Wrapped "Value" -}} + if x.v.CompareAndSwap({{ .Pack }}(old), {{ .Pack }}(new)) { + return true + } + + if old == _zero{{ .Name }} { + // If the old value is the empty value, then it's possible the + // underlying Value hasn't been set and is nil, so retry with nil. + return x.v.CompareAndSwap(nil, {{ .Pack }}(new)) + } + + return false + {{- else -}} + return x.v.CompareAndSwap({{ .Pack }}(old), {{ .Pack }}(new)) + {{- end }} + } +{{- end }} + +{{ if .Swap -}} + // Swap atomically stores the given {{ .Type }} and returns the old + // value. + func (x *{{ .Name }}) Swap(val {{ .Type }}) (old {{ .Type }}) { + return {{ .Unpack }}(x.v.Swap({{ .Pack }}(val))) + } +{{- end }} + +{{ if .JSON -}} + // MarshalJSON encodes the wrapped {{ .Type }} into JSON. + func (x *{{ .Name }}) MarshalJSON() ([]byte, error) { + return json.Marshal(x.Load()) + } + + // UnmarshalJSON decodes a {{ .Type }} from JSON. + func (x *{{ .Name }}) UnmarshalJSON(b []byte) error { + var v {{ .Type }} + if err := json.Unmarshal(b, &v); err != nil { + return err + } + x.Store(v) + return nil + } +{{- end }} diff --git a/atomic/nocmp.go b/atomic/nocmp.go new file mode 100644 index 0000000..54b7417 --- /dev/null +++ b/atomic/nocmp.go @@ -0,0 +1,35 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// nocmp is an uncomparable struct. Embed this inside another struct to make +// it uncomparable. +// +// type Foo struct { +// nocmp +// // ... +// } +// +// This DOES NOT: +// +// - Disallow shallow copies of structs +// - Disallow comparison of pointers to uncomparable structs +type nocmp [0]func() diff --git a/atomic/nocmp_test.go b/atomic/nocmp_test.go new file mode 100644 index 0000000..8719421 --- /dev/null +++ b/atomic/nocmp_test.go @@ -0,0 +1,164 @@ +// Copyright (c) 2020 Uber Technologies, Inc.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "bytes" + "os" + "os/exec" + "path/filepath" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNocmpComparability(t *testing.T) { + tests := []struct { + desc string + give interface{} + comparable bool + }{ + { + desc: "nocmp struct", + give: nocmp{}, + }, + { + desc: "struct with nocmp embedded", + give: struct{ nocmp }{}, + }, + { + desc: "pointer to struct with nocmp embedded", + give: &struct{ nocmp }{}, + comparable: true, + }, + + // All exported types must be uncomparable. + {desc: "Bool", give: Bool{}}, + {desc: "Duration", give: Duration{}}, + {desc: "Error", give: Error{}}, + {desc: "Float64", give: Float64{}}, + {desc: "Int32", give: Int32{}}, + {desc: "Int64", give: Int64{}}, + {desc: "String", give: String{}}, + {desc: "Uint32", give: Uint32{}}, + {desc: "Uint64", give: Uint64{}}, + {desc: "Value", give: Value{}}, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + typ := reflect.TypeOf(tt.give) + assert.Equalf(t, tt.comparable, typ.Comparable(), + "type %v comparablity mismatch", typ) + }) + } +} + +// nocmp must not add to the size of a struct in-memory. +func TestNocmpSize(t *testing.T) { + type x struct{ _ int } + + before := reflect.TypeOf(x{}).Size() + + type y struct { + _ nocmp + _ x + } + + after := reflect.TypeOf(y{}).Size() + + assert.Equal(t, before, after, + "expected nocmp to have no effect on struct size") +} + +// This test will fail to compile if we disallow copying of nocmp. +// +// We need to allow this so that users can do, +// +// var x atomic.Int32 +// x = atomic.NewInt32(1) +func TestNocmpCopy(t *testing.T) { + type foo struct{ _ nocmp } + + t.Run("struct copy", func(t *testing.T) { + a := foo{} + b := a + _ = b // unused + }) + + t.Run("pointer copy", func(t *testing.T) { + a := &foo{} + b := *a + _ = b // unused + }) +} + +// Fake go.mod with no dependencies. 
+const _exampleGoMod = `module example.com/nocmp` + +const _badFile = `package atomic + +import "fmt" + +type Int64 struct { + nocmp + + v int64 +} + +func shouldNotCompile() { + var x, y Int64 + fmt.Println(x == y) +} +` + +func TestNocmpIntegration(t *testing.T) { + tempdir := t.TempDir() + + nocmp, err := os.ReadFile("nocmp.go") + require.NoError(t, err, "unable to read nocmp.go") + + require.NoError(t, + os.WriteFile(filepath.Join(tempdir, "go.mod"), []byte(_exampleGoMod), 0o644), + "unable to write go.mod") + + require.NoError(t, + os.WriteFile(filepath.Join(tempdir, "nocmp.go"), nocmp, 0o644), + "unable to write nocmp.go") + + require.NoError(t, + os.WriteFile(filepath.Join(tempdir, "bad.go"), []byte(_badFile), 0o644), + "unable to write bad.go") + + var stderr bytes.Buffer + cmd := exec.Command("go", "build") + cmd.Dir = tempdir + // Create a minimal build environment with only HOME set so that "go + // build" has somewhere to put the cache and other Go files in. + cmd.Env = []string{"HOME=" + filepath.Join(tempdir, "home")} + cmd.Stderr = &stderr + require.Error(t, cmd.Run(), "bad.go must not compile") + + assert.Contains(t, stderr.String(), + "struct containing nocmp cannot be compared") +} diff --git a/atomic/pointer_test.go b/atomic/pointer_test.go new file mode 100644 index 0000000..837bd45 --- /dev/null +++ b/atomic/pointer_test.go @@ -0,0 +1,100 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +//go:build go1.18 +// +build go1.18 + +package atomic + +// +// import ( +// "fmt" +// "testing" +// +// "github.com/stretchr/testify/require" +// ) +// +// func TestPointer(t *testing.T) { +// type foo struct{ v int } +// +// i := foo{42} +// j := foo{0} +// k := foo{1} +// +// tests := []struct { +// desc string +// newAtomic func() *Pointer[foo] +// initial *foo +// }{ +// { +// desc: "New", +// newAtomic: func() *Pointer[foo] { +// return NewPointer(&i) +// }, +// initial: &i, +// }, +// { +// desc: "New/nil", +// newAtomic: func() *Pointer[foo] { +// return NewPointer[foo](nil) +// }, +// initial: nil, +// }, +// { +// desc: "zero value", +// newAtomic: func() *Pointer[foo] { +// var p Pointer[foo] +// return &p +// }, +// initial: nil, +// }, +// } +// +// for _, tt := range tests { +// t.Run(tt.desc, func(t *testing.T) { +// t.Run("Load", func(t *testing.T) { +// atom := tt.newAtomic() +// require.Equal(t, tt.initial, atom.Load(), "Load should report nil.") +// }) +// +// t.Run("Swap", func(t *testing.T) { +// atom := tt.newAtomic() +// require.Equal(t, tt.initial, atom.Swap(&k), "Swap didn't return the old value.") +// require.Equal(t, &k, atom.Load(), "Swap didn't set the correct value.") +// }) +// +// t.Run("CAS", func(t *testing.T) { +// atom := tt.newAtomic() +// require.True(t, atom.CompareAndSwap(tt.initial, &j), "CAS didn't report a swap.") +// require.Equal(t, &j, atom.Load(), "CAS didn't set the correct value.") +// }) +// +// t.Run("Store", func(t *testing.T) { +// atom := tt.newAtomic() +// atom.Store(&i) +// require.Equal(t, &i, atom.Load(), "Store didn't set the correct value.") +// }) +// t.Run("String", func(t *testing.T) { +// atom := tt.newAtomic() +// require.Equal(t, fmt.Sprint(tt.initial), atom.String(), "String did not return the correct value.") +// }) +// }) +// } +// } diff --git a/atomic/stress_test.go b/atomic/stress_test.go new file mode 100644 index 0000000..0ac7ac5 --- /dev/null +++ b/atomic/stress_test.go @@ -0,0 +1,289 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
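The Pointer test above is carried over fully commented out: the generic Pointer[T] wrapper it exercises has no atomic/pointer.go in this patch, so the test cannot build yet. For orientation, the API it expects matches the shape of the standard library's sync/atomic.Pointer[T] from Go 1.19 onward; a minimal sketch using the standard type only (config is an illustrative type, not code from this patch):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type config struct{ addr string }

    func main() {
        var p atomic.Pointer[config] // the zero value holds nil
        p.Store(&config{addr: "127.0.0.1"})
        old := p.Swap(&config{addr: "10.0.0.1"}) // returns the previously stored pointer
        fmt.Println(old.addr, p.Load().addr)     // 127.0.0.1 10.0.0.1
    }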
+ +package atomic + +import ( + "errors" + "math" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" +) + +const ( + _parallelism = 4 + _iterations = 1000 +) + +var _stressTests = map[string]func() func(){ + "i32/std": stressStdInt32, + "i32": stressInt32, + "i64/std": stressStdInt64, + "i64": stressInt64, + "u32/std": stressStdUint32, + "u32": stressUint32, + "u64/std": stressStdUint64, + "u64": stressUint64, + "f64": stressFloat64, + "bool": stressBool, + "string": stressString, + "duration": stressDuration, + "error": stressError, + "time": stressTime, +} + +func TestStress(t *testing.T) { + for name, ff := range _stressTests { + t.Run(name, func(t *testing.T) { + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(_parallelism)) + + start := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(_parallelism) + f := ff() + for i := 0; i < _parallelism; i++ { + go func() { + defer wg.Done() + <-start + for j := 0; j < _iterations; j++ { + f() + } + }() + } + close(start) + wg.Wait() + }) + } +} + +func BenchmarkStress(b *testing.B) { + for name, ff := range _stressTests { + b.Run(name, func(b *testing.B) { + f := ff() + + b.Run("serial", func(b *testing.B) { + for i := 0; i < b.N; i++ { + f() + } + }) + + b.Run("parallel", func(b *testing.B) { + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + f() + } + }) + }) + }) + } +} + +func stressStdInt32() func() { + var atom int32 + return func() { + atomic.LoadInt32(&atom) + atomic.AddInt32(&atom, 1) + atomic.AddInt32(&atom, -2) + atomic.AddInt32(&atom, 1) + atomic.AddInt32(&atom, -1) + atomic.CompareAndSwapInt32(&atom, 1, 0) + atomic.SwapInt32(&atom, 5) + atomic.StoreInt32(&atom, 1) + } +} + +func stressInt32() func() { + var atom Int32 + return func() { + atom.Load() + atom.Add(1) + atom.Sub(2) + atom.Inc() + atom.Dec() + atom.CAS(1, 0) + atom.Swap(5) + atom.Store(1) + } +} + +func stressStdInt64() func() { + var atom int64 + return func() { + atomic.LoadInt64(&atom) + atomic.AddInt64(&atom, 1) + atomic.AddInt64(&atom, -2) + atomic.AddInt64(&atom, 1) + atomic.AddInt64(&atom, -1) + atomic.CompareAndSwapInt64(&atom, 1, 0) + atomic.SwapInt64(&atom, 5) + atomic.StoreInt64(&atom, 1) + } +} + +func stressInt64() func() { + var atom Int64 + return func() { + atom.Load() + atom.Add(1) + atom.Sub(2) + atom.Inc() + atom.Dec() + atom.CAS(1, 0) + atom.Swap(5) + atom.Store(1) + } +} + +func stressStdUint32() func() { + var atom uint32 + return func() { + atomic.LoadUint32(&atom) + atomic.AddUint32(&atom, 1) + // Adding `MaxUint32` is the same as subtracting 1 + atomic.AddUint32(&atom, math.MaxUint32-1) + atomic.AddUint32(&atom, 1) + atomic.AddUint32(&atom, math.MaxUint32) + atomic.CompareAndSwapUint32(&atom, 1, 0) + atomic.SwapUint32(&atom, 5) + atomic.StoreUint32(&atom, 1) + } +} + +func stressUint32() func() { + var atom Uint32 + return func() { + atom.Load() + atom.Add(1) + atom.Sub(2) + atom.Inc() + atom.Dec() + atom.CAS(1, 0) + atom.Swap(5) + atom.Store(1) + } +} + +func stressStdUint64() func() { + var atom uint64 + return func() { + atomic.LoadUint64(&atom) + atomic.AddUint64(&atom, 1) + // Adding `MaxUint64` is the same as subtracting 1 + atomic.AddUint64(&atom, math.MaxUint64-1) + atomic.AddUint64(&atom, 1) + atomic.AddUint64(&atom, math.MaxUint64) + atomic.CompareAndSwapUint64(&atom, 1, 0) + atomic.SwapUint64(&atom, 5) + atomic.StoreUint64(&atom, 1) + } +} + +func stressUint64() func() { + var atom Uint64 + return func() { + atom.Load() + atom.Add(1) + atom.Sub(2) + atom.Inc() + atom.Dec() + atom.CAS(1, 0) + atom.Swap(5) + atom.Store(1) + 
} +} + +func stressFloat64() func() { + var atom Float64 + return func() { + atom.Load() + atom.CAS(1.0, 0.1) + atom.Add(1.1) + atom.Sub(0.2) + atom.Store(1.0) + } +} + +func stressBool() func() { + var atom Bool + return func() { + atom.Load() + atom.Store(false) + atom.Swap(true) + atom.CAS(true, false) + atom.CAS(true, false) + atom.Load() + atom.Toggle() + atom.Toggle() + } +} + +func stressString() func() { + var atom String + return func() { + atom.Load() + atom.Store("abc") + atom.Load() + atom.Store("def") + atom.Load() + atom.Store("") + } +} + +func stressDuration() func() { + var atom = NewDuration(0) + return func() { + atom.Load() + atom.Add(1) + atom.Sub(2) + atom.CAS(1, 0) + atom.Swap(5) + atom.Store(1) + } +} + +func stressError() func() { + var atom = NewError(nil) + var err1 = errors.New("err1") + var err2 = errors.New("err2") + return func() { + atom.Load() + atom.Store(err1) + atom.Load() + atom.Store(err2) + atom.Load() + atom.Store(nil) + } +} + +func stressTime() func() { + var atom = NewTime(time.Date(2021, 6, 17, 9, 0, 0, 0, time.UTC)) + var dayAgo = time.Date(2021, 6, 16, 9, 0, 0, 0, time.UTC) + var weekAgo = time.Date(2021, 6, 10, 9, 0, 0, 0, time.UTC) + return func() { + atom.Load() + atom.Store(dayAgo) + atom.Load() + atom.Store(weekAgo) + atom.Store(time.Time{}) + } +} diff --git a/atomic/string.go b/atomic/string.go new file mode 100644 index 0000000..061466c --- /dev/null +++ b/atomic/string.go @@ -0,0 +1,72 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper for string values. +type String struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroString string + +// NewString creates a new String. +func NewString(val string) *String { + x := &String{} + if val != _zeroString { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped string. +func (x *String) Load() string { + return unpackString(x.v.Load()) +} + +// Store atomically stores the passed string. +func (x *String) Store(val string) { + x.v.Store(packString(val)) +} + +// CompareAndSwap is an atomic compare-and-swap for string values. 
+func (x *String) CompareAndSwap(old, new string) (swapped bool) { + if x.v.CompareAndSwap(packString(old), packString(new)) { + return true + } + + if old == _zeroString { + // If the old value is the empty value, then it's possible the + // underlying Value hasn't been set and is nil, so retry with nil. + return x.v.CompareAndSwap(nil, packString(new)) + } + + return false +} + +// Swap atomically stores the given string and returns the old +// value. +func (x *String) Swap(val string) (old string) { + return unpackString(x.v.Swap(packString(val))) +} diff --git a/atomic/string_ext.go b/atomic/string_ext.go new file mode 100644 index 0000000..019109c --- /dev/null +++ b/atomic/string_ext.go @@ -0,0 +1,54 @@ +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped Value -pack packString -unpack unpackString -compareandswap -swap -file=string.go + +func packString(s string) interface{} { + return s +} + +func unpackString(v interface{}) string { + if s, ok := v.(string); ok { + return s + } + return "" +} + +// String returns the wrapped value. +func (s *String) String() string { + return s.Load() +} + +// MarshalText encodes the wrapped string into a textual form. +// +// This makes it encodable as JSON, YAML, XML, and more. +func (s *String) MarshalText() ([]byte, error) { + return []byte(s.Load()), nil +} + +// UnmarshalText decodes text and replaces the wrapped string with it. +// +// This makes it decodable from JSON, YAML, XML, and more. +func (s *String) UnmarshalText(b []byte) error { + s.Store(string(b)) + return nil +} diff --git a/atomic/string_test.go b/atomic/string_test.go new file mode 100644 index 0000000..6163113 --- /dev/null +++ b/atomic/string_test.go @@ -0,0 +1,170 @@ +// Copyright (c) 2016-2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
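The zero-value branch above is the subtle part of String.CompareAndSwap: a String that has never stored anything holds a nil atomic.Value rather than a packed empty string, so the plain CAS against packString(old) fails even though the logical value is "". Retrying against nil is what lets a CAS from the zero value succeed, which the string tests below rely on. A behavioural sketch, written as if it lived inside this atomic package:

    // Sketch only; not part of the patch.
    func exampleZeroValueCAS() {
        var s String // zero value; the underlying Value has never been set
        swapped := s.CompareAndSwap("", "first")
        // The first CAS of packString("") fails because the Value is nil,
        // so the zero-value branch retries CompareAndSwap(nil, ...) and succeeds.
        _ = swapped // true; s.Load() == "first"
    }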
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "encoding/xml" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStringNoInitialValue(t *testing.T) { + atom := &String{} + require.Equal(t, "", atom.Load(), "Initial value should be blank string") +} + +func TestString(t *testing.T) { + atom := NewString("") + require.Equal(t, "", atom.Load(), "Expected Load to return initialized value") + + atom.Store("abc") + require.Equal(t, "abc", atom.Load(), "Unexpected value after Store") + + atom = NewString("bcd") + require.Equal(t, "bcd", atom.Load(), "Expected Load to return initialized value") + + t.Run("JSON/Marshal", func(t *testing.T) { + bytes, err := json.Marshal(atom) + require.NoError(t, err, "json.Marshal errored unexpectedly.") + require.Equal(t, []byte(`"bcd"`), bytes, "json.Marshal encoded the wrong bytes.") + }) + + t.Run("JSON/Unmarshal", func(t *testing.T) { + err := json.Unmarshal([]byte(`"abc"`), &atom) + require.NoError(t, err, "json.Unmarshal errored unexpectedly.") + require.Equal(t, "abc", atom.Load(), "json.Unmarshal didn't set the correct value.") + }) + + t.Run("JSON/Unmarshal/Error", func(t *testing.T) { + err := json.Unmarshal([]byte("42"), &atom) + require.Error(t, err, "json.Unmarshal didn't error as expected.") + assertErrorJSONUnmarshalType(t, err, + "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err) + }) + + atom = NewString("foo") + + t.Run("XML/Marshal", func(t *testing.T) { + bytes, err := xml.Marshal(atom) + require.NoError(t, err, "xml.Marshal errored unexpectedly.") + require.Equal(t, []byte("foo"), bytes, + "xml.Marshal encoded the wrong bytes.") + }) + + t.Run("XML/Unmarshal", func(t *testing.T) { + err := xml.Unmarshal([]byte("bar"), &atom) + require.NoError(t, err, "xml.Unmarshal errored unexpectedly.") + require.Equal(t, "bar", atom.Load(), "xml.Unmarshal didn't set the correct value.") + }) + + t.Run("String", func(t *testing.T) { + atom := NewString("foo") + assert.Equal(t, "foo", atom.String(), + "String() returned an unexpected value.") + }) + + t.Run("CompareAndSwap", func(t *testing.T) { + atom := NewString("foo") + + swapped := atom.CompareAndSwap("bar", "bar") + require.False(t, swapped, "swapped isn't false") + require.Equal(t, atom.Load(), "foo", "Load returned wrong value") + + swapped = atom.CompareAndSwap("foo", "bar") + require.True(t, swapped, "swapped isn't true") + require.Equal(t, atom.Load(), "bar", "Load returned wrong value") + }) + + t.Run("Swap", func(t *testing.T) { + atom := NewString("foo") + + old := atom.Swap("bar") + require.Equal(t, old, "foo", "Swap returned wrong value") + require.Equal(t, atom.Load(), "bar", "Load returned wrong value") + }) +} + +func TestString_InitializeDefault(t *testing.T) { + tests := []struct { + msg string + newStr func() *String + }{ + { + msg: "Uninitialized", + newStr: func() *String { + var s String + return &s + }, + }, + { + msg: "NewString with default", + newStr: func() *String { + return NewString("") + }, + }, + { + 
msg: "String swapped with default", + newStr: func() *String { + s := NewString("initial") + s.Swap("") + return s + }, + }, + { + msg: "String CAS'd with default", + newStr: func() *String { + s := NewString("initial") + s.CompareAndSwap("initial", "") + return s + }, + }, + } + + for _, tt := range tests { + t.Run(tt.msg, func(t *testing.T) { + t.Run("MarshalText", func(t *testing.T) { + str := tt.newStr() + text, err := str.MarshalText() + require.NoError(t, err) + assert.Equal(t, "", string(text), "") + }) + + t.Run("String", func(t *testing.T) { + str := tt.newStr() + assert.Equal(t, "", str.String()) + }) + + t.Run("CompareAndSwap", func(t *testing.T) { + str := tt.newStr() + require.True(t, str.CompareAndSwap("", "new")) + assert.Equal(t, "new", str.Load()) + }) + + t.Run("Swap", func(t *testing.T) { + str := tt.newStr() + assert.Equal(t, "", str.Swap("new")) + }) + }) + } +} diff --git a/atomic/time.go b/atomic/time.go new file mode 100644 index 0000000..cc2a230 --- /dev/null +++ b/atomic/time.go @@ -0,0 +1,55 @@ +// @generated Code generated by gen-atomicwrapper. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "time" +) + +// Time is an atomic type-safe wrapper for time.Time values. +type Time struct { + _ nocmp // disallow non-atomic comparison + + v Value +} + +var _zeroTime time.Time + +// NewTime creates a new Time. +func NewTime(val time.Time) *Time { + x := &Time{} + if val != _zeroTime { + x.Store(val) + } + return x +} + +// Load atomically loads the wrapped time.Time. +func (x *Time) Load() time.Time { + return unpackTime(x.v.Load()) +} + +// Store atomically stores the passed time.Time. +func (x *Time) Store(val time.Time) { + x.v.Store(packTime(val)) +} diff --git a/atomic/time_ext.go b/atomic/time_ext.go new file mode 100644 index 0000000..1e3dc97 --- /dev/null +++ b/atomic/time_ext.go @@ -0,0 +1,36 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "time" + +//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go + +func packTime(t time.Time) interface{} { + return t +} + +func unpackTime(v interface{}) time.Time { + if t, ok := v.(time.Time); ok { + return t + } + return time.Time{} +} diff --git a/atomic/time_test.go b/atomic/time_test.go new file mode 100644 index 0000000..83ac022 --- /dev/null +++ b/atomic/time_test.go @@ -0,0 +1,86 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTime(t *testing.T) { + start := time.Date(2021, 6, 17, 9, 10, 0, 0, time.UTC) + atom := NewTime(start) + + require.Equal(t, start, atom.Load(), "Load didn't work") + require.Equal(t, time.Time{}, NewTime(time.Time{}).Load(), "Default time value is wrong") +} + +func TestTimeLocation(t *testing.T) { + // Check TZ data hasn't been lost from load/store. 
+ ny, err := time.LoadLocation("America/New_York") + require.NoError(t, err, "Failed to load location") + nyTime := NewTime(time.Date(2021, 1, 1, 0, 0, 0, 0, ny)) + + var atom Time + atom.Store(nyTime.Load()) + + assert.Equal(t, ny, atom.Load().Location(), "Location information is wrong") +} + +func TestLargeTime(t *testing.T) { + // Check "large/small" time that are beyond int64 ns + // representation (< year 1678 or > year 2262) can be + // correctly load/store'd. + t.Parallel() + + t.Run("future", func(t *testing.T) { + future := time.Date(2262, 12, 31, 0, 0, 0, 0, time.UTC) + atom := NewTime(future) + dayAfterFuture := atom.Load().AddDate(0, 1, 0) + + atom.Store(dayAfterFuture) + assert.Equal(t, 2263, atom.Load().Year()) + }) + + t.Run("past", func(t *testing.T) { + past := time.Date(1678, 1, 1, 0, 0, 0, 0, time.UTC) + atom := NewTime(past) + dayBeforePast := atom.Load().AddDate(0, -1, 0) + + atom.Store(dayBeforePast) + assert.Equal(t, 1677, atom.Load().Year()) + }) +} + +func TestMonotonic(t *testing.T) { + before := NewTime(time.Now()) + time.Sleep(15 * time.Millisecond) + after := NewTime(time.Now()) + + // try loading/storing before and test monotonic clock value hasn't been lost + bt := before.Load() + before.Store(bt) + d := after.Load().Sub(before.Load()) + assert.True(t, 15 <= d.Milliseconds()) +} diff --git a/atomic/tools/tools.go b/atomic/tools/tools.go new file mode 100644 index 0000000..6c8e7e8 --- /dev/null +++ b/atomic/tools/tools.go @@ -0,0 +1,30 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build tools +// +build tools + +package tools + +import ( + // Tools used during development. + _ "golang.org/x/lint/golint" + _ "honnef.co/go/tools/cmd/staticcheck" +) diff --git a/atomic/uint32.go b/atomic/uint32.go new file mode 100644 index 0000000..4adc294 --- /dev/null +++ b/atomic/uint32.go @@ -0,0 +1,109 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint32 is an atomic wrapper around uint32. +type Uint32 struct { + _ nocmp // disallow non-atomic comparison + + v uint32 +} + +// NewUint32 creates a new Uint32. +func NewUint32(val uint32) *Uint32 { + return &Uint32{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(delta uint32) uint32 { + return atomic.AddUint32(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(delta uint32) uint32 { + return atomic.AddUint32(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. +func (i *Uint32) CAS(old, new uint32) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(val uint32) { + atomic.StoreUint32(&i.v, val) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(val uint32) (old uint32) { + return atomic.SwapUint32(&i.v, val) +} + +// MarshalJSON encodes the wrapped uint32 into JSON. +func (i *Uint32) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint32. +func (i *Uint32) UnmarshalJSON(b []byte) error { + var v uint32 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uint32) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/atomic/uint32_test.go b/atomic/uint32_test.go new file mode 100644 index 0000000..8bfcda2 --- /dev/null +++ b/atomic/uint32_test.go @@ -0,0 +1,77 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
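Sub in the generated unsigned wrappers cannot call a dedicated subtraction primitive: sync/atomic only offers AddUint32, so the code above adds the two's complement of delta, ^(delta - 1), which equals -delta modulo 2^32. A quick illustrative check (standalone, not part of the patch):

    package main

    import "fmt"

    func main() {
        var v, delta uint32 = 44, 2
        v += ^(delta - 1) // identical to v -= delta, modulo 2^32
        fmt.Println(v)    // 42
    }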
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUint32(t *testing.T) { + atom := NewUint32(42) + + require.Equal(t, uint32(42), atom.Load(), "Load didn't work.") + require.Equal(t, uint32(46), atom.Add(4), "Add didn't work.") + require.Equal(t, uint32(44), atom.Sub(2), "Sub didn't work.") + require.Equal(t, uint32(45), atom.Inc(), "Inc didn't work.") + require.Equal(t, uint32(44), atom.Dec(), "Dec didn't work.") + + require.True(t, atom.CAS(44, 0), "CAS didn't report a swap.") + require.Equal(t, uint32(0), atom.Load(), "CAS didn't set the correct value.") + + require.Equal(t, uint32(0), atom.Swap(1), "Swap didn't return the old value.") + require.Equal(t, uint32(1), atom.Load(), "Swap didn't set the correct value.") + + atom.Store(42) + require.Equal(t, uint32(42), atom.Load(), "Store didn't set the correct value.") + + t.Run("JSON/Marshal", func(t *testing.T) { + bytes, err := json.Marshal(atom) + require.NoError(t, err, "json.Marshal errored unexpectedly.") + require.Equal(t, []byte("42"), bytes, "json.Marshal encoded the wrong bytes.") + }) + + t.Run("JSON/Unmarshal", func(t *testing.T) { + err := json.Unmarshal([]byte("40"), &atom) + require.NoError(t, err, "json.Unmarshal errored unexpectedly.") + require.Equal(t, uint32(40), atom.Load(), + "json.Unmarshal didn't set the correct value.") + }) + + t.Run("JSON/Unmarshal/Error", func(t *testing.T) { + err := json.Unmarshal([]byte(`"40"`), &atom) + require.Error(t, err, "json.Unmarshal didn't error as expected.") + assertErrorJSONUnmarshalType(t, err, + "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err) + }) + + t.Run("String", func(t *testing.T) { + // Use an integer with the signed bit set. If we're converting + // incorrectly, we'll get a negative value here. + atom := NewUint32(math.MaxUint32) + assert.Equal(t, "4294967295", atom.String(), + "String() returned an unexpected value.") + }) +} diff --git a/atomic/uint64.go b/atomic/uint64.go new file mode 100644 index 0000000..0e2eddb --- /dev/null +++ b/atomic/uint64.go @@ -0,0 +1,109 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uint64 is an atomic wrapper around uint64. +type Uint64 struct { + _ nocmp // disallow non-atomic comparison + + v uint64 +} + +// NewUint64 creates a new Uint64. +func NewUint64(val uint64) *Uint64 { + return &Uint64{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(delta uint64) uint64 { + return atomic.AddUint64(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(delta uint64) uint64 { + return atomic.AddUint64(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. +func (i *Uint64) CAS(old, new uint64) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(val uint64) { + atomic.StoreUint64(&i.v, val) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(val uint64) (old uint64) { + return atomic.SwapUint64(&i.v, val) +} + +// MarshalJSON encodes the wrapped uint64 into JSON. +func (i *Uint64) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uint64. +func (i *Uint64) UnmarshalJSON(b []byte) error { + var v uint64 + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uint64) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/atomic/uint64_test.go b/atomic/uint64_test.go new file mode 100644 index 0000000..1141e5a --- /dev/null +++ b/atomic/uint64_test.go @@ -0,0 +1,77 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "math" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUint64(t *testing.T) { + atom := NewUint64(42) + + require.Equal(t, uint64(42), atom.Load(), "Load didn't work.") + require.Equal(t, uint64(46), atom.Add(4), "Add didn't work.") + require.Equal(t, uint64(44), atom.Sub(2), "Sub didn't work.") + require.Equal(t, uint64(45), atom.Inc(), "Inc didn't work.") + require.Equal(t, uint64(44), atom.Dec(), "Dec didn't work.") + + require.True(t, atom.CAS(44, 0), "CAS didn't report a swap.") + require.Equal(t, uint64(0), atom.Load(), "CAS didn't set the correct value.") + + require.Equal(t, uint64(0), atom.Swap(1), "Swap didn't return the old value.") + require.Equal(t, uint64(1), atom.Load(), "Swap didn't set the correct value.") + + atom.Store(42) + require.Equal(t, uint64(42), atom.Load(), "Store didn't set the correct value.") + + t.Run("JSON/Marshal", func(t *testing.T) { + bytes, err := json.Marshal(atom) + require.NoError(t, err, "json.Marshal errored unexpectedly.") + require.Equal(t, []byte("42"), bytes, "json.Marshal encoded the wrong bytes.") + }) + + t.Run("JSON/Unmarshal", func(t *testing.T) { + err := json.Unmarshal([]byte("40"), &atom) + require.NoError(t, err, "json.Unmarshal errored unexpectedly.") + require.Equal(t, uint64(40), atom.Load(), + "json.Unmarshal didn't set the correct value.") + }) + + t.Run("JSON/Unmarshal/Error", func(t *testing.T) { + err := json.Unmarshal([]byte(`"40"`), &atom) + require.Error(t, err, "json.Unmarshal didn't error as expected.") + assertErrorJSONUnmarshalType(t, err, + "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err) + }) + + t.Run("String", func(t *testing.T) { + // Use an integer with the signed bit set. If we're converting + // incorrectly, we'll get a negative value here. + atom := NewUint64(math.MaxUint64) + assert.Equal(t, "18446744073709551615", atom.String(), + "String() returned an unexpected value.") + }) +} diff --git a/atomic/uintptr.go b/atomic/uintptr.go new file mode 100644 index 0000000..7d5b000 --- /dev/null +++ b/atomic/uintptr.go @@ -0,0 +1,109 @@ +// @generated Code generated by gen-atomicint. + +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "strconv" + "sync/atomic" +) + +// Uintptr is an atomic wrapper around uintptr. +type Uintptr struct { + _ nocmp // disallow non-atomic comparison + + v uintptr +} + +// NewUintptr creates a new Uintptr. +func NewUintptr(val uintptr) *Uintptr { + return &Uintptr{v: val} +} + +// Load atomically loads the wrapped value. +func (i *Uintptr) Load() uintptr { + return atomic.LoadUintptr(&i.v) +} + +// Add atomically adds to the wrapped uintptr and returns the new value. +func (i *Uintptr) Add(delta uintptr) uintptr { + return atomic.AddUintptr(&i.v, delta) +} + +// Sub atomically subtracts from the wrapped uintptr and returns the new value. +func (i *Uintptr) Sub(delta uintptr) uintptr { + return atomic.AddUintptr(&i.v, ^(delta - 1)) +} + +// Inc atomically increments the wrapped uintptr and returns the new value. +func (i *Uintptr) Inc() uintptr { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uintptr and returns the new value. +func (i *Uintptr) Dec() uintptr { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap. +func (i *Uintptr) CAS(old, new uintptr) (swapped bool) { + return i.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) { + return atomic.CompareAndSwapUintptr(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uintptr) Store(val uintptr) { + atomic.StoreUintptr(&i.v, val) +} + +// Swap atomically swaps the wrapped uintptr and returns the old value. +func (i *Uintptr) Swap(val uintptr) (old uintptr) { + return atomic.SwapUintptr(&i.v, val) +} + +// MarshalJSON encodes the wrapped uintptr into JSON. +func (i *Uintptr) MarshalJSON() ([]byte, error) { + return json.Marshal(i.Load()) +} + +// UnmarshalJSON decodes JSON into the wrapped uintptr. +func (i *Uintptr) UnmarshalJSON(b []byte) error { + var v uintptr + if err := json.Unmarshal(b, &v); err != nil { + return err + } + i.Store(v) + return nil +} + +// String encodes the wrapped value as a string. +func (i *Uintptr) String() string { + v := i.Load() + return strconv.FormatUint(uint64(v), 10) +} diff --git a/atomic/uintptr_test.go b/atomic/uintptr_test.go new file mode 100644 index 0000000..7d8ac39 --- /dev/null +++ b/atomic/uintptr_test.go @@ -0,0 +1,80 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUintptr(t *testing.T) { + atom := NewUintptr(42) + + require.Equal(t, uintptr(42), atom.Load(), "Load didn't work.") + require.Equal(t, uintptr(46), atom.Add(4), "Add didn't work.") + require.Equal(t, uintptr(44), atom.Sub(2), "Sub didn't work.") + require.Equal(t, uintptr(45), atom.Inc(), "Inc didn't work.") + require.Equal(t, uintptr(44), atom.Dec(), "Dec didn't work.") + + require.True(t, atom.CAS(44, 0), "CAS didn't report a swap.") + require.Equal(t, uintptr(0), atom.Load(), "CAS didn't set the correct value.") + + require.Equal(t, uintptr(0), atom.Swap(1), "Swap didn't return the old value.") + require.Equal(t, uintptr(1), atom.Load(), "Swap didn't set the correct value.") + + atom.Store(42) + require.Equal(t, uintptr(42), atom.Load(), "Store didn't set the correct value.") + + t.Run("JSON/Marshal", func(t *testing.T) { + bytes, err := json.Marshal(atom) + require.NoError(t, err, "json.Marshal errored unexpectedly.") + require.Equal(t, []byte("42"), bytes, "json.Marshal encoded the wrong bytes.") + }) + + t.Run("JSON/Unmarshal", func(t *testing.T) { + err := json.Unmarshal([]byte("40"), &atom) + require.NoError(t, err, "json.Unmarshal errored unexpectedly.") + require.Equal(t, uintptr(40), atom.Load(), + "json.Unmarshal didn't set the correct value.") + }) + + t.Run("JSON/Unmarshal/Error", func(t *testing.T) { + err := json.Unmarshal([]byte(`"40"`), &atom) + require.Error(t, err, "json.Unmarshal didn't error as expected.") + assertErrorJSONUnmarshalType(t, err, + "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err) + }) + + t.Run("String", func(t *testing.T) { + // Use an integer with the signed bit set. If we're converting + // incorrectly, we'll get a negative value here. + // Use an int variable, as constants cause compile-time overflows. + negative := -1 + atom := NewUintptr(uintptr(negative)) + want := fmt.Sprint(uintptr(negative)) + assert.Equal(t, want, atom.String(), + "String() returned an unexpected value.") + }) +} diff --git a/atomic/unsafe_pointer.go b/atomic/unsafe_pointer.go new file mode 100644 index 0000000..34868ba --- /dev/null +++ b/atomic/unsafe_pointer.go @@ -0,0 +1,65 @@ +// Copyright (c) 2021-2022 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "sync/atomic" + "unsafe" +) + +// UnsafePointer is an atomic wrapper around unsafe.Pointer. +type UnsafePointer struct { + _ nocmp // disallow non-atomic comparison + + v unsafe.Pointer +} + +// NewUnsafePointer creates a new UnsafePointer. +func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer { + return &UnsafePointer{v: val} +} + +// Load atomically loads the wrapped value. +func (p *UnsafePointer) Load() unsafe.Pointer { + return atomic.LoadPointer(&p.v) +} + +// Store atomically stores the passed value. +func (p *UnsafePointer) Store(val unsafe.Pointer) { + atomic.StorePointer(&p.v, val) +} + +// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value. +func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) { + return atomic.SwapPointer(&p.v, val) +} + +// CAS is an atomic compare-and-swap. +// +// Deprecated: Use CompareAndSwap +func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) { + return p.CompareAndSwap(old, new) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) { + return atomic.CompareAndSwapPointer(&p.v, old, new) +} diff --git a/atomic/unsafe_pointer_test.go b/atomic/unsafe_pointer_test.go new file mode 100644 index 0000000..f0193df --- /dev/null +++ b/atomic/unsafe_pointer_test.go @@ -0,0 +1,83 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "testing" + "unsafe" + + "github.com/stretchr/testify/require" +) + +func TestUnsafePointer(t *testing.T) { + i := int64(42) + j := int64(0) + k := int64(1) + + tests := []struct { + desc string + newAtomic func() *UnsafePointer + initial unsafe.Pointer + }{ + { + desc: "non-empty", + newAtomic: func() *UnsafePointer { + return NewUnsafePointer(unsafe.Pointer(&i)) + }, + initial: unsafe.Pointer(&i), + }, + { + desc: "nil", + newAtomic: func() *UnsafePointer { + var p UnsafePointer + return &p + }, + initial: unsafe.Pointer(nil), + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + t.Run("Load", func(t *testing.T) { + atom := tt.newAtomic() + require.Equal(t, tt.initial, atom.Load(), "Load should report nil.") + }) + + t.Run("Swap", func(t *testing.T) { + atom := tt.newAtomic() + require.Equal(t, tt.initial, atom.Swap(unsafe.Pointer(&k)), "Swap didn't return the old value.") + require.Equal(t, unsafe.Pointer(&k), atom.Load(), "Swap didn't set the correct value.") + }) + + t.Run("CAS", func(t *testing.T) { + atom := tt.newAtomic() + require.True(t, atom.CAS(tt.initial, unsafe.Pointer(&j)), "CAS didn't report a swap.") + require.Equal(t, unsafe.Pointer(&j), atom.Load(), "CAS didn't set the correct value.") + }) + + t.Run("Store", func(t *testing.T) { + atom := tt.newAtomic() + atom.Store(unsafe.Pointer(&i)) + require.Equal(t, unsafe.Pointer(&i), atom.Load(), "Store didn't set the correct value.") + }) + }) + } +} diff --git a/atomic/value.go b/atomic/value.go new file mode 100644 index 0000000..52caedb --- /dev/null +++ b/atomic/value.go @@ -0,0 +1,31 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import "sync/atomic" + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct { + _ nocmp // disallow non-atomic comparison + + atomic.Value +} diff --git a/atomic/value_test.go b/atomic/value_test.go new file mode 100644 index 0000000..bb9f301 --- /dev/null +++ b/atomic/value_test.go @@ -0,0 +1,40 @@ +// Copyright (c) 2020 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValue(t *testing.T) { + var v Value + assert.Nil(t, v.Load(), "initial Value is not nil") + + v.Store(42) + assert.Equal(t, 42, v.Load()) + + v.Store(84) + assert.Equal(t, 84, v.Load()) + + assert.Panics(t, func() { v.Store("foo") }) +} diff --git a/auth/nip42.go b/auth/nip42.go index 8b0a7c7..6a22e17 100644 --- a/auth/nip42.go +++ b/auth/nip42.go @@ -4,13 +4,13 @@ import ( "crypto/rand" "encoding/base64" "net/url" + "orly.dev/chk" + "orly.dev/log" "strings" "time" - "orly.dev/chk" "orly.dev/event" "orly.dev/kind" - "orly.dev/log" "orly.dev/tag" "orly.dev/tags" "orly.dev/timestamp" @@ -26,7 +26,7 @@ func GenerateChallenge() (b []byte) { } // CreateUnsigned creates an event which should be sent via an "AUTH" command. -// If the authentication succeeds, the user will be authenticated as pubkey. +// If the authentication succeeds, the user will be authenticated as a pubkey. func CreateUnsigned(pubkey, challenge []byte, relayURL string) (ev *event.E) { return &event.E{ Pubkey: pubkey, @@ -41,13 +41,19 @@ func CreateUnsigned(pubkey, challenge []byte, relayURL string) (ev *event.E) { // helper function for ValidateAuthEvent. func parseURL(input string) (*url.URL, error) { - return url.Parse(strings.ToLower(strings.TrimSuffix(input, "/"))) + return url.Parse( + strings.ToLower( + strings.TrimSuffix(input, "/"), + ), + ) } var ( - // ChallengeTag is the tag for the challenge in a NIP-42 auth event (prevents relay attacks). + // ChallengeTag is the tag for the challenge in a NIP-42 auth event + // (prevents relay attacks). ChallengeTag = []byte("challenge") - // RelayTag is is the relay tag for a NIP-42 auth event (prevents cross-server attacks). + // RelayTag is the relay tag for a NIP-42 auth event (prevents cross-server + // attacks). 
RelayTag = []byte("relay") ) diff --git a/auth/nip42_test.go b/auth/nip42_test.go index 2e7f137..51e1507 100644 --- a/auth/nip42_test.go +++ b/auth/nip42_test.go @@ -1,9 +1,9 @@ package auth import ( + "orly.dev/chk" "testing" - "orly.dev/chk" "orly.dev/p256k" ) diff --git a/bech32encoding/keys.go b/bech32encoding/keys.go index 1fc56a3..31d6d64 100644 --- a/bech32encoding/keys.go +++ b/bech32encoding/keys.go @@ -2,14 +2,14 @@ package bech32encoding import ( "bytes" - "orly.dev/chk" + "orly.dev/log" + btcec "orly.dev/ec" "orly.dev/ec/bech32" "orly.dev/ec/schnorr" "orly.dev/ec/secp256k1" "orly.dev/hex" - "orly.dev/log" ) const ( diff --git a/bech32encoding/keys_test.go b/bech32encoding/keys_test.go index 2e7d5e9..55d914c 100644 --- a/bech32encoding/keys_test.go +++ b/bech32encoding/keys_test.go @@ -4,9 +4,9 @@ import ( "bytes" "crypto/rand" "encoding/hex" + "orly.dev/chk" "testing" - "orly.dev/chk" "orly.dev/ec/schnorr" "orly.dev/ec/secp256k1" ) diff --git a/bech32encoding/nip19.go b/bech32encoding/nip19.go index 74a6327..74bfe8b 100644 --- a/bech32encoding/nip19.go +++ b/bech32encoding/nip19.go @@ -3,18 +3,18 @@ package bech32encoding import ( "bytes" "encoding/binary" + "orly.dev/chk" + "orly.dev/errorf" + "orly.dev/log" - "github.com/minio/sha256-simd" "orly.dev/bech32encoding/pointers" "orly.dev/bech32encoding/tlv" - "orly.dev/chk" "orly.dev/ec/bech32" "orly.dev/ec/schnorr" - "orly.dev/errorf" "orly.dev/eventid" "orly.dev/hex" "orly.dev/kind" - "orly.dev/log" + "orly.dev/sha256" ) var ( @@ -223,8 +223,6 @@ func EncodeEvent( pubkey := make([]byte, schnorr.PubKeyBytesLen) if _, err = hex.DecBytes(pubkey, author); len(pubkey) == 32 { tlv.WriteEntry(buf, tlv.Author, pubkey) - } else if chk.E(err) { - return } var bits5 []byte if bits5, err = bech32.ConvertBits(buf.Bytes(), 8, 5, true); chk.D(err) { diff --git a/bech32encoding/nip19_test.go b/bech32encoding/nip19_test.go index 39fb5b0..7c2ffdd 100644 --- a/bech32encoding/nip19_test.go +++ b/bech32encoding/nip19_test.go @@ -2,15 +2,15 @@ package bech32encoding import ( "bytes" + "orly.dev/chk" + "orly.dev/log" "reflect" "testing" "orly.dev/bech32encoding/pointers" - "orly.dev/chk" "orly.dev/eventid" "orly.dev/hex" "orly.dev/kind" - "orly.dev/log" ) func TestEncodeNpub(t *testing.T) { diff --git a/bech32encoding/tlv/tlv.go b/bech32encoding/tlv/tlv.go index 95e068e..838a877 100644 --- a/bech32encoding/tlv/tlv.go +++ b/bech32encoding/tlv/tlv.go @@ -5,8 +5,6 @@ package tlv import ( "io" - - "orly.dev/chk" ) const ( @@ -20,17 +18,17 @@ const ( func ReadEntry(buf io.Reader) (typ uint8, value []byte) { var err error t := make([]byte, 1) - if _, err = buf.Read(t); chk.E(err) { + if _, err = buf.Read(t); err != nil { return } typ = t[0] l := make([]byte, 1) - if _, err = buf.Read(l); chk.E(err) { + if _, err = buf.Read(l); err != nil { return } length := int(l[0]) value = make([]byte, length) - if _, err = buf.Read(value); chk.E(err) { + if _, err = buf.Read(value); err != nil { // nil value signals end of data or error value = nil } diff --git a/bin/binary.go b/bin/binary.go new file mode 100644 index 0000000..52b0f91 --- /dev/null +++ b/bin/binary.go @@ -0,0 +1,40 @@ +package bin + +import ( + "encoding/binary" + "orly.dev/errorf" +) + +// Append is a straight append with length prefix. +func Append(dst, src []byte) (b []byte) { + // if an allocation or two may occur, do it all in one immediately. + minLen := len(src) + len(dst) + binary.MaxVarintLen32 + if cap(dst) < minLen { + tmp := make([]byte, 0, minLen) + dst = append(tmp, dst...) 
+	}
+	dst = binary.AppendUvarint(dst, uint64(len(src)))
+	dst = append(dst, src...)
+	b = dst
+	return
+}
+
+// Extract decodes the data based on the length prefix and returns it and the
+// remaining data from the provided slice.
+func Extract(b []byte) (str, rem []byte, err error) {
+	l, read := binary.Uvarint(b)
+	if read < 1 {
+		err = errorf.E("failed to read uvarint length prefix")
+		return
+	}
+	if len(b) < int(l)+read {
+		err = errorf.E(
+			"insufficient data in buffer, require %d have %d",
+			int(l)+read, len(b),
+		)
+		return
+	}
+	str = b[read : read+int(l)]
+	rem = b[read+int(l):]
+	return
+}
diff --git a/cmd/doc.go b/cmd/doc.go
new file mode 100644
index 0000000..0fac62d
--- /dev/null
+++ b/cmd/doc.go
@@ -0,0 +1,2 @@
+// Package cmd contains the executable applications of the realy suite.
+package cmd
diff --git a/cmd/lerproxy/LICENSE b/cmd/lerproxy/LICENSE
new file mode 100644
index 0000000..3b0fd64
--- /dev/null
+++ b/cmd/lerproxy/LICENSE
@@ -0,0 +1,22 @@
+MIT License
+
+Copyright (c) 2016 Artyom Pervukhin
+Copyright (c) 2024 mleku npub1fjqqy4a93z5zsjwsfxqhc2764kvykfdyttvldkkkdera8dr78vhsmmleku
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/cmd/lerproxy/README.md b/cmd/lerproxy/README.md
new file mode 100644
index 0000000..cce1ceb
--- /dev/null
+++ b/cmd/lerproxy/README.md
@@ -0,0 +1,125 @@
+# lerproxy
+
+Command lerproxy implements an https reverse proxy with automatic LetsEncrypt and your own TLS
+certificates for multiple hostnames/backends, including a static filesystem directory and nostr
+DNS verification [NIP-05](https://github.com/nostr-protocol/nips/blob/master/05.md) hosting.
+ +## Install + + go install lerproxy.mleku.dev@latest + +## Run + +``` +Usage: lerproxy.mleku.dev [--listen LISTEN] [--map MAP] [--rewrites REWRITES] [--cachedir CACHEDIR] [--hsts] [--email EMAIL] [--http HTTP] [--rto RTO] [--wto WTO] [--idle IDLE] [--cert CERT] + +Options: + --listen LISTEN, -l LISTEN + address to listen at [default: :https] + --map MAP, -m MAP file with host/backend mapping [default: mapping.txt] + --rewrites REWRITES, -r REWRITES [default: rewrites.txt] + --cachedir CACHEDIR, -c CACHEDIR + path to directory to cache key and certificates [default: /var/cache/letsencrypt] + --hsts, -h add Strict-Transport-Security header + --email EMAIL, -e EMAIL + contact email address presented to letsencrypt CA + --http HTTP optional address to serve http-to-https redirects and ACME http-01 challenge responses [default: :http] + --rto RTO, -r RTO maximum duration before timing out read of the request [default: 1m] + --wto WTO, -w WTO maximum duration before timing out write of the response [default: 5m] + --idle IDLE, -i IDLE how long idle connection is kept before closing (set rto, wto to 0 to use this) + --cert CERT certificates and the domain they match: eg: mleku.dev:/path/to/cert - this will indicate to load two, one with extension .key and one with .crt, each expected to be PEM encoded TLS private and public keys, respectively + --help, -h display this help and exit +``` + +`mapping.txt` contains host-to-backend mapping, where backend can be specified +as: + +* http/https url for http(s) connections to backend *without* passing "Host" + header from request; +* host:port for http over TCP connections to backend; +* absolute path for http over unix socket connections; +* @name for http over abstract unix socket connections (linux only); +* absolute path with a trailing slash to serve files from a given directory; +* path to a nostr.json file containing a + [nip-05](https://github.com/nostr-protocol/nips/blob/master/05.md) and + hosting it at `https://example.com/.well-known/nostr.json` +* using the prefix `git+` and a full web address path after it, generate html + with the necessary meta tags that indicate to the `go` tool when fetching + dependencies from the address found after the `+`. +* in the launch parameters for `lerproxy` you can now add any number of `--cert` parameters with + the domain (including for wildcards), and the path to the `.crt`/`.key` files: + + lerproxy.mleku.dev --cert :/path/to/TLS_cert + + this will then, if found, load and parse the TLS certificate and secret key if the suffix of + the domain matches. The certificate path is expanded to two files with the above filename + extensions and become active in place of the LetsEncrypt certificates + + > Note that the match is greedy, so you can explicitly separately give a subdomain + certificate and it will be selected even if there is a wildcard that also matches. + +# IMPORTANT + +With Comodo SSL (sectigo RSA) certificates you also need to append the intermediate certificate +to the `.crt` file in order to get it to work properly with openssl library based tools like +wget, curl and the go tool, which is quite important if you want to do subdomains on a wildcard +certificate. + +Probably the same applies to some of the other certificate authorities. If you sometimes get +issues with CLI tools refusing to accept these certificates on your web server or other, this +may be the problem. 
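+
+As a worked example (the domains and certificate paths below are purely
+illustrative), a wildcard certificate for `example.com` plus a separate,
+explicit certificate for `git.example.com` could be supplied alongside the
+usual mapping file:
+
+    lerproxy.mleku.dev -m mapping.txt --cert example.com:/etc/ssl/wildcard --cert git.example.com:/etc/ssl/git
+
+Each `--cert` path is expanded to a `.crt` and a `.key` file as described
+above, and the explicit `git.example.com` entry is selected for that hostname
+even though the `example.com` suffix also matches.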
+ +## example mapping.txt + + nostr.example.com: /path/to/nostr.json + subdomain1.example.com: 127.0.0.1:8080 + subdomain2.example.com: /var/run/http.socket + subdomain3.example.com: @abstractUnixSocket + uploads.example.com: https://uploads-bucket.s3.amazonaws.com + # this is a comment, it can only start on a new line + static.example.com: /var/www/ + awesome-go-project.example.com: git+https://github.com/crappy-name/crappy-go-project-name + +Note that when `@name` backend is specified, connection to abstract unix socket +is made in a manner compatible with some other implementations like uWSGI, that +calculate addrlen including trailing zero byte despite [documentation not +requiring that](http://man7.org/linux/man-pages/man7/unix.7.html). It won't +work with other implementations that calculate addrlen differently (i.e. by +taking into account only `strlen(addr)` like Go, or even `UNIX_PATH_MAX`). + +## systemd service file + +``` +[Unit] +Description=lerproxy + +[Service] +Type=simple +User=username +ExecStart=/usr/local/bin/lerproxy.mleku.dev -m /path/to/mapping.txt -l xxx.xxx.xxx.xxx:443 --http xxx.xxx.xxx.6:80 -m /path/to/mapping.txt -e email@example.com -c /path/to/letsencrypt/cache --cert example.com:/path/to/tls/certs +Restart=on-failure +Wants=network-online.target +After=network.target network-online.target wg-quick@wg0.service + +[Install] +WantedBy=multi-user.target +``` + +If your VPS has wireguard running and you want to be able to host services from the other end of +a tunnel, such as your dev machine (something I do for nostr relay development) add the +`wg-quick@wg0` or whatever wg-quick configuration you are using to ensure when it boots, +`lerproxy` does not run until the tunnel is active. + +## privileged port binding + +The simplest way to allow `lerproxy` to bind to port 80 and 443 is as follows: + + setcap 'cap_net_bind_service=+ep' /path/to/lerproxy.mleku.dev + +## todo + +- add url rewriting such as flipping addresses such as a gitea instance + `example.com/gituser/reponame` to `reponame.example.com` by funneling all + `example.com/gituser` into be rewritten to be the only accessible user account on the gitea + instance. or for other things like a dynamic subscription based hosting service subdomain + instead of path \ No newline at end of file diff --git a/cmd/lerproxy/buf/bufpool.go b/cmd/lerproxy/buf/bufpool.go new file mode 100644 index 0000000..95abf42 --- /dev/null +++ b/cmd/lerproxy/buf/bufpool.go @@ -0,0 +1,16 @@ +// Package buf implements a simple concurrent safe buffer pool for raw bytes. +package buf + +import "sync" + +var bufferPool = &sync.Pool{ + New: func() interface{} { + buf := make([]byte, 32*1024) + return &buf + }, +} + +type Pool struct{} + +func (bp Pool) Get() []byte { return *(bufferPool.Get().(*[]byte)) } +func (bp Pool) Put(b []byte) { bufferPool.Put(&b) } diff --git a/cmd/lerproxy/hsts/proxy.go b/cmd/lerproxy/hsts/proxy.go new file mode 100644 index 0000000..e26f9f9 --- /dev/null +++ b/cmd/lerproxy/hsts/proxy.go @@ -0,0 +1,15 @@ +// Package hsts implements a HTTP handler that enforces HSTS. +package hsts + +import "net/http" + +type Proxy struct { + http.Handler +} + +func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Header(). 
+ Set("Strict-Transport-Security", + "max-age=31536000; includeSubDomains; preload") + p.ServeHTTP(w, r) +} diff --git a/cmd/lerproxy/main.go b/cmd/lerproxy/main.go new file mode 100644 index 0000000..f9540e3 --- /dev/null +++ b/cmd/lerproxy/main.go @@ -0,0 +1,403 @@ +// Command lerproxy implements https reverse proxy with automatic LetsEncrypt +// usage for multiple hostnames/backends,your own SSL certificates, nostr NIP-05 +// DNS verification hosting and Go vanity redirects. +package main + +import ( + "bufio" + "crypto/tls" + "encoding/json" + "fmt" + "io" + stdLog "log" + "net" + "net/http" + "net/http/httputil" + "net/url" + "orly.dev/chk" + "orly.dev/log" + "os" + "os/signal" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/alexflint/go-arg" + "golang.org/x/crypto/acme/autocert" + "golang.org/x/sync/errgroup" + + "orly.dev/cmd/lerproxy/buf" + "orly.dev/cmd/lerproxy/hsts" + "orly.dev/cmd/lerproxy/reverse" + "orly.dev/cmd/lerproxy/tcpkeepalive" + "orly.dev/cmd/lerproxy/util" + "orly.dev/context" +) + +type runArgs struct { + Addr string `arg:"-l,--listen" default:":https" help:"address to listen at"` + Conf string `arg:"-m,--map" default:"mapping.txt" help:"file with host/backend mapping"` + Cache string `arg:"-c,--cachedir" default:"/var/cache/letsencrypt" help:"path to directory to cache key and certificates"` + HSTS bool `arg:"-h,--hsts" help:"add Strict-Transport-Security header"` + Email string `arg:"-e,--email" help:"contact email address presented to letsencrypt CA"` + HTTP string `arg:"--http" default:":http" help:"optional address to serve http-to-https redirects and ACME http-01 challenge responses"` + RTO time.Duration `arg:"-r,--rto" default:"1m" help:"maximum duration before timing out read of the request"` + WTO time.Duration `arg:"-w,--wto" default:"5m" help:"maximum duration before timing out write of the response"` + Idle time.Duration `arg:"-i,--idle" help:"how long idle connection is kept before closing (set rto, wto to 0 to use this)"` + Certs []string `arg:"--cert,separate" help:"certificates and the domain they match: eg: orly.dev:/path/to/cert - this will indicate to load two, one with extension .key and one with .crt, each expected to be PEM encoded TLS private and public keys, respectively"` + // Rewrites string `arg:"-r,--rewrites" default:"rewrites.txt"` +} + +var args runArgs + +func main() { + arg.MustParse(&args) + ctx, cancel := signal.NotifyContext(context.Bg(), os.Interrupt) + defer cancel() + if err := run(ctx, args); chk.T(err) { + log.F.Ln(err) + } +} + +func run(c context.T, args runArgs) (err error) { + + if args.Cache == "" { + err = log.E.Err("no cache specified") + return + } + + var srv *http.Server + var httpHandler http.Handler + if srv, httpHandler, err = setupServer(args); chk.E(err) { + return + } + srv.ReadHeaderTimeout = 5 * time.Second + if args.RTO > 0 { + srv.ReadTimeout = args.RTO + } + if args.WTO > 0 { + srv.WriteTimeout = args.WTO + } + group, ctx := errgroup.WithContext(c) + if args.HTTP != "" { + httpServer := http.Server{ + Addr: args.HTTP, + Handler: httpHandler, + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + } + group.Go( + func() (err error) { + chk.E(httpServer.ListenAndServe()) + return + }, + ) + group.Go( + func() error { + <-ctx.Done() + ctx, cancel := context.Timeout( + context.Bg(), + time.Second, + ) + defer cancel() + return httpServer.Shutdown(ctx) + }, + ) + } + if srv.ReadTimeout != 0 || srv.WriteTimeout != 0 || args.Idle == 0 { + group.Go( + func() (err error) { 
+ chk.E(srv.ListenAndServeTLS("", "")) + return + }, + ) + } else { + group.Go( + func() (err error) { + var ln net.Listener + if ln, err = net.Listen("tcp", srv.Addr); chk.E(err) { + return + } + defer ln.Close() + ln = tcpkeepalive.Listener{ + Duration: args.Idle, + TCPListener: ln.(*net.TCPListener), + } + err = srv.ServeTLS(ln, "", "") + chk.E(err) + return + }, + ) + } + group.Go( + func() error { + <-ctx.Done() + ctx, cancel := context.Timeout(context.Bg(), time.Second) + defer cancel() + return srv.Shutdown(ctx) + }, + ) + return group.Wait() +} + +// TLSConfig returns a TLSConfig that works with a LetsEncrypt automatic SSL cert issuer as well +// as any provided .pem certificates from providers. +// +// The certs are provided in the form "example.com:/path/to/cert.pem" +func TLSConfig(m *autocert.Manager, certs ...string) (tc *tls.Config) { + certMap := make(map[string]*tls.Certificate) + var mx sync.Mutex + for _, cert := range certs { + split := strings.Split(cert, ":") + if len(split) != 2 { + log.E.F("invalid certificate parameter format: `%s`", cert) + continue + } + var err error + var c tls.Certificate + if c, err = tls.LoadX509KeyPair( + split[1]+".crt", split[1]+".key", + ); chk.E(err) { + continue + } + certMap[split[0]] = &c + } + tc = m.TLSConfig() + tc.GetCertificate = func(helo *tls.ClientHelloInfo) ( + cert *tls.Certificate, err error, + ) { + mx.Lock() + var own string + for i := range certMap { + // to also handle explicit subdomain certs, prioritize over a root wildcard. + if helo.ServerName == i { + own = i + break + } + // if it got to us and ends in the same name dot tld assume the subdomain was + // redirected or it's a wildcard certificate, thus only the ending needs to match. + if strings.HasSuffix(helo.ServerName, i) { + own = i + break + } + } + if own != "" { + defer mx.Unlock() + return certMap[own], nil + } + mx.Unlock() + return m.GetCertificate(helo) + } + return +} + +func setupServer(a runArgs) (s *http.Server, h http.Handler, err error) { + var mapping map[string]string + if mapping, err = readMapping(a.Conf); chk.E(err) { + return + } + var proxy http.Handler + if proxy, err = setProxy(mapping); chk.E(err) { + return + } + if a.HSTS { + proxy = &hsts.Proxy{Handler: proxy} + } + if err = os.MkdirAll(a.Cache, 0700); chk.E(err) { + err = fmt.Errorf( + "cannot create cache directory %q: %v", + a.Cache, err, + ) + chk.E(err) + return + } + m := autocert.Manager{ + Prompt: autocert.AcceptTOS, + Cache: autocert.DirCache(a.Cache), + HostPolicy: autocert.HostWhitelist(util.GetKeys(mapping)...), + Email: a.Email, + } + s = &http.Server{ + Handler: proxy, + Addr: a.Addr, + TLSConfig: TLSConfig(&m, a.Certs...), + } + h = m.HTTPHandler(nil) + return +} + +type NostrJSON struct { + Names map[string]string `json:"names"` + Relays map[string][]string `json:"relays"` +} + +func setProxy(mapping map[string]string) (h http.Handler, err error) { + if len(mapping) == 0 { + return nil, fmt.Errorf("empty mapping") + } + mux := http.NewServeMux() + for hostname, backendAddr := range mapping { + hn, ba := hostname, backendAddr + if strings.ContainsRune(hn, os.PathSeparator) { + err = log.E.Err("invalid hostname: %q", hn) + return + } + network := "tcp" + if ba != "" && ba[0] == '@' && runtime.GOOS == "linux" { + // append \0 to address so addrlen for connect(2) is calculated in a + // way compatible with some other implementations (i.e. 
uwsgi) + network, ba = "unix", ba+string(byte(0)) + } else if strings.HasPrefix(ba, "git+") { + split := strings.Split(ba, "git+") + if len(split) != 2 { + log.E.Ln("invalid go vanity redirect: %s: %s", hn, ba) + continue + } + redirector := fmt.Sprintf( + `redirecting to %s`, + hn, split[1], split[1], split[1], split[1], + ) + mux.HandleFunc( + hn+"/", + func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set( + "Access-Control-Allow-Methods", + "GET,HEAD,PUT,PATCH,POST,DELETE", + ) + writer.Header().Set("Access-Control-Allow-Origin", "*") + writer.Header().Set("Content-Type", "text/html") + writer.Header().Set( + "Content-Length", fmt.Sprint(len(redirector)), + ) + writer.Header().Set( + "strict-transport-security", + "max-age=0; includeSubDomains", + ) + fmt.Fprint(writer, redirector) + }, + ) + continue + } else if filepath.IsAbs(ba) { + network = "unix" + switch { + case strings.HasSuffix(ba, string(os.PathSeparator)): + // path specified as directory with explicit trailing slash; add + // this path as static site + fs := http.FileServer(http.Dir(ba)) + mux.Handle(hn+"/", fs) + continue + case strings.HasSuffix(ba, "nostr.json"): + log.I.Ln(hn, ba) + var fb []byte + if fb, err = os.ReadFile(ba); chk.E(err) { + continue + } + var v NostrJSON + if err = json.Unmarshal(fb, &v); chk.E(err) { + continue + } + var jb []byte + if jb, err = json.Marshal(v); chk.E(err) { + continue + } + nostrJSON := string(jb) + mux.HandleFunc( + hn+"/.well-known/nostr.json", + func(writer http.ResponseWriter, request *http.Request) { + log.I.Ln("serving nostr json to", hn) + writer.Header().Set( + "Access-Control-Allow-Methods", + "GET,HEAD,PUT,PATCH,POST,DELETE", + ) + writer.Header().Set("Access-Control-Allow-Origin", "*") + writer.Header().Set("Content-Type", "application/json") + writer.Header().Set( + "Content-Length", fmt.Sprint(len(nostrJSON)), + ) + writer.Header().Set( + "strict-transport-security", + "max-age=0; includeSubDomains", + ) + fmt.Fprint(writer, nostrJSON) + }, + ) + continue + } + } else if u, err := url.Parse(ba); err == nil { + switch u.Scheme { + case "http", "https": + rp := reverse.NewSingleHostReverseProxy(u) + modifyCORSResponse := func(res *http.Response) error { + res.Header.Set( + "Access-Control-Allow-Methods", + "GET,HEAD,PUT,PATCH,POST,DELETE", + ) + // res.Header.Set("Access-Control-Allow-Credentials", "true") + res.Header.Set("Access-Control-Allow-Origin", "*") + return nil + } + rp.ModifyResponse = modifyCORSResponse + rp.ErrorLog = stdLog.New( + os.Stderr, "lerproxy", stdLog.Llongfile, + ) + rp.BufferPool = buf.Pool{} + mux.Handle(hn+"/", rp) + continue + } + } + rp := &httputil.ReverseProxy{ + Director: func(req *http.Request) { + req.URL.Scheme = "http" + req.URL.Host = req.Host + req.Header.Set("X-Forwarded-Proto", "https") + req.Header.Set("X-Forwarded-For", req.RemoteAddr) + req.Header.Set( + "Access-Control-Allow-Methods", + "GET,HEAD,PUT,PATCH,POST,DELETE", + ) + // req.Header.Set("Access-Control-Allow-Credentials", "true") + req.Header.Set("Access-Control-Allow-Origin", "*") + log.D.Ln(req.URL, req.RemoteAddr) + }, + Transport: &http.Transport{ + DialContext: func(c context.T, n, addr string) ( + net.Conn, error, + ) { + return net.DialTimeout(network, ba, 5*time.Second) + }, + }, + ErrorLog: stdLog.New(io.Discard, "", 0), + BufferPool: buf.Pool{}, + } + mux.Handle(hn+"/", rp) + } + return mux, nil +} + +func readMapping(file string) (m map[string]string, err error) { + var f *os.File + if f, err = os.Open(file); chk.E(err) { + return + } 
+	m = make(map[string]string)
+	sc := bufio.NewScanner(f)
+	for sc.Scan() {
+		if b := sc.Bytes(); len(b) == 0 || b[0] == '#' {
+			continue
+		}
+		s := strings.SplitN(sc.Text(), ":", 2)
+		if len(s) != 2 {
+			err = fmt.Errorf("invalid line: %q", sc.Text())
+			log.E.Ln(err)
+			chk.E(f.Close())
+			return
+		}
+		m[strings.TrimSpace(s[0])] = strings.TrimSpace(s[1])
+	}
+	err = sc.Err()
+	chk.E(err)
+	chk.E(f.Close())
+	return
+}
diff --git a/cmd/lerproxy/reverse/proxy.go b/cmd/lerproxy/reverse/proxy.go
new file mode 100644
index 0000000..acfbe75
--- /dev/null
+++ b/cmd/lerproxy/reverse/proxy.go
@@ -0,0 +1,35 @@
+// Package reverse is a copy of httputil.NewSingleHostReverseProxy with addition
+// of "X-Forwarded-Proto" header.
+package reverse
+
+import (
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"orly.dev/log"
+
+	"orly.dev/cmd/lerproxy/util"
+)
+
+// NewSingleHostReverseProxy is a copy of httputil.NewSingleHostReverseProxy
+// with addition of "X-Forwarded-Proto" header.
+func NewSingleHostReverseProxy(target *url.URL) (rp *httputil.ReverseProxy) {
+	targetQuery := target.RawQuery
+	director := func(req *http.Request) {
+		log.D.S(req)
+		req.URL.Scheme = target.Scheme
+		req.URL.Host = target.Host
+		req.URL.Path = util.SingleJoiningSlash(target.Path, req.URL.Path)
+		if targetQuery == "" || req.URL.RawQuery == "" {
+			req.URL.RawQuery = targetQuery + req.URL.RawQuery
+		} else {
+			req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
+		}
+		if _, ok := req.Header["User-Agent"]; !ok {
+			req.Header.Set("User-Agent", "")
+		}
+		req.Header.Set("X-Forwarded-Proto", "https")
+	}
+	rp = &httputil.ReverseProxy{Director: director}
+	return
+}
diff --git a/cmd/lerproxy/tcpkeepalive/listener.go b/cmd/lerproxy/tcpkeepalive/listener.go
new file mode 100644
index 0000000..abca982
--- /dev/null
+++ b/cmd/lerproxy/tcpkeepalive/listener.go
@@ -0,0 +1,40 @@
+// Package tcpkeepalive implements a net.TCPListener with a singleton set period
+// for a default 3 minute keep-alive.
+package tcpkeepalive
+
+import (
+	"net"
+	"orly.dev/chk"
+	"time"
+
+	"orly.dev/cmd/lerproxy/timeout"
+)
+
+// Period can be changed prior to opening a Listener to alter its
+// KeepAlivePeriod.
+var Period = 3 * time.Minute
+
+// Listener sets TCP keep-alive timeouts on accepted connections.
+// It's used by ListenAndServe and ListenAndServeTLS so dead TCP connections
+// (e.g. closing laptop mid-download) eventually go away.
+type Listener struct {
+	time.Duration
+	*net.TCPListener
+}
+
+func (ln Listener) Accept() (conn net.Conn, e error) {
+	var tc *net.TCPConn
+	if tc, e = ln.AcceptTCP(); chk.E(e) {
+		return
+	}
+	if e = tc.SetKeepAlive(true); chk.E(e) {
+		return
+	}
+	if e = tc.SetKeepAlivePeriod(Period); chk.E(e) {
+		return
+	}
+	if ln.Duration != 0 {
+		return timeout.Conn{Duration: ln.Duration, TCPConn: tc}, nil
+	}
+	return tc, nil
+}
diff --git a/cmd/lerproxy/timeout/conn.go b/cmd/lerproxy/timeout/conn.go
new file mode 100644
index 0000000..88988b4
--- /dev/null
+++ b/cmd/lerproxy/timeout/conn.go
@@ -0,0 +1,33 @@
+// Package timeout provides a simple extension of a net.TCPConn with a
+// configurable read/write deadline.
+package timeout + +import ( + "net" + "orly.dev/chk" + "time" +) + +// Conn extends deadline after successful read or write operations +type Conn struct { + time.Duration + *net.TCPConn +} + +func (c Conn) Read(b []byte) (n int, e error) { + if n, e = c.TCPConn.Read(b); !chk.E(e) { + if e = c.SetDeadline(c.getTimeout()); chk.E(e) { + } + } + return +} + +func (c Conn) Write(b []byte) (n int, e error) { + if n, e = c.TCPConn.Write(b); !chk.E(e) { + if e = c.SetDeadline(c.getTimeout()); chk.E(e) { + } + } + return +} + +func (c Conn) getTimeout() (t time.Time) { return time.Now().Add(c.Duration) } diff --git a/cmd/lerproxy/util/u.go b/cmd/lerproxy/util/u.go new file mode 100644 index 0000000..9a333ce --- /dev/null +++ b/cmd/lerproxy/util/u.go @@ -0,0 +1,26 @@ +// Package util provides some helpers for lerproxy, a tool to convert maps of +// strings to slices of the same strings, and a helper to avoid putting two / in +// a URL. +package util + +import "strings" + +func GetKeys(m map[string]string) []string { + out := make([]string, 0, len(m)) + for k := range m { + out = append(out, k) + } + return out +} + +func SingleJoiningSlash(a, b string) string { + suffixSlash := strings.HasSuffix(a, "/") + prefixSlash := strings.HasPrefix(b, "/") + switch { + case suffixSlash && prefixSlash: + return a + b[1:] + case !suffixSlash && !prefixSlash: + return a + "/" + b + } + return a + b +} diff --git a/cmd/nauth/main.go b/cmd/nauth/main.go new file mode 100644 index 0000000..999c7a9 --- /dev/null +++ b/cmd/nauth/main.go @@ -0,0 +1,89 @@ +package main + +import ( + "encoding/base64" + "fmt" + "orly.dev/chk" + "orly.dev/errorf" + "orly.dev/log" + "os" + "time" + + "orly.dev/bech32encoding" + "orly.dev/httpauth" + "orly.dev/p256k" + "orly.dev/signer" +) + +const secEnv = "NOSTR_SECRET_KEY" + +func fail(format string, a ...any) { + _, _ = fmt.Fprintf(os.Stderr, format+"\n", a...) + os.Exit(1) +} + +func main() { + // lol.SetLogLevel("trace") + if len(os.Args) > 1 && os.Args[1] == "help" { + fmt.Printf( + `nauth help: + +for generating extended expiration NIP-98 tokens: + + nauth + + * NIP-98 secret will be expected in the environment variable "%s" - if absent, will not be added to the header. Endpoint is assumed to not require it if absent. An error will be returned if it was needed. + + output will be rendered to stdout + +`, secEnv, + ) + os.Exit(0) + } + if len(os.Args) < 3 { + fail( + `error: nauth requires minimum 2 args: + + signing nsec (in bech32 format) is expected to be found in %s environment variable. 
+ + use "help" to get usage information +`, secEnv, + ) + } + ex, err := time.ParseDuration(os.Args[2]) + if err != nil { + fail(err.Error()) + } + var sign signer.I + if sign, err = GetNIP98Signer(); err != nil { + fail(err.Error()) + } + exp := time.Now().Add(ex).Unix() + ev := httpauth.MakeNIP98Event(os.Args[1], "", "", exp) + if err = ev.Sign(sign); err != nil { + fail(err.Error()) + } + log.T.F("nip-98 http auth event:\n%s\n", ev.SerializeIndented()) + b64 := base64.URLEncoding.EncodeToString(ev.Serialize()) + fmt.Println("Nostr " + b64) +} + +func GetNIP98Signer() (sign signer.I, err error) { + nsex := os.Getenv(secEnv) + var sk []byte + if len(nsex) == 0 { + err = errorf.E( + "no bech32 secret key found in environment variable %s", secEnv, + ) + return + } else if sk, err = bech32encoding.NsecToBytes([]byte(nsex)); chk.E(err) { + err = errorf.E("failed to decode nsec: '%s'", err.Error()) + return + } + sign = &p256k.Signer{} + if err = sign.InitSec(sk); chk.E(err) { + err = errorf.E("failed to init signer: '%s'", err.Error()) + return + } + return +} diff --git a/cmd/nurl/main.go b/cmd/nurl/main.go new file mode 100644 index 0000000..2d34e55 --- /dev/null +++ b/cmd/nurl/main.go @@ -0,0 +1,196 @@ +// Package main is a simple implementation of a cURL like tool that can do +// simple GET/POST operations on a HTTP server that understands NIP-98 +// authentication, with the signing key found in an environment variable. +package main + +import ( + "fmt" + "io" + "net/http" + "net/url" + "orly.dev/chk" + "orly.dev/errorf" + "orly.dev/log" + realy_lol "orly.dev/version" + "os" + + "orly.dev/bech32encoding" + "orly.dev/hex" + "orly.dev/httpauth" + "orly.dev/p256k" + "orly.dev/sha256" + "orly.dev/signer" +) + +const secEnv = "NOSTR_SECRET_KEY" + +var userAgent = fmt.Sprintf("nurl/%s", realy_lol.V) + +func fail(format string, a ...any) { + _, _ = fmt.Fprintf(os.Stderr, format+"\n", a...) + os.Exit(1) +} + +func main() { + // lol.SetLogLevel("trace") + if len(os.Args) > 1 && os.Args[1] == "help" { + fmt.Printf( + `nurl help: + +for nostr http using NIP-98 HTTP authentication: + + nurl + + if no file is given, the request will be processed as a HTTP GET (if relevant there can be request parameters). + + * NIP-98 secret will be expected in the environment variable "%s" - if absent, will not be added to the header. Endpoint is assumed to not require it if absent. An error will be returned if it was needed. + + output will be rendered to stdout + +`, secEnv, + ) + os.Exit(0) + } + if len(os.Args) < 2 { + fail( + `error: nurl requires minimum 1 arg: + + signing nsec (in bech32 format) is expected to be found in %s environment variable. 
+ + use "help" to get usage information +`, secEnv, + ) + } + var err error + var sign signer.I + if sign, err = GetNIP98Signer(); err != nil { + } + var ur *url.URL + if ur, err = url.Parse(os.Args[1]); chk.E(err) { + fail("invalid URL: `%s` error: `%s`", os.Args[2], err.Error()) + } + log.T.S(ur) + if len(os.Args) == 2 { + if err = Get(ur, sign); chk.E(err) { + fail(err.Error()) + } + return + } + if err = Post(os.Args[2], ur, sign); chk.E(err) { + fail(err.Error()) + } +} + +func GetNIP98Signer() (sign signer.I, err error) { + nsex := os.Getenv(secEnv) + var sk []byte + if len(nsex) == 0 { + err = errorf.E( + "no bech32 secret key found in environment variable %s", secEnv, + ) + return + } else if sk, err = bech32encoding.NsecToBytes([]byte(nsex)); chk.E(err) { + err = errorf.E("failed to decode nsec: '%s'", err.Error()) + return + } + sign = &p256k.Signer{} + if err = sign.InitSec(sk); chk.E(err) { + err = errorf.E("failed to init signer: '%s'", err.Error()) + return + } + return +} + +func Get(ur *url.URL, sign signer.I) (err error) { + log.T.F("GET") + var r *http.Request + if r, err = http.NewRequest("GET", ur.String(), nil); chk.E(err) { + return + } + r.Header.Add("User-Agent", userAgent) + if sign != nil { + if err = httpauth.AddNIP98Header( + r, ur, "GET", "", sign, 0, + ); chk.E(err) { + fail(err.Error()) + } + } + client := &http.Client{ + CheckRedirect: func( + req *http.Request, + via []*http.Request, + ) error { + return http.ErrUseLastResponse + }, + } + var res *http.Response + if res, err = client.Do(r); chk.E(err) { + err = errorf.E("request failed: %w", err) + return + } + if _, err = io.Copy(os.Stdout, res.Body); chk.E(err) { + res.Body.Close() + return + } + res.Body.Close() + return +} + +func Post(f string, ur *url.URL, sign signer.I) (err error) { + log.T.F("POST") + var contentLength int64 + var payload io.ReadCloser + // get the file path parameters and optional hash + var fi os.FileInfo + if fi, err = os.Stat(f); chk.E(err) { + return + } + var b []byte + if b, err = os.ReadFile(f); chk.E(err) { + return + } + hb := sha256.Sum256(b) + h := hex.Enc(hb[:]) + contentLength = fi.Size() + if payload, err = os.Open(f); chk.E(err) { + return + } + log.T.F("opened file %s hash %s", f, h) + var r *http.Request + r = &http.Request{ + Method: "POST", + URL: ur, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Body: payload, + ContentLength: contentLength, + Host: ur.Host, + } + r.Header.Add("User-Agent", userAgent) + if sign != nil { + if err = httpauth.AddNIP98Header( + r, ur, "POST", h, sign, 0, + ); chk.E(err) { + fail(err.Error()) + } + } + r.GetBody = func() (rc io.ReadCloser, err error) { + rc = payload + return + } + // log.I.S(r) + client := &http.Client{} + var res *http.Response + if res, err = client.Do(r); chk.E(err) { + return + } + // log.I.S(res) + defer res.Body.Close() + if io.Copy(os.Stdout, res.Body); chk.E(err) { + return + } + fmt.Println() + return +} diff --git a/cmd/vainstr/LICENSE b/cmd/vainstr/LICENSE new file mode 100644 index 0000000..0e259d4 --- /dev/null +++ b/cmd/vainstr/LICENSE @@ -0,0 +1,121 @@ +Creative Commons Legal Code + +CC0 1.0 Universal + + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. 
CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS + PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED + HEREUNDER. + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator +and subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for +the purpose of contributing to a commons of creative, cultural and +scientific works ("Commons") that the public can reliably and without fear +of later claims of infringement build upon, modify, incorporate in other +works, reuse and redistribute as freely as possible in any form whatsoever +and for any purposes, including without limitation commercial purposes. +These owners may contribute to the Commons to promote the ideal of a free +culture and the further production of creative, cultural and scientific +works, or to gain reputation or greater distribution for their Work in +part through the use and efforts of others. + +For these and/or other purposes and motivations, and without any +expectation of additional consideration or compensation, the person +associating CC0 with a Work (the "Affirmer"), to the extent that he or she +is an owner of Copyright and Related Rights in the Work, voluntarily +elects to apply CC0 to the Work and publicly distribute the Work under its +terms, with knowledge of his or her Copyright and Related Rights in the +Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not +limited to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, + communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); +iii. publicity and privacy rights pertaining to a person's image or + likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data + in a Work; + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation + thereof, including any amended or successor version of such + directive); and +vii. other similar, equivalent or corresponding rights throughout the + world based on applicable law or treaty, and any national + implementations thereof. + +2. Waiver. 
To the greatest extent permitted by, but not in contravention +of, applicable law, Affirmer hereby overtly, fully, permanently, +irrevocably and unconditionally waives, abandons, and surrenders all of +Affirmer's Copyright and Related Rights and associated claims and causes +of action, whether now known or unknown (including existing as well as +future claims and causes of action), in the Work (i) in all territories +worldwide, (ii) for the maximum duration provided by applicable law or +treaty (including future time extensions), (iii) in any current or future +medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional +purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each +member of the public at large and to the detriment of Affirmer's heirs and +successors, fully intending that such Waiver shall not be subject to +revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason +be judged legally invalid or ineffective under applicable law, then the +Waiver shall be preserved to the maximum extent permitted taking into +account Affirmer's express Statement of Purpose. In addition, to the +extent the Waiver is so judged Affirmer hereby grants to each affected +person a royalty-free, non transferable, non sublicensable, non exclusive, +irrevocable and unconditional license to exercise Affirmer's Copyright and +Related Rights in the Work (i) in all territories worldwide, (ii) for the +maximum duration provided by applicable law or treaty (including future +time extensions), (iii) in any current or future medium and for any number +of copies, and (iv) for any purpose whatsoever, including without +limitation commercial, advertising or promotional purposes (the +"License"). The License shall be deemed effective as of the date CC0 was +applied by Affirmer to the Work. Should any part of the License for any +reason be judged legally invalid or ineffective under applicable law, such +partial invalidity or ineffectiveness shall not invalidate the remainder +of the License, and in such case Affirmer hereby affirms that he or she +will not (i) exercise any of his or her remaining Copyright and Related +Rights in the Work or (ii) assert any associated claims and causes of +action with respect to the Work, in either case contrary to Affirmer's +express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + b. Affirmer offers the Work as-is and makes no representations or + warranties of any kind concerning the Work, express, implied, + statutory or otherwise, including without limitation warranties of + title, merchantability, fitness for a particular purpose, non + infringement, or the absence of latent or other defects, accuracy, or + the present or absence of errors, whether or not discoverable, all to + the greatest extent permissible under applicable law. + c. Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without + limitation any person's Copyright and Related Rights in the Work. 
+ Further, Affirmer disclaims responsibility for obtaining any necessary + consents, permissions or other rights required for any use of the + Work. + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to + this CC0 or use of the Work. diff --git a/cmd/vainstr/README.md b/cmd/vainstr/README.md new file mode 100644 index 0000000..2b3e7b4 --- /dev/null +++ b/cmd/vainstr/README.md @@ -0,0 +1,16 @@ +# vainstr +nostr vanity key miner + +## usage + +``` +Usage: vainstr [--threads THREADS] [STRING [POSITION]] + +Positional arguments: + STRING + POSITION [begin|contain|end] + +Options: + --threads THREADS number of threads to mine with - defaults to using all CPU threads available + --help, -h display this help and exit +``` \ No newline at end of file diff --git a/cmd/vainstr/main.go b/cmd/vainstr/main.go new file mode 100644 index 0000000..adb9d23 --- /dev/null +++ b/cmd/vainstr/main.go @@ -0,0 +1,233 @@ +// Package main is a simple nostr key miner that uses the fast bitcoin secp256k1 +// C library to derive npubs with specified prefix/infix/suffix strings present. +package main + +import ( + "bytes" + "encoding/hex" + "fmt" + "orly.dev/chk" + "orly.dev/log" + "os" + "runtime" + "strings" + "sync" + "time" + + "github.com/alexflint/go-arg" + + "orly.dev/atomic" + "orly.dev/bech32encoding" + "orly.dev/ec/bech32" + "orly.dev/ec/schnorr" + "orly.dev/ec/secp256k1" + "orly.dev/interrupt" + "orly.dev/qu" +) + +var prefix = append(bech32encoding.PubHRP, '1') + +const ( + PositionBeginning = iota + PositionContains + PositionEnding +) + +type Result struct { + sec *secp256k1.SecretKey + npub []byte + pub *secp256k1.PublicKey +} + +var args struct { + String string `arg:"positional" help:"the string you want to appear in the npub"` + Position string `arg:"positional" default:"end" help:"[begin|contain|end] default: end"` + Threads int `help:"number of threads to mine with - defaults to using all CPU threads available"` +} + +func main() { + arg.MustParse(&args) + if args.String == "" { + _, _ = fmt.Fprintln( + os.Stderr, + `Usage: vainstr [--threads THREADS] [STRING [POSITION]] + +Positional arguments: + STRING the string you want to appear in the npub + POSITION [begin|contain|end] default: end + +Options: + --threads THREADS number of threads to mine with - defaults to using all CPU threads available + --help, -h display this help and exit`, + ) + os.Exit(0) + } + var where int + canonical := strings.ToLower(args.Position) + switch { + case strings.HasPrefix(canonical, "begin"): + where = PositionBeginning + case strings.Contains(canonical, "contain"): + where = PositionContains + case strings.HasSuffix(canonical, "end"): + where = PositionEnding + } + if args.Threads == 0 { + args.Threads = runtime.NumCPU() + } + if err := Vanity(args.String, where, args.Threads); chk.T(err) { + log.F.F("error: %s", err) + } +} + +func Vanity(str string, where int, threads int) (e error) { + + // check the string has valid bech32 ciphers + for i := range str { + wrong := true + for j := range bech32.Charset { + if str[i] == bech32.Charset[j] { + wrong = false + break + } + } + if wrong { + return fmt.Errorf( + "found invalid character '%c' only ones from '%s' allowed\n", + str[i], bech32.Charset, + ) + } + } + started := time.Now() + quit, shutdown := qu.T(), qu.T() + resC := make(chan Result) + interrupt.AddHandler( + func() { + // this will stop work if CTRL-C or Interrupt signal from OS. 
+ shutdown.Q() + }, + ) + var wg sync.WaitGroup + counter := atomic.NewInt64(0) + for i := 0; i < threads; i++ { + log.D.F("starting up worker %d", i) + go mine(str, where, quit, resC, &wg, counter) + } + tick := time.NewTicker(time.Second * 5) + var res Result +out: + for { + select { + case <-tick.C: + workingFor := time.Now().Sub(started) + wm := workingFor % time.Second + workingFor -= wm + fmt.Printf( + "working for %v, attempts %d\n", + workingFor, counter.Load(), + ) + case r := <-resC: + // one of the workers found the solution + res = r + // tell the others to stop + quit.Q() + break out + case <-shutdown.Wait(): + quit.Q() + log.I.Ln("\rinterrupt signal received") + os.Exit(0) + } + } + + // wait for all workers to stop + wg.Wait() + + fmt.Printf( + "generated in %d attempts using %d threads, taking %v\n", + counter.Load(), args.Threads, time.Now().Sub(started), + ) + secBytes := res.sec.Serialize() + log.D.Ln( + "generated key pair:\n"+ + "\nhex:\n"+ + "\tsecret: %s\n"+ + "\tpublic: %s\n\n", + hex.EncodeToString(secBytes), + hex.EncodeToString(schnorr.SerializePubKey(res.pub)), + ) + nsec, _ := bech32encoding.SecretKeyToNsec(res.sec) + fmt.Printf("\nNSEC = %s\nNPUB = %s\n\n", nsec, res.npub) + return +} + +func mine( + str string, where int, quit qu.C, resC chan Result, wg *sync.WaitGroup, + counter *atomic.Int64, +) { + + wg.Add(1) + var r Result + var e error + found := false +out: + for { + select { + case <-quit: + wg.Done() + if found { + // send back the result + log.D.Ln("sending back result\n") + resC <- r + log.D.Ln("sent\n") + } else { + log.D.Ln("other thread found it\n") + } + break out + default: + } + counter.Inc() + r.sec, r.pub, e = GenKeyPair() + if e != nil { + log.E.Ln("error generating key: '%v' worker stopping", e) + break out + } + r.npub, e = bech32encoding.PublicKeyToNpub(r.pub) + if e != nil { + log.E.Ln("fatal error generating npub: %s\n", e) + break out + } + switch where { + case PositionBeginning: + if bytes.HasPrefix(r.npub, append(prefix, []byte(str)...)) { + found = true + quit.Q() + } + case PositionEnding: + if bytes.HasSuffix(r.npub, []byte(str)) { + found = true + quit.Q() + } + case PositionContains: + if bytes.Contains(r.npub, []byte(str)) { + found = true + quit.Q() + } + } + } +} + +// GenKeyPair creates a fresh new key pair using the entropy source used by +// crypto/rand (ie, /dev/random on posix systems). +func GenKeyPair() ( + sec *secp256k1.SecretKey, + pub *secp256k1.PublicKey, err error, +) { + + sec, err = secp256k1.GenerateSecretKey() + if err != nil { + err = fmt.Errorf("error generating key: %s", err) + return + } + pub = sec.PubKey() + return +} diff --git a/codec/codec.go b/codec/codec.go new file mode 100644 index 0000000..d6b9e5f --- /dev/null +++ b/codec/codec.go @@ -0,0 +1,42 @@ +// Package codec is a set of interfaces for nostr messages and message elements. +package codec + +import ( + "io" +) + +// Envelope is an interface for the nostr "envelope" message formats, a JSON +// array with the first field an upper case string that provides type +// information, in combination with the context of the side sending it (relay or +// client). +type Envelope interface { + // Label returns the (uppercase) string that signifies the type of message. + Label() string + // Write outputs the envelope to an io.Writer + Write(w io.Writer) (err error) + // JSON is a somewhat simplified version of the json.Marshaler/json.Unmarshaler + // that has no error for the Marshal side of the operation. 
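The JSON contract documented here differs from encoding/json: Marshal appends to a caller-supplied slice and returns the extended slice, while Unmarshal consumes its input and hands back the unread remainder so successive envelope fields can be decoded from one buffer. A minimal sketch of a type following that contract — the `note` type below is a hypothetical stand-in, not something this patch defines:

```go
// Sketch only: a toy type following the append/remainder codec convention
// described above (no string escaping handled).
package main

import (
	"bytes"
	"errors"
	"fmt"
)

type note struct{ Text string }

// Marshal appends the JSON form of the note to dst and returns the result.
func (n *note) Marshal(dst []byte) []byte {
	dst = append(dst, '"')
	dst = append(dst, n.Text...)
	dst = append(dst, '"')
	return dst
}

// Unmarshal reads one quoted string from b and returns whatever follows it.
func (n *note) Unmarshal(b []byte) (r []byte, err error) {
	if len(b) < 2 || b[0] != '"' {
		return b, errors.New("expected opening quote")
	}
	end := bytes.IndexByte(b[1:], '"')
	if end < 0 {
		return b, errors.New("unterminated string")
	}
	n.Text = string(b[1 : 1+end])
	return b[end+2:], nil
}

func main() {
	in := &note{Text: "hello"}
	buf := in.Marshal(nil)
	out := &note{}
	rest, err := out.Unmarshal(append(buf, []byte(",42")...))
	fmt.Println(out.Text, string(rest), err) // hello ,42 <nil>
}
```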
+ JSON +} + +// JSON is a somewhat simplified version of the json.Marshaler/json.Unmarshaler +// that has no error for the Marshal side of the operation. +type JSON interface { + // Marshal converts the data of the type into JSON, appending it to the provided + // slice and returning the extended slice. + Marshal(dst []byte) (b []byte) + // Unmarshal decodes a JSON form of a type back into the runtime form, and + // returns whatever remains after the type has been decoded out. + Unmarshal(b []byte) (r []byte, err error) +} + +// Binary is a similarly simplified form of the stdlib binary Marshal/Unmarshal +// interfaces. Same as JSON it does not have an error for the MarshalBinary. +type Binary interface { + // MarshalBinary converts the data of the type into binary form, appending it to + // the provided slice. + MarshalBinary(dst []byte) (b []byte) + // UnmarshalBinary decodes a binary form of a type back into the runtime form, + // and returns whatever remains after the type has been decoded out. + UnmarshalBinary(b []byte) (r []byte, err error) +} diff --git a/database/fetch-event-by-serial_test.go b/database/fetch-event-by-serial_test.go index 1af7640..38fdeb6 100644 --- a/database/fetch-event-by-serial_test.go +++ b/database/fetch-event-by-serial_test.go @@ -81,7 +81,7 @@ func TestFetchEventBySerial(t *testing.T) { }, ) if err != nil { - t.Fatalf("Failed to query for IDs: %v", err) + t.Fatalf("Failed to query for Ids: %v", err) } // Verify we got exactly one result @@ -108,24 +108,32 @@ func TestFetchEventBySerial(t *testing.T) { // Verify the fetched event has the same ID as the original event if !bytes.Equal(fetchedEvent.Id, testEvent.Id) { - t.Fatalf("Fetched event ID doesn't match original event ID. Got %x, expected %x", - fetchedEvent.Id, testEvent.Id) + t.Fatalf( + "Fetched event ID doesn't match original event ID. Got %x, expected %x", + fetchedEvent.Id, testEvent.Id, + ) } // Verify other event properties match if fetchedEvent.Kind.K != testEvent.Kind.K { - t.Fatalf("Fetched event kind doesn't match. Got %d, expected %d", - fetchedEvent.Kind.K, testEvent.Kind.K) + t.Fatalf( + "Fetched event kind doesn't match. Got %d, expected %d", + fetchedEvent.Kind.K, testEvent.Kind.K, + ) } if !bytes.Equal(fetchedEvent.Pubkey, testEvent.Pubkey) { - t.Fatalf("Fetched event pubkey doesn't match. Got %x, expected %x", - fetchedEvent.Pubkey, testEvent.Pubkey) + t.Fatalf( + "Fetched event pubkey doesn't match. Got %x, expected %x", + fetchedEvent.Pubkey, testEvent.Pubkey, + ) } if fetchedEvent.CreatedAt.V != testEvent.CreatedAt.V { - t.Fatalf("Fetched event created_at doesn't match. Got %d, expected %d", - fetchedEvent.CreatedAt.V, testEvent.CreatedAt.V) + t.Fatalf( + "Fetched event created_at doesn't match. 
Got %d, expected %d", + fetchedEvent.CreatedAt.V, testEvent.CreatedAt.V, + ) } // Test with a non-existent serial @@ -143,6 +151,9 @@ func TestFetchEventBySerial(t *testing.T) { // The fetched event should be nil if fetchedEvent != nil { - t.Fatalf("Expected nil event for non-existent serial, but got: %v", fetchedEvent) + t.Fatalf( + "Expected nil event for non-existent serial, but got: %v", + fetchedEvent, + ) } } diff --git a/database/get-serial-by-id_test.go b/database/get-serial-by-id_test.go index d41316f..5f67419 100644 --- a/database/get-serial-by-id_test.go +++ b/database/get-serial-by-id_test.go @@ -69,32 +69,32 @@ func TestGetSerialById(t *testing.T) { // Test GetSerialById with a known event ID testEvent := events[3] // Using the same event as in QueryForIds test - + // Get the serial by ID serial, err := db.GetSerialById(testEvent.Id) if err != nil { t.Fatalf("Failed to get serial by ID: %v", err) } - + // Verify the serial is not nil if serial == nil { t.Fatal("Expected serial to be non-nil, but got nil") } - + // Test with a non-existent ID nonExistentId := make([]byte, len(testEvent.Id)) // Ensure it's different from any real ID for i := range nonExistentId { nonExistentId[i] = ^testEvent.Id[i] } - + serial, err = db.GetSerialById(nonExistentId) if err != nil { t.Fatalf("Expected no error for non-existent ID, but got: %v", err) } - - // For non-existent IDs, the function should return nil serial + + // For non-existent Ids, the function should return nil serial if serial != nil { t.Fatalf("Expected nil serial for non-existent ID, but got: %v", serial) } -} \ No newline at end of file +} diff --git a/database/indexes/keys_test.go b/database/indexes/keys_test.go index e377cd7..ab49cde 100644 --- a/database/indexes/keys_test.go +++ b/database/indexes/keys_test.go @@ -625,6 +625,7 @@ func TestPubkeyTagFunctions(t *testing.T) { // TestTagFunctions tests the Tag-related functions func TestTagFunctions(t *testing.T) { + var err error // Test TagVars k, v, ca, ser := TagVars() if k == nil || v == nil || ca == nil || ser == nil { @@ -752,6 +753,7 @@ func TestKindFunctions(t *testing.T) { // TestKindTagFunctions tests the TagKind-related functions func TestKindTagFunctions(t *testing.T) { + var err error // Test TagKindVars k, v, ki, ca, ser := TagKindVars() if ki == nil || k == nil || v == nil || ca == nil || ser == nil { diff --git a/database/indexes/types/identhash_test.go b/database/indexes/types/identhash_test.go index 647c579..a18b583 100644 --- a/database/indexes/types/identhash_test.go +++ b/database/indexes/types/identhash_test.go @@ -10,6 +10,7 @@ import ( ) func TestFromIdent(t *testing.T) { + var err error // Create a test identity testIdent := []byte("test-identity") @@ -34,6 +35,7 @@ func TestFromIdent(t *testing.T) { } func TestIdent_MarshalWriteUnmarshalRead(t *testing.T) { + var err error // Create a Ident with a known value i1 := &Ident{} testIdent := []byte("test-identity") @@ -68,6 +70,7 @@ func TestIdent_MarshalWriteUnmarshalRead(t *testing.T) { } func TestIdent_UnmarshalReadWithCorruptedData(t *testing.T) { + var err error // Create a Ident with a known value i1 := &Ident{} testIdent1 := []byte("test-identity-1") diff --git a/database/query-events.go b/database/query-events.go index 75f18bc..3fc9141 100644 --- a/database/query-events.go +++ b/database/query-events.go @@ -12,7 +12,7 @@ import ( ) // QueryEvents retrieves events based on the provided filter. 
-// If the filter contains IDs, it fetches events by those IDs directly, +// If the filter contains Ids, it fetches events by those Ids directly, // overriding other filter criteria. Otherwise, it queries by other filter // criteria and fetches matching events. Results are returned in reverse // chronological order of their creation timestamps. @@ -62,7 +62,7 @@ func (d *D) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) { if bytes.Equal( ev.Pubkey, e.Pubkey, ) && ev.Kind.K == e.Kind.K { - + } } // } else if ev.Kind.IsParameterizedReplaceable(){ diff --git a/database/query-for-ids.go b/database/query-for-ids.go index ac52f63..96d942b 100644 --- a/database/query-for-ids.go +++ b/database/query-for-ids.go @@ -12,9 +12,9 @@ import ( ) // QueryForIds retrieves a list of IdPkTs based on the provided filter. -// It supports filtering by ranges and tags but disallows filtering by IDs. +// It supports filtering by ranges and tags but disallows filtering by Ids. // Results are sorted by timestamp in reverse chronological order. -// Returns an error if the filter contains IDs or if any operation fails. +// Returns an error if the filter contains Ids or if any operation fails. func (d *D) QueryForIds(c context.T, f *filter.F) ( idPkTs []store.IdPkTs, err error, ) { diff --git a/database/save-event.go b/database/save-event.go index dd7ddc8..8c213cf 100644 --- a/database/save-event.go +++ b/database/save-event.go @@ -34,8 +34,10 @@ func (d *D) SaveEvent(c context.T, ev *event.E) (kc, vc int, err error) { // If there are previous events, log that we're replacing one if len(prevEvents) > 0 { - d.Logger.Infof("Saving new version of replaceable event kind %d from pubkey %s", - ev.Kind.K, hex.Enc(ev.Pubkey)) + d.Logger.Infof( + "Saving new version of replaceable event kind %d from pubkey %s", + ev.Kind.K, hex.Enc(ev.Pubkey), + ) } } @@ -89,6 +91,6 @@ func (d *D) SaveEvent(c context.T, ev *event.E) (kc, vc int, err error) { return }, ) - // log.T.F("total data written: %d bytes keys %d bytes values", kc, vc) + // log.F.F("total data written: %d bytes keys %d bytes values", kc, vc) return } diff --git a/dns/nip05.go b/dns/nip05.go new file mode 100644 index 0000000..046384f --- /dev/null +++ b/dns/nip05.go @@ -0,0 +1,157 @@ +// Package dns is an implementation of the specification of NIP-05, providing +// DNS based verification for nostr identities. +package dns + +import ( + "encoding/json" + "fmt" + "net/http" + "orly.dev/chk" + "orly.dev/errorf" + "regexp" + "strings" + + "orly.dev/bech32encoding/pointers" + "orly.dev/context" + "orly.dev/keys" +) + +// Nip05Regex is an regular expression that matches up with the same pattern as +// an email address. +var Nip05Regex = regexp.MustCompile(`^(?:([\w.+-]+)@)?([\w_-]+(\.[\w_-]+)+)$`) + +// WellKnownResponse is the structure of the JSON to be found at +// /.well-known/nostr.json +type WellKnownResponse struct { + // Names is a list of usernames associated with the DNS identity as in @ + Names map[string]string `json:"names"` + // Relays associates one of the public keys from Names to a list of relay URLs + // that are recommended for that user. + Relays map[string][]string `json:"relays,omitempty"` + NIP46 map[string][]string `json:"nip46,omitempty"` // todo: is this obsolete? +} + +// NewWellKnownResponse creates a new WellKnownResponse and is required as all +// the fields are maps and need to be allocated. 
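For reference, the /.well-known/nostr.json document that WellKnownResponse is meant to decode has the shape sketched below; the name, hex pubkey, and relay URL are made-up placeholders, and the struct is repeated locally only so the snippet stands alone:

```go
// Sketch only: decoding a placeholder nostr.json document into the same
// field layout as WellKnownResponse above.
package main

import (
	"encoding/json"
	"fmt"
)

type WellKnownResponse struct {
	Names  map[string]string   `json:"names"`
	Relays map[string][]string `json:"relays,omitempty"`
	NIP46  map[string][]string `json:"nip46,omitempty"`
}

const doc = `{
  "names":  {"alice": "b0635d6a9851d3aed0cd6c495b282167acf761729078d975fc341b22650b07b9"},
  "relays": {"b0635d6a9851d3aed0cd6c495b282167acf761729078d975fc341b22650b07b9": ["wss://relay.example.com"]}
}`

func main() {
	var r WellKnownResponse
	if err := json.Unmarshal([]byte(doc), &r); err != nil {
		panic(err)
	}
	pub := r.Names["alice"]
	fmt.Println(pub, r.Relays[pub]) // hex pubkey and its recommended relays
}
```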
+func NewWellKnownResponse() *WellKnownResponse { + return &WellKnownResponse{ + Names: make(map[string]string), + Relays: make(map[string][]string), + NIP46: make(map[string][]string), + } +} + +// IsValidIdentifier verifies that an identifier matches a correct NIP-05 +// username@domain +func IsValidIdentifier(input string) bool { + return Nip05Regex.MatchString(input) +} + +// ParseIdentifier searches a string for a valid NIP-05 username@domain +func ParseIdentifier(account string) (name, domain string, err error) { + res := Nip05Regex.FindStringSubmatch(account) + if len(res) == 0 { + return "", "", errorf.E("invalid identifier") + } + if res[1] == "" { + res[1] = "_" + } + return res[1], res[2], nil +} + +// QueryIdentifier queries a web server from the domain of a NIP-05 DNS +// identifier +func QueryIdentifier(c context.T, account string) ( + prf *pointers.Profile, + err error, +) { + + var result *WellKnownResponse + var name string + if result, name, err = Fetch(c, account); chk.E(err) { + return + } + pubkey, ok := result.Names[name] + if !ok { + err = errorf.E("no entry for name '%s'", name) + return + } + if !keys.IsValidPublicKey(pubkey) { + return nil, errorf.E("got an invalid public key '%s'", pubkey) + } + var pkb []byte + if pkb, err = keys.HexPubkeyToBytes(pubkey); chk.E(err) { + return + } + relays, _ := result.Relays[pubkey] + return &pointers.Profile{ + PublicKey: pkb, + Relays: StringSliceToByteSlice(relays), + }, nil +} + +// Fetch parses a DNS identity to find the URL to query for a NIP-05 identity +// verification document. +func Fetch(c context.T, account string) ( + resp *WellKnownResponse, + name string, err error, +) { + + var domain string + if name, domain, err = ParseIdentifier(account); chk.E(err) { + err = errorf.E("failed to parse '%s': %w", account, err) + return + } + var req *http.Request + if req, err = http.NewRequestWithContext( + c, "GET", + fmt.Sprintf("https://%s/.well-known/nostr.json?name=%s", domain, name), + nil, + ); chk.E(err) { + + return resp, name, errorf.E("failed to create a request: %w", err) + } + client := &http.Client{ + CheckRedirect: func( + req *http.Request, + via []*http.Request, + ) error { + return http.ErrUseLastResponse + }, + } + var res *http.Response + if res, err = client.Do(req); chk.E(err) { + err = errorf.E("request failed: %w", err) + return + } + defer res.Body.Close() + resp = NewWellKnownResponse() + b := make([]byte, 65535) + var n int + if n, err = res.Body.Read(b); chk.E(err) { + return + } + b = b[:n] + if err = json.Unmarshal(b, resp); chk.E(err) { + err = errorf.E("failed to decode json response: %w", err) + } + return +} + +// NormalizeIdentifier mainly removes the `_@` from the base username so that +// only the domain remains. +func NormalizeIdentifier(account string) string { + if strings.HasPrefix(account, "_@") { + return account[2:] + } + return account +} + +// StringSliceToByteSlice converts a slice of strings to a slice of slices of +// bytes. 
+func StringSliceToByteSlice(ss []string) (bs [][]byte) { + for _, s := range ss { + bs = append(bs, []byte(s)) + } + return +} diff --git a/dns/nip05_test.go b/dns/nip05_test.go new file mode 100644 index 0000000..60bda91 --- /dev/null +++ b/dns/nip05_test.go @@ -0,0 +1,71 @@ +package dns + +import ( + "bytes" + "context" + "orly.dev/chk" + "testing" + + "orly.dev/bech32encoding/pointers" + "orly.dev/keys" +) + +func TestParse(t *testing.T) { + name, domain, _ := ParseIdentifier("saknd@yyq.com") + if name != "saknd" || domain != "yyq.com" { + t.Fatalf("wrong parsing") + } + + name, domain, _ = ParseIdentifier("287354gkj+asbdfo8gw3rlicbsopifbcp3iougb5piseubfdikswub5ks@yyq.com") + if name != "287354gkj+asbdfo8gw3rlicbsopifbcp3iougb5piseubfdikswub5ks" || domain != "yyq.com" { + t.Fatalf("wrong parsing") + } + + name, domain, _ = ParseIdentifier("asdn.com") + if name != "_" || domain != "asdn.com" { + t.Fatalf("wrong parsing") + } + + name, domain, _ = ParseIdentifier("_@uxux.com.br") + if name != "_" || domain != "uxux.com.br" { + t.Fatalf("wrong parsing") + } + + _, _, err := ParseIdentifier("821yh498ig21") + if err == nil { + t.Fatalf("should have errored") + } + + _, _, err = ParseIdentifier("////") + if err == nil { + t.Fatalf("should have errored") + } +} + +func TestQuery(t *testing.T) { + var pkb []byte + var err error + var pp *pointers.Profile + acct := "fiatjaf.com" + if pp, err = QueryIdentifier(context.Background(), acct); chk.E(err) { + t.Fatal(err) + } + if pkb, err = keys.HexPubkeyToBytes( + "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d", + ); chk.E(err) { + t.Fatal(err) + } + if err != nil || !bytes.Equal(pp.PublicKey, pkb) { + t.Fatalf("invalid query for fiatjaf.com") + } + + pp, err = QueryIdentifier(context.Background(), "htlc@fiatjaf.com") + if pkb, err = keys.HexPubkeyToBytes( + "f9dd6a762506260b38a2d3e5b464213c2e47fa3877429fe9ee60e071a31a07d7", + ); chk.E(err) { + t.Fatal(err) + } + if err != nil || !bytes.Equal(pp.PublicKey, pkb) { + t.Fatalf("invalid query for htlc@fiatjaf.com") + } +} diff --git a/ec/base58/base58_test.go b/ec/base58/base58_test.go index 9f2f525..220c9ab 100644 --- a/ec/base58/base58_test.go +++ b/ec/base58/base58_test.go @@ -9,7 +9,6 @@ import ( "encoding/hex" "testing" - "orly.dev/chk" "orly.dev/ec/base58" ) @@ -99,7 +98,7 @@ func TestBase58(t *testing.T) { // Decode tests for x, test := range hexTests { b, err := hex.DecodeString(test.in) - if chk.E(err) { + if err != nil { t.Errorf("hex.DecodeString failed failed #%d: got: %s", x, test.in) continue } diff --git a/ec/base58/base58check.go b/ec/base58/base58check.go index 6ecb7c1..21de493 100644 --- a/ec/base58/base58check.go +++ b/ec/base58/base58check.go @@ -7,7 +7,7 @@ package base58 import ( "errors" - "github.com/minio/sha256-simd" + "orly.dev/sha256" ) // ErrChecksum indicates that the checksum of a check-encoded string does not verify against diff --git a/ec/base58/base58check_test.go b/ec/base58/base58check_test.go index acdabab..d9a8e9c 100644 --- a/ec/base58/base58check_test.go +++ b/ec/base58/base58check_test.go @@ -7,7 +7,6 @@ package base58_test import ( "testing" - "orly.dev/chk" "orly.dev/ec/base58" ) @@ -51,7 +50,7 @@ func TestBase58Check(t *testing.T) { // test decoding res, version, err := base58.CheckDecode(test.out) switch { - case chk.E(err): + case err != nil: t.Errorf("CheckDecode test #%d failed with err: %v", x, err) case version != test.version: diff --git a/ec/base58/example_test.go b/ec/base58/example_test.go index bc43416..0076ecb 100644 --- 
a/ec/base58/example_test.go +++ b/ec/base58/example_test.go @@ -7,7 +7,6 @@ package base58_test import ( "fmt" - "orly.dev/chk" "orly.dev/ec/base58" ) @@ -43,7 +42,7 @@ func ExampleCheckDecode() { // Decode an example Base58Check encoded data. encoded := "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa" decoded, version, err := base58.CheckDecode(encoded) - if chk.E(err) { + if err != nil { fmt.Println(err) return } diff --git a/ec/base58/genalphabet.go b/ec/base58/genalphabet.go index 90f7330..9cb8702 100644 --- a/ec/base58/genalphabet.go +++ b/ec/base58/genalphabet.go @@ -13,8 +13,6 @@ import ( "log" "os" "strconv" - - "orly.dev/chk" ) var ( @@ -47,14 +45,14 @@ var b58 = [256]byte{`) func write(w io.Writer, b []byte) { _, err := w.Write(b) - if chk.E(err) { + if err != nil { log.Fatal(err) } } func main() { fi, err := os.Create("alphabet.go") - if chk.E(err) { + if err != nil { log.Fatal(err) } defer fi.Close() diff --git a/ec/base58/util_test.go b/ec/base58/util_test.go new file mode 100644 index 0000000..64b0b18 --- /dev/null +++ b/ec/base58/util_test.go @@ -0,0 +1,9 @@ +package base58_test + +import ( + "orly.dev/lol" +) + +var ( + log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf +) diff --git a/ec/bech32/bech32.go b/ec/bech32/bech32.go index 33a89a9..c27003b 100644 --- a/ec/bech32/bech32.go +++ b/ec/bech32/bech32.go @@ -8,8 +8,6 @@ package bech32 import ( "bytes" "strings" - - "orly.dev/chk" ) // Charset is the set of characters used in the data section of bech32 strings. @@ -122,10 +120,8 @@ func bech32Polymod(hrp []byte, values, checksum []byte) int { // and 126), otherwise the results are undefined. // // For more details on the checksum calculation, please refer to BIP 173. -func writeBech32Checksum( - hrp []byte, data []byte, bldr *bytes.Buffer, - version Version, -) { +func writeBech32Checksum(hrp []byte, data []byte, bldr *bytes.Buffer, + version Version) { bech32Const := int(VersionToConsts[version]) polymod := bech32Polymod(hrp, data, nil) ^ bech32Const @@ -205,7 +201,7 @@ func decodeNoLimit(bech []byte) ([]byte, []byte, Version, error) { // Each character corresponds to the byte with value of the index in // 'charset'. decoded, err := toBytes(data) - if chk.E(err) { + if err != nil { return nil, nil, VersionUnknown, err } // Verify if the checksum (stored inside decoded[:]) is valid, given the @@ -319,10 +315,8 @@ func EncodeM(hrp, data []byte) ([]byte, error) { // ConvertBits converts a byte slice where each byte is encoding fromBits bits, // to a byte slice where each byte is encoding toBits bits. -func ConvertBits(data []byte, fromBits, toBits uint8, pad bool) ( - []byte, - error, -) { +func ConvertBits(data []byte, fromBits, toBits uint8, pad bool) ([]byte, + error) { if fromBits < 1 || fromBits > 8 || toBits < 1 || toBits > 8 { return nil, ErrInvalidBitGroups{} @@ -391,7 +385,7 @@ func ConvertBits(data []byte, fromBits, toBits uint8, pad bool) ( // checksum purposes. func EncodeFromBase256(hrp, data []byte) ([]byte, error) { converted, err := ConvertBits(data, 8, 5, true) - if chk.E(err) { + if err != nil { return nil, err } return Encode(hrp, converted) @@ -402,11 +396,11 @@ func EncodeFromBase256(hrp, data []byte) ([]byte, error) { // base256-encoded byte slice and returns it along with the lowercase HRP. 
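A small round-trip sketch of the base256 helpers above: EncodeFromBase256 handles the 8-to-5 bit regrouping and checksum, and DecodeToBase256 reverses it. It assumes the orly.dev/ec/bech32 import path used elsewhere in this patch; the "npub" HRP and zeroed payload are placeholders only:

```go
// Sketch only: encode 32 raw bytes to bech32 and decode them back.
package main

import (
	"bytes"
	"fmt"

	"orly.dev/ec/bech32"
)

func main() {
	payload := make([]byte, 32) // e.g. a 32-byte x-only public key
	encoded, err := bech32.EncodeFromBase256([]byte("npub"), payload)
	if err != nil {
		panic(err)
	}
	hrp, decoded, err := bech32.DecodeToBase256(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(hrp), bytes.Equal(decoded, payload)) // npub true
}
```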
func DecodeToBase256(bech []byte) ([]byte, []byte, error) { hrp, data, err := Decode(bech) - if chk.E(err) { + if err != nil { return nil, nil, err } converted, err := ConvertBits(data, 5, 8, false) - if chk.E(err) { + if err != nil { return nil, nil, err } return hrp, converted, nil diff --git a/ec/bech32/bech32_test.go b/ec/bech32/bech32_test.go index 0bd59a7..dce7dbf 100644 --- a/ec/bech32/bech32_test.go +++ b/ec/bech32/bech32_test.go @@ -12,8 +12,6 @@ import ( "fmt" "strings" "testing" - - "orly.dev/chk" ) // TestBech32 tests whether decoding and re-encoding the valid BIP-173 test @@ -26,52 +24,32 @@ func TestBech32(t *testing.T) { }{ {"A12UEL5L", nil}, {"a12uel5l", nil}, - { - "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs", - nil, - }, + {"an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs", + nil}, {"abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", nil}, - { - "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", - nil, - }, + {"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", + nil}, {"split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", nil}, - { - "split1checkupstagehandshakeupstreamerranterredcaperred2y9e2w", - ErrInvalidChecksum{ - "2y9e3w", "2y9e3wlc445v", - "2y9e2w", - }, - }, // invalid checksum - { - "s lit1checkupstagehandshakeupstreamerranterredcaperredp8hs2p", - ErrInvalidCharacter(' '), - }, // invalid character (space) in hrp - { - "spl\x7Ft1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", - ErrInvalidCharacter(127), - }, // invalid character (DEL) in hrp - { - "split1cheo2y9e2w", - ErrNonCharsetChar('o'), - }, // invalid character (o) in data part + {"split1checkupstagehandshakeupstreamerranterredcaperred2y9e2w", + ErrInvalidChecksum{"2y9e3w", "2y9e3wlc445v", + "2y9e2w"}}, // invalid checksum + {"s lit1checkupstagehandshakeupstreamerranterredcaperredp8hs2p", + ErrInvalidCharacter(' ')}, // invalid character (space) in hrp + {"spl\x7Ft1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", + ErrInvalidCharacter(127)}, // invalid character (DEL) in hrp + {"split1cheo2y9e2w", + ErrNonCharsetChar('o')}, // invalid character (o) in data part {"split1a2y9w", ErrInvalidSeparatorIndex(5)}, // too short data part - { - "1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", - ErrInvalidSeparatorIndex(0), - }, // empty hrp - { - "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", - ErrInvalidLength(91), - }, // too long + {"1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", + ErrInvalidSeparatorIndex(0)}, // empty hrp + {"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", + ErrInvalidLength(91)}, // too long // Additional test vectors used in bitcoin core {" 1nwldj5", ErrInvalidCharacter(' ')}, {"\x7f" + "1axkwrx", ErrInvalidCharacter(0x7f)}, {"\x801eym55h", ErrInvalidCharacter(0x80)}, - { - "an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx", - ErrInvalidLength(91), - }, + {"an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx", + ErrInvalidLength(91)}, {"pzry9x0s0muk", ErrInvalidSeparatorIndex(-1)}, {"1pzry9x0s0muk", ErrInvalidSeparatorIndex(0)}, {"x1b4n0q5v", ErrNonCharsetChar(98)}, @@ -87,32 +65,28 @@ func TestBech32(t *testing.T) { str := []byte(test.str) hrp, decoded, err := Decode([]byte(str)) if !errors.Is(err, 
test.expectedError) { - t.Errorf( - "%d: expected decoding error %v "+ - "instead got %v", i, test.expectedError, err, - ) + t.Errorf("%d: expected decoding error %v "+ + "instead got %v", i, test.expectedError, err) continue } - if chk.E(err) { + if err != nil { // End test case here if a decoding error was expected. continue } // Check that it encodes to the same string encoded, err := Encode(hrp, decoded) - if chk.E(err) { + if err != nil { t.Errorf("encoding failed: %v", err) } if !bytes.Equal(encoded, bytes.ToLower([]byte(str))) { - t.Errorf( - "expected data to encode to %v, but got %v", - str, encoded, - ) + t.Errorf("expected data to encode to %v, but got %v", + str, encoded) } // Flip a bit in the string an make sure it is caught. pos := bytes.LastIndexAny(str, "1") flipped := []byte(string(str[:pos+1]) + string(str[pos+1]^1) + string(str[pos+2:])) _, _, err = Decode(flipped) - if !chk.E(err) { + if err == nil { t.Error("expected decoding to fail") } } @@ -129,25 +103,19 @@ func TestBech32M(t *testing.T) { }{ {"A1LQFN3A", nil}, {"a1lqfn3a", nil}, - { - "an83characterlonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11sg7hg6", - nil, - }, + {"an83characterlonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11sg7hg6", + nil}, {"abcdef1l7aum6echk45nj3s0wdvt2fg8x9yrzpqzd3ryx", nil}, - { - "11llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllludsr8", - nil, - }, + {"11llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllludsr8", + nil}, {"split1checkupstagehandshakeupstreamerranterredcaperredlc445v", nil}, {"?1v759aa", nil}, // Additional test vectors used in bitcoin core {"\x201xj0phk", ErrInvalidCharacter('\x20')}, {"\x7f1g6xzxy", ErrInvalidCharacter('\x7f')}, {"\x801vctc34", ErrInvalidCharacter('\x80')}, - { - "an84characterslonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11d6pts4", - ErrInvalidLength(91), - }, + {"an84characterslonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11d6pts4", + ErrInvalidLength(91)}, {"qyrz8wqd2c9m", ErrInvalidSeparatorIndex(-1)}, {"1qyrz8wqd2c9m", ErrInvalidSeparatorIndex(0)}, {"y1b0jsk6g", ErrNonCharsetChar(98)}, @@ -167,34 +135,30 @@ func TestBech32M(t *testing.T) { str := []byte(test.str) hrp, decoded, err := Decode(str) if test.expectedError != err { - t.Errorf( - "%d: (%v) expected decoding error %v "+ - "instead got %v", i, str, test.expectedError, - err, - ) + t.Errorf("%d: (%v) expected decoding error %v "+ + "instead got %v", i, str, test.expectedError, + err) continue } - if chk.E(err) { + if err != nil { // End test case here if a decoding error was expected. continue } // Check that it encodes to the same string, using bech32 m. encoded, err := EncodeM(hrp, decoded) - if chk.E(err) { + if err != nil { t.Errorf("encoding failed: %v", err) } if !bytes.Equal(encoded, bytes.ToLower(str)) { - t.Errorf( - "expected data to encode to %v, but got %v", - str, encoded, - ) + t.Errorf("expected data to encode to %v, but got %v", + str, encoded) } // Flip a bit in the string an make sure it is caught. 
pos := bytes.LastIndexAny(str, "1") flipped := []byte(string(str[:pos+1]) + string(str[pos+1]^1) + string(str[pos+2:])) _, _, err = Decode(flipped) - if !chk.E(err) { + if err == nil { t.Error("expected decoding to fail") } } @@ -210,73 +174,47 @@ func TestBech32DecodeGeneric(t *testing.T) { }{ {"A1LQFN3A", VersionM}, {"a1lqfn3a", VersionM}, - { - "an83characterlonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11sg7hg6", - VersionM, - }, + {"an83characterlonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11sg7hg6", + VersionM}, {"abcdef1l7aum6echk45nj3s0wdvt2fg8x9yrzpqzd3ryx", VersionM}, - { - "11llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllludsr8", - VersionM, - }, - { - "split1checkupstagehandshakeupstreamerranterredcaperredlc445v", - VersionM, - }, + {"11llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllludsr8", + VersionM}, + {"split1checkupstagehandshakeupstreamerranterredcaperredlc445v", + VersionM}, {"?1v759aa", VersionM}, {"A12UEL5L", Version0}, {"a12uel5l", Version0}, - { - "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs", - Version0, - }, + {"an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs", + Version0}, {"abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", Version0}, - { - "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", - Version0, - }, - { - "split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", - Version0, - }, + {"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", + Version0}, + {"split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", + Version0}, {"BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4", Version0}, - { - "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7", - Version0, - }, - { - "bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kt5nd6y", - VersionM, - }, + {"tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7", + Version0}, + {"bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kt5nd6y", + VersionM}, {"BC1SW50QGDZ25J", VersionM}, {"bc1zw508d6qejxtdg4y5r3zarvaryvaxxpcs", VersionM}, - { - "tb1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesrxh6hy", - Version0, - }, - { - "tb1pqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesf3hn0c", - VersionM, - }, - { - "bc1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqzk5jj0", - VersionM, - }, + {"tb1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesrxh6hy", + Version0}, + {"tb1pqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesf3hn0c", + VersionM}, + {"bc1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqzk5jj0", + VersionM}, } for i, test := range tests { _, _, version, err := DecodeGeneric([]byte(test.str)) - if chk.E(err) { - t.Errorf( - "%d: (%v) unexpected error during "+ - "decoding: %v", i, test.str, err, - ) + if err != nil { + t.Errorf("%d: (%v) unexpected error during "+ + "decoding: %v", i, test.str, err) continue } if version != test.version { - t.Errorf( - "(%v): invalid version: expected %v, got %v", - test.str, test.version, version, - ) + t.Errorf("(%v): invalid version: expected %v, got %v", + test.str, test.version, version) } } } @@ -290,91 +228,79 @@ func TestMixedCaseEncode(t *testing.T) { hrp string data string encoded string - }{ - { - name: "all uppercase HRP with no data", - hrp: "A", - data: "", - encoded: "a12uel5l", - }, { - name: "all 
uppercase HRP with data", - hrp: "UPPERCASE", - data: "787878", - encoded: "uppercase10pu8sss7kmp", - }, { - name: "mixed case HRP even offsets uppercase", - hrp: "AbCdEf", - data: "00443214c74254b635cf84653a56d7c675be77df", - encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", - }, { - name: "mixed case HRP odd offsets uppercase ", - hrp: "aBcDeF", - data: "00443214c74254b635cf84653a56d7c675be77df", - encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", - }, { - name: "all lowercase HRP", - hrp: "abcdef", - data: "00443214c74254b635cf84653a56d7c675be77df", - encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", - }, - } + }{{ + name: "all uppercase HRP with no data", + hrp: "A", + data: "", + encoded: "a12uel5l", + }, { + name: "all uppercase HRP with data", + hrp: "UPPERCASE", + data: "787878", + encoded: "uppercase10pu8sss7kmp", + }, { + name: "mixed case HRP even offsets uppercase", + hrp: "AbCdEf", + data: "00443214c74254b635cf84653a56d7c675be77df", + encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", + }, { + name: "mixed case HRP odd offsets uppercase ", + hrp: "aBcDeF", + data: "00443214c74254b635cf84653a56d7c675be77df", + encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", + }, { + name: "all lowercase HRP", + hrp: "abcdef", + data: "00443214c74254b635cf84653a56d7c675be77df", + encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", + }} for _, test := range tests { // Convert the text hex to bytes, convert those bytes from base256 to // base32, then ensure the encoded result with the HRP provided in the // test data is as expected. data, err := hex.DecodeString(test.data) - if chk.E(err) { + if err != nil { t.Errorf("%q: invalid hex %q: %v", test.name, test.data, err) continue } convertedData, err := ConvertBits(data, 8, 5, true) - if chk.E(err) { - t.Errorf( - "%q: unexpected convert bits error: %v", test.name, - err, - ) + if err != nil { + t.Errorf("%q: unexpected convert bits error: %v", test.name, + err) continue } gotEncoded, err := Encode([]byte(test.hrp), convertedData) - if chk.E(err) { + if err != nil { t.Errorf("%q: unexpected encode error: %v", test.name, err) continue } if !bytes.Equal(gotEncoded, []byte(test.encoded)) { - t.Errorf( - "%q: mismatched encoding -- got %q, want %q", test.name, - gotEncoded, test.encoded, - ) + t.Errorf("%q: mismatched encoding -- got %q, want %q", test.name, + gotEncoded, test.encoded) continue } // Ensure the decoding the expected lowercase encoding converted to all // uppercase produces the lowercase HRP and original data. 
gotHRP, gotData, err := Decode(bytes.ToUpper([]byte(test.encoded))) - if chk.E(err) { + if err != nil { t.Errorf("%q: unexpected decode error: %v", test.name, err) continue } wantHRP := strings.ToLower(test.hrp) if !bytes.Equal(gotHRP, []byte(wantHRP)) { - t.Errorf( - "%q: mismatched decoded HRP -- got %q, want %q", test.name, - gotHRP, wantHRP, - ) + t.Errorf("%q: mismatched decoded HRP -- got %q, want %q", test.name, + gotHRP, wantHRP) continue } convertedGotData, err := ConvertBits(gotData, 5, 8, false) - if chk.E(err) { - t.Errorf( - "%q: unexpected convert bits error: %v", test.name, - err, - ) + if err != nil { + t.Errorf("%q: unexpected convert bits error: %v", test.name, + err) continue } if !bytes.Equal(convertedGotData, data) { - t.Errorf( - "%q: mismatched data -- got %x, want %x", test.name, - convertedGotData, data, - ) + t.Errorf("%q: mismatched data -- got %x, want %x", test.name, + convertedGotData, data) continue } } @@ -386,16 +312,14 @@ func TestCanDecodeUnlimtedBech32(t *testing.T) { input := "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq5kx0yd" // Sanity check that an input of this length errors on regular Decode() _, _, err := Decode([]byte(input)) - if !chk.E(err) { + if err == nil { t.Fatalf("Test vector not appropriate") } // Try and decode it. hrp, data, err := DecodeNoLimit([]byte(input)) - if chk.E(err) { - t.Fatalf( - "Expected decoding of large string to work. Got error: %v", - err, - ) + if err != nil { + t.Fatalf("Expected decoding of large string to work. Got error: %v", + err) } // Verify data for correctness. if !bytes.Equal(hrp, []byte("1")) { @@ -419,145 +343,125 @@ func TestBech32Base256(t *testing.T) { hrp string // expected human-readable part data string // expected hex-encoded data err error // expected error - }{ - { - name: "all uppercase, no data", - encoded: "A12UEL5L", - hrp: "a", - data: "", - }, { - name: "long hrp with separator and excluded chars, no data", - encoded: "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs", - hrp: "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio", - data: "", - }, { - name: "6 char hrp with data with leading zero", - encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", - hrp: "abcdef", - data: "00443214c74254b635cf84653a56d7c675be77df", - }, { - name: "hrp same as separator and max length encoded string", - encoded: "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", - hrp: "1", - data: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - }, { - name: "5 char hrp with data chosen to produce human-readable data part", - encoded: "split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", - hrp: "split", - data: "c5f38b70305f519bf66d85fb6cf03058f3dde463ecd7918f2dc743918f2d", - }, { - name: "same as previous but with checksum invalidated", - encoded: "split1checkupstagehandshakeupstreamerranterredcaperred2y9e2w", - err: ErrInvalidChecksum{"2y9e3w", "2y9e3wlc445v", "2y9e2w"}, - }, { - name: "hrp with invalid character (space)", - encoded: "s lit1checkupstagehandshakeupstreamerranterredcaperredp8hs2p", - err: ErrInvalidCharacter(' '), - }, { - name: "hrp with invalid character (DEL)", - encoded: "spl\x7ft1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", - err: ErrInvalidCharacter(127), - }, { - name: "data part with invalid character (o)", - encoded: "split1cheo2y9e2w", - err: 
ErrNonCharsetChar('o'), - }, { - name: "data part too short", - encoded: "split1a2y9w", - err: ErrInvalidSeparatorIndex(5), - }, { - name: "empty hrp", - encoded: "1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", - err: ErrInvalidSeparatorIndex(0), - }, { - name: "no separator", - encoded: "pzry9x0s0muk", - err: ErrInvalidSeparatorIndex(-1), - }, { - name: "too long by one char", - encoded: "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", - err: ErrInvalidLength(91), - }, { - name: "invalid due to mixed case in hrp", - encoded: "aBcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", - err: ErrMixedCase{}, - }, { - name: "invalid due to mixed case in data part", - encoded: "abcdef1Qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", - err: ErrMixedCase{}, - }, - } + }{{ + name: "all uppercase, no data", + encoded: "A12UEL5L", + hrp: "a", + data: "", + }, { + name: "long hrp with separator and excluded chars, no data", + encoded: "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs", + hrp: "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio", + data: "", + }, { + name: "6 char hrp with data with leading zero", + encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", + hrp: "abcdef", + data: "00443214c74254b635cf84653a56d7c675be77df", + }, { + name: "hrp same as separator and max length encoded string", + encoded: "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", + hrp: "1", + data: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + }, { + name: "5 char hrp with data chosen to produce human-readable data part", + encoded: "split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", + hrp: "split", + data: "c5f38b70305f519bf66d85fb6cf03058f3dde463ecd7918f2dc743918f2d", + }, { + name: "same as previous but with checksum invalidated", + encoded: "split1checkupstagehandshakeupstreamerranterredcaperred2y9e2w", + err: ErrInvalidChecksum{"2y9e3w", "2y9e3wlc445v", "2y9e2w"}, + }, { + name: "hrp with invalid character (space)", + encoded: "s lit1checkupstagehandshakeupstreamerranterredcaperredp8hs2p", + err: ErrInvalidCharacter(' '), + }, { + name: "hrp with invalid character (DEL)", + encoded: "spl\x7ft1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", + err: ErrInvalidCharacter(127), + }, { + name: "data part with invalid character (o)", + encoded: "split1cheo2y9e2w", + err: ErrNonCharsetChar('o'), + }, { + name: "data part too short", + encoded: "split1a2y9w", + err: ErrInvalidSeparatorIndex(5), + }, { + name: "empty hrp", + encoded: "1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", + err: ErrInvalidSeparatorIndex(0), + }, { + name: "no separator", + encoded: "pzry9x0s0muk", + err: ErrInvalidSeparatorIndex(-1), + }, { + name: "too long by one char", + encoded: "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", + err: ErrInvalidLength(91), + }, { + name: "invalid due to mixed case in hrp", + encoded: "aBcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", + err: ErrMixedCase{}, + }, { + name: "invalid due to mixed case in data part", + encoded: "abcdef1Qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", + err: ErrMixedCase{}, + }} for _, test := range tests { // Ensure the decode either produces an error or not as expected. 
str := test.encoded gotHRP, gotData, err := DecodeToBase256([]byte(str)) if test.err != err { - t.Errorf( - "%q: unexpected decode error -- got %v, want %v", - test.name, err, test.err, - ) + t.Errorf("%q: unexpected decode error -- got %v, want %v", + test.name, err, test.err) continue } - if chk.E(err) { + if err != nil { // End test case here if a decoding error was expected. continue } // Ensure the expected HRP and original data are as expected. if !bytes.Equal(gotHRP, []byte(test.hrp)) { - t.Errorf( - "%q: mismatched decoded HRP -- got %q, want %q", test.name, - gotHRP, test.hrp, - ) + t.Errorf("%q: mismatched decoded HRP -- got %q, want %q", test.name, + gotHRP, test.hrp) continue } data, err := hex.DecodeString(test.data) - if chk.E(err) { + if err != nil { t.Errorf("%q: invalid hex %q: %v", test.name, test.data, err) continue } if !bytes.Equal(gotData, data) { - t.Errorf( - "%q: mismatched data -- got %x, want %x", test.name, - gotData, data, - ) + t.Errorf("%q: mismatched data -- got %x, want %x", test.name, + gotData, data) continue } // Encode the same data with the HRP converted to all uppercase and // ensure the result is the lowercase version of the original encoded // bech32 string. - gotEncoded, err := EncodeFromBase256( - bytes.ToUpper([]byte(test.hrp)), data, - ) - if chk.E(err) { - t.Errorf( - "%q: unexpected uppercase HRP encode error: %v", test.name, - err, - ) + gotEncoded, err := EncodeFromBase256(bytes.ToUpper([]byte(test.hrp)), data) + if err != nil { + t.Errorf("%q: unexpected uppercase HRP encode error: %v", test.name, + err) } wantEncoded := bytes.ToLower([]byte(str)) if !bytes.Equal(gotEncoded, wantEncoded) { - t.Errorf( - "%q: mismatched encoding -- got %q, want %q", test.name, - gotEncoded, wantEncoded, - ) + t.Errorf("%q: mismatched encoding -- got %q, want %q", test.name, + gotEncoded, wantEncoded) } // Encode the same data with the HRP converted to all lowercase and // ensure the result is the lowercase version of the original encoded // bech32 string. - gotEncoded, err = EncodeFromBase256( - bytes.ToLower([]byte(test.hrp)), data, - ) - if chk.E(err) { - t.Errorf( - "%q: unexpected lowercase HRP encode error: %v", test.name, - err, - ) + gotEncoded, err = EncodeFromBase256(bytes.ToLower([]byte(test.hrp)), data) + if err != nil { + t.Errorf("%q: unexpected lowercase HRP encode error: %v", test.name, + err) } if !bytes.Equal(gotEncoded, wantEncoded) { - t.Errorf( - "%q: mismatched encoding -- got %q, want %q", test.name, - gotEncoded, wantEncoded, - ) + t.Errorf("%q: mismatched encoding -- got %q, want %q", test.name, + gotEncoded, wantEncoded) } // Encode the same data with the HRP converted to mixed upper and // lowercase and ensure the result is the lowercase version of the @@ -571,23 +475,19 @@ func TestBech32Base256(t *testing.T) { mixedHRPBuilder.WriteRune(r) } gotEncoded, err = EncodeFromBase256(mixedHRPBuilder.Bytes(), data) - if chk.E(err) { - t.Errorf( - "%q: unexpected lowercase HRP encode error: %v", test.name, - err, - ) + if err != nil { + t.Errorf("%q: unexpected lowercase HRP encode error: %v", test.name, + err) } if !bytes.Equal(gotEncoded, wantEncoded) { - t.Errorf( - "%q: mismatched encoding -- got %q, want %q", test.name, - gotEncoded, wantEncoded, - ) + t.Errorf("%q: mismatched encoding -- got %q, want %q", test.name, + gotEncoded, wantEncoded) } // Ensure a bit flip in the string is caught. 
pos := strings.LastIndexAny(test.encoded, "1") flipped := str[:pos+1] + string(str[pos+1]^1) + str[pos+2:] _, _, err = DecodeToBase256([]byte(flipped)) - if !chk.E(err) { + if err == nil { t.Error("expected decoding to fail") } } @@ -599,12 +499,12 @@ func TestBech32Base256(t *testing.T) { func BenchmarkEncodeDecodeCycle(b *testing.B) { // Use a fixed, 49-byte raw data for testing. inputData, err := hex.DecodeString("cbe6365ddbcda9a9915422c3f091c13f8c7b2f263b8d34067bd12c274408473fa764871c9dd51b1bb34873b3473b633ed1") - if chk.E(err) { + if err != nil { b.Fatalf("failed to initialize input data: %v", err) } // Convert this into a 79-byte, base 32 byte slice. base32Input, err := ConvertBits(inputData, 8, 5, true) - if chk.E(err) { + if err != nil { b.Fatalf("failed to convert input to 32 bits-per-element: %v", err) } // Use a fixed hrp for the tests. This should generate an encoded bech32 @@ -617,11 +517,11 @@ func BenchmarkEncodeDecodeCycle(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { str, err := Encode([]byte(hrp), base32Input) - if chk.E(err) { + if err != nil { b.Fatalf("failed to encode input: %v", err) } _, _, err = Decode(str) - if chk.E(err) { + if err != nil { b.Fatalf("failed to decode string: %v", err) } } @@ -679,22 +579,20 @@ func TestConvertBits(t *testing.T) { } for i, tc := range tests { input, err := hex.DecodeString(tc.input) - if chk.E(err) { + if err != nil { t.Fatalf("invalid test input data: %v", err) } expected, err := hex.DecodeString(tc.output) - if chk.E(err) { + if err != nil { t.Fatalf("invalid test output data: %v", err) } actual, err := ConvertBits(input, tc.fromBits, tc.toBits, tc.pad) - if chk.E(err) { + if err != nil { t.Fatalf("test case %d failed: %v", i, err) } if !bytes.Equal(actual, expected) { - t.Fatalf( - "test case %d has wrong output; expected=%x actual=%x", - i, expected, actual, - ) + t.Fatalf("test case %d has wrong output; expected=%x actual=%x", + i, expected, actual) } } } @@ -720,15 +618,13 @@ func TestConvertBitsFailures(t *testing.T) { } for i, tc := range tests { input, err := hex.DecodeString(tc.input) - if chk.E(err) { + if err != nil { t.Fatalf("invalid test input data: %v", err) } _, err = ConvertBits(input, tc.fromBits, tc.toBits, tc.pad) if err != tc.err { - t.Fatalf( - "test case %d failure: expected '%v' got '%v'", i, - tc.err, err, - ) + t.Fatalf("test case %d failure: expected '%v' got '%v'", i, + tc.err, err) } } } @@ -741,14 +637,14 @@ func TestConvertBitsFailures(t *testing.T) { func BenchmarkConvertBitsDown(b *testing.B) { // Use a fixed, 49-byte raw data for testing. inputData, err := hex.DecodeString("cbe6365ddbcda9a9915422c3f091c13f8c7b2f263b8d34067bd12c274408473fa764871c9dd51b1bb34873b3473b633ed1") - if chk.E(err) { + if err != nil { b.Fatalf("failed to initialize input data: %v", err) } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { _, err := ConvertBits(inputData, 8, 5, true) - if chk.E(err) { + if err != nil { b.Fatalf("error converting bits: %v", err) } } @@ -762,14 +658,14 @@ func BenchmarkConvertBitsDown(b *testing.B) { func BenchmarkConvertBitsUp(b *testing.B) { // Use a fixed, 79-byte raw data for testing. 
inputData, err := hex.DecodeString("190f13030c170e1b1916141a13040a14040b011f01040e01071e0607160b1906070e06130801131b1a0416020e110008081c1f1a0e19040703120e1d0a06181b160d0407070c1a07070d11131d1408") - if chk.E(err) { + if err != nil { b.Fatalf("failed to initialize input data: %v", err) } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { _, err := ConvertBits(inputData, 8, 5, true) - if chk.E(err) { + if err != nil { b.Fatalf("error converting bits: %v", err) } } diff --git a/ec/bech32/example_test.go b/ec/bech32/example_test.go index 0984b3a..ae15651 100644 --- a/ec/bech32/example_test.go +++ b/ec/bech32/example_test.go @@ -7,15 +7,13 @@ package bech32 import ( "encoding/hex" "fmt" - - "orly.dev/chk" ) // This example demonstrates how to decode a bech32 encoded string. func ExampleDecode() { encoded := "bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx" hrp, decoded, err := Decode([]byte(encoded)) - if chk.E(err) { + if err != nil { fmt.Println("Error:", err) } // Show the decoded data. @@ -31,11 +29,11 @@ func ExampleEncode() { data := []byte("Test data") // Convert test data to base32: conv, err := ConvertBits(data, 8, 5, true) - if chk.E(err) { + if err != nil { fmt.Println("Error:", err) } encoded, err := Encode([]byte("customHrp!11111q"), conv) - if chk.E(err) { + if err != nil { fmt.Println("Error:", err) } // Show the encoded data. diff --git a/ec/bench_test.go b/ec/bench_test.go index aee35a5..c4d8625 100644 --- a/ec/bench_test.go +++ b/ec/bench_test.go @@ -8,7 +8,6 @@ import ( "math/big" "testing" - "orly.dev/chk" "orly.dev/ec/secp256k1" "orly.dev/hex" ) @@ -36,7 +35,7 @@ func setHex(hexString string) *FieldVal { // called with hard-coded values. func hexToFieldVal(s string) *FieldVal { b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } var f FieldVal @@ -150,7 +149,7 @@ func BenchmarkScalarMult(b *testing.B) { // must only) be called with hard-coded values. func hexToModNScalar(s string) *ModNScalar { b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } var scalar ModNScalar diff --git a/ec/btcec_test.go b/ec/btcec_test.go index 807fafb..3d80d79 100644 --- a/ec/btcec_test.go +++ b/ec/btcec_test.go @@ -11,8 +11,6 @@ import ( "fmt" "math/big" "testing" - - "orly.dev/chk" ) // isJacobianOnS256Curve returns boolean if the point (x,y,z) is on the @@ -231,24 +229,18 @@ func TestAddJacobian(t *testing.T) { // Ensure the test data is using points that are actually on // the curve (or the point at infinity). if !p1.Z.IsZero() && !isJacobianOnS256Curve(&p1) { - t.Errorf( - "#%d first point is not on the curve -- "+ - "invalid test data", i, - ) + t.Errorf("#%d first point is not on the curve -- "+ + "invalid test data", i) continue } if !p2.Z.IsZero() && !isJacobianOnS256Curve(&p2) { - t.Errorf( - "#%d second point is not on the curve -- "+ - "invalid test data", i, - ) + t.Errorf("#%d second point is not on the curve -- "+ + "invalid test data", i) continue } if !want.Z.IsZero() && !isJacobianOnS256Curve(&want) { - t.Errorf( - "#%d expected point is not on the curve -- "+ - "invalid test data", i, - ) + t.Errorf("#%d expected point is not on the curve -- "+ + "invalid test data", i) continue } // Add the two points. @@ -257,11 +249,8 @@ func TestAddJacobian(t *testing.T) { // Ensure result matches expected. 
if !r.X.Equals(&want.X) || !r.Y.Equals(&want.Y) || !r.Z.Equals(&want.Z) { - t.Errorf( - "#%d wrong result\ngot: (%v, %v, %v)\n"+ - "want: (%v, %v, %v)", i, r.X, r.Y, r.Z, want.X, want.Y, - want.Z, - ) + t.Errorf("#%d wrong result\ngot: (%v, %v, %v)\n"+ + "want: (%v, %v, %v)", i, r.X, r.Y, r.Z, want.X, want.Y, want.Z) continue } } @@ -334,24 +323,18 @@ func TestAddAffine(t *testing.T) { // Ensure the test data is using points that are actually on // the curve (or the point at infinity). if !(x1.Sign() == 0 && y1.Sign() == 0) && !S256().IsOnCurve(x1, y1) { - t.Errorf( - "#%d first point is not on the curve -- "+ - "invalid test data", i, - ) + t.Errorf("#%d first point is not on the curve -- "+ + "invalid test data", i) continue } if !(x2.Sign() == 0 && y2.Sign() == 0) && !S256().IsOnCurve(x2, y2) { - t.Errorf( - "#%d second point is not on the curve -- "+ - "invalid test data", i, - ) + t.Errorf("#%d second point is not on the curve -- "+ + "invalid test data", i) continue } if !(x3.Sign() == 0 && y3.Sign() == 0) && !S256().IsOnCurve(x3, y3) { - t.Errorf( - "#%d expected point is not on the curve -- "+ - "invalid test data", i, - ) + t.Errorf("#%d expected point is not on the curve -- "+ + "invalid test data", i) continue } // Add the two points. @@ -359,10 +342,8 @@ func TestAddAffine(t *testing.T) { // Ensure result matches expected. if rx.Cmp(x3) != 00 || ry.Cmp(y3) != 0 { - t.Errorf( - "#%d wrong result\ngot: (%x, %x)\n"+ - "want: (%x, %x)", i, rx, ry, x3, y3, - ) + t.Errorf("#%d wrong result\ngot: (%x, %x)\n"+ + "want: (%x, %x)", i, rx, ry, x3, y3) continue } } @@ -429,17 +410,13 @@ func TestDoubleJacobian(t *testing.T) { // Ensure the test data is using points that are actually on // the curve (or the point at infinity). if !p1.Z.IsZero() && !isJacobianOnS256Curve(&p1) { - t.Errorf( - "#%d first point is not on the curve -- "+ - "invalid test data", i, - ) + t.Errorf("#%d first point is not on the curve -- "+ + "invalid test data", i) continue } if !want.Z.IsZero() && !isJacobianOnS256Curve(&want) { - t.Errorf( - "#%d expected point is not on the curve -- "+ - "invalid test data", i, - ) + t.Errorf("#%d expected point is not on the curve -- "+ + "invalid test data", i) continue } // Double the point. @@ -447,11 +424,9 @@ func TestDoubleJacobian(t *testing.T) { DoubleNonConst(&p1, &result) // Ensure result matches expected. if !isStrictlyEqual(&result, &want) { - t.Errorf( - "#%d wrong result\ngot: (%v, %v, %v)\n"+ - "want: (%v, %v, %v)", i, result.X, result.Y, result.Z, - want.X, want.Y, want.Z, - ) + t.Errorf("#%d wrong result\ngot: (%v, %v, %v)\n"+ + "want: (%v, %v, %v)", i, result.X, result.Y, result.Z, + want.X, want.Y, want.Z) continue } } @@ -506,17 +481,13 @@ func TestDoubleAffine(t *testing.T) { // Ensure the test data is using points that are actually on // the curve (or the point at infinity). if !(x1.Sign() == 0 && y1.Sign() == 0) && !S256().IsOnCurve(x1, y1) { - t.Errorf( - "#%d first point is not on the curve -- "+ - "invalid test data", i, - ) + t.Errorf("#%d first point is not on the curve -- "+ + "invalid test data", i) continue } if !(x3.Sign() == 0 && y3.Sign() == 0) && !S256().IsOnCurve(x3, y3) { - t.Errorf( - "#%d expected point is not on the curve -- "+ - "invalid test data", i, - ) + t.Errorf("#%d expected point is not on the curve -- "+ + "invalid test data", i) continue } // Double the point. @@ -524,10 +495,8 @@ func TestDoubleAffine(t *testing.T) { // Ensure result matches expected. 
if rx.Cmp(x3) != 00 || ry.Cmp(y3) != 0 { - t.Errorf( - "#%d wrong result\ngot: (%x, %x)\n"+ - "want: (%x, %x)", i, rx, ry, x3, y3, - ) + t.Errorf("#%d wrong result\ngot: (%x, %x)\n"+ + "want: (%x, %x)", i, rx, ry, x3, y3) continue } } @@ -584,10 +553,8 @@ func TestBaseMult(t *testing.T) { } x, y := s256.ScalarBaseMult(k.Bytes()) if fmt.Sprintf("%X", x) != e.x || fmt.Sprintf("%X", y) != e.y { - t.Errorf( - "%d: bad output for k=%s: got (%X, %X), want (%s, %s)", i, - e.k, x, y, e.x, e.y, - ) + t.Errorf("%d: bad output for k=%s: got (%X, %X), want (%s, %s)", i, + e.k, x, y, e.x, e.y) } if testing.Short() && i > 5 { break @@ -601,17 +568,15 @@ func TestBaseMultVerify(t *testing.T) { for i := 0; i < 30; i++ { data := make([]byte, bytes) _, err := rand.Read(data) - if chk.E(err) { + if err != nil { t.Errorf("failed to read random data for %d", i) continue } x, y := s256.ScalarBaseMult(data) xWant, yWant := s256.ScalarMult(s256.Gx, s256.Gy, data) if x.Cmp(xWant) != 0 || y.Cmp(yWant) != 0 { - t.Errorf( - "%d: bad output for %X: got (%X, %X), want (%X, %X)", - i, data, x, y, xWant, yWant, - ) + t.Errorf("%d: bad output for %X: got (%X, %X), want (%X, %X)", + i, data, x, y, xWant, yWant) } if testing.Short() && i > 2 { break @@ -654,10 +619,8 @@ func TestScalarMult(t *testing.T) { yWant, _ := new(big.Int).SetString(test.ry, 16) xGot, yGot := s256.ScalarMult(x, y, k.Bytes()) if xGot.Cmp(xWant) != 0 || yGot.Cmp(yWant) != 0 { - t.Fatalf( - "%d: bad output: got (%X, %X), want (%X, %X)", i, xGot, - yGot, xWant, yWant, - ) + t.Fatalf("%d: bad output: got (%X, %X), want (%X, %X)", i, xGot, + yGot, xWant, yWant) } } } @@ -675,7 +638,7 @@ func TestScalarMultRand(t *testing.T) { for i := 0; i < 1024; i++ { data := make([]byte, 32) _, err := rand.Read(data) - if chk.E(err) { + if err != nil { t.Fatalf("failed to read random data at %d", i) break } @@ -683,10 +646,8 @@ func TestScalarMultRand(t *testing.T) { exponent.Mul(exponent, new(big.Int).SetBytes(data)) xWant, yWant := s256.ScalarBaseMult(exponent.Bytes()) if x.Cmp(xWant) != 0 || y.Cmp(yWant) != 0 { - t.Fatalf( - "%d: bad output for %X: got (%X, %X), want (%X, %X)", i, - data, x, y, xWant, yWant, - ) + t.Fatalf("%d: bad output for %X: got (%X, %X), want (%X, %X)", i, + data, x, y, xWant, yWant) break } } @@ -838,7 +799,7 @@ func TestSplitKRand(t *testing.T) { for i := 0; i < 1024; i++ { bytesK := make([]byte, 32) _, err := rand.Read(bytesK) - if chk.E(err) { + if err != nil { t.Fatalf("failed to read random data at %d", i) break } @@ -863,7 +824,7 @@ func TestSplitKRand(t *testing.T) { func testKeyGeneration(t *testing.T, c *KoblitzCurve, tag string) { priv, err := NewSecretKey() - if chk.E(err) { + if err != nil { t.Errorf("%s: error: %s", tag, err) return } @@ -887,10 +848,8 @@ func checkNAFEncoding(pos, neg []byte, origValue *big.Int) error { return fmt.Errorf("positive has leading zero -- got %x", pos) } if len(neg) > len(pos) { - return fmt.Errorf( - "negative has len %d > pos len %d", len(neg), - len(pos), - ) + return fmt.Errorf("negative has len %d > pos len %d", len(neg), + len(pos)) } // Ensure the result doesn't have any adjacent non-zero digits. 
gotPos := new(big.Int).SetBytes(pos) @@ -900,10 +859,8 @@ func checkNAFEncoding(pos, neg []byte, origValue *big.Int) error { for bit := 1; bit < posOrNeg.BitLen(); bit++ { thisBit := posOrNeg.Bit(bit) if prevBit == 1 && thisBit == 1 { - return fmt.Errorf( - "adjacent non-zero digits found at bit pos %d", - bit-1, - ) + return fmt.Errorf("adjacent non-zero digits found at bit pos %d", + bit-1) } prevBit = thisBit } @@ -911,10 +868,8 @@ func checkNAFEncoding(pos, neg []byte, origValue *big.Int) error { // NAF representation sum back to the original value. gotValue := new(big.Int).Sub(gotPos, gotNeg) if origValue.Cmp(gotValue) != 0 { - return fmt.Errorf( - "pos-neg is not original value: got %x, want %x", - gotValue, origValue, - ) + return fmt.Errorf("pos-neg is not original value: got %x, want %x", + gotValue, origValue) } return nil } diff --git a/ec/chaincfg/deployment_time_frame.go b/ec/chaincfg/deployment_time_frame.go index 42253c4..3d8da00 100644 --- a/ec/chaincfg/deployment_time_frame.go +++ b/ec/chaincfg/deployment_time_frame.go @@ -4,13 +4,12 @@ import ( "fmt" "time" - "orly.dev/chk" "orly.dev/ec/wire" ) var ( // ErrNoBlockClock is returned when an operation fails due to lack of - // synchronization with the current up-to-date block clock. + // synchornization with the current up to date block clock. ErrNoBlockClock = fmt.Errorf("no block clock synchronized") ) @@ -89,7 +88,7 @@ func (m *MedianTimeDeploymentStarter) HasStarted(blkHeader *wire.BlockHeader) ( return true, nil } medianTime, err := m.blockClock.PastMedianTime(blkHeader) - if chk.E(err) { + if err != nil { return false, err } // We check both after and equal here as after will fail for equivalent @@ -130,7 +129,7 @@ func (m *MedianTimeDeploymentEnder) HasEnded(blkHeader *wire.BlockHeader) ( return false, nil } medianTime, err := m.blockClock.PastMedianTime(blkHeader) - if chk.E(err) { + if err != nil { return false, err } // We check both after and equal here as after will fail for equivalent diff --git a/ec/chaincfg/params.go b/ec/chaincfg/params.go index 4121fbd..dfa5b63 100644 --- a/ec/chaincfg/params.go +++ b/ec/chaincfg/params.go @@ -5,7 +5,6 @@ import ( "math/big" "time" - "orly.dev/chk" "orly.dev/ec/chainhash" "orly.dev/ec/wire" ) @@ -480,7 +479,7 @@ var MainNetParams = Params{ // hard-coded, and therefore known good, hashes. 
func newHashFromStr(hexStr string) *chainhash.Hash { hash, err := chainhash.NewHashFromStr(hexStr) - if chk.E(err) { + if err != nil { // Ordinarily I don't like panics in library code since it // can take applications down without them having a chance to // recover which is extremely annoying, however an exception is diff --git a/ec/chainhash/hash.go b/ec/chainhash/hash.go index c188834..5b1f0b0 100644 --- a/ec/chainhash/hash.go +++ b/ec/chainhash/hash.go @@ -9,10 +9,8 @@ import ( "encoding/json" "fmt" - "github.com/minio/sha256-simd" - "orly.dev/chk" - "orly.dev/hex" + "orly.dev/sha256" ) const ( @@ -125,11 +123,11 @@ func (hash *Hash) UnmarshalJSON(input []byte) error { } var sh string err := json.Unmarshal(input, &sh) - if chk.E(err) { + if err != nil { return err } newHash, err := NewHashFromStr(sh) - if chk.E(err) { + if err != nil { return err } return hash.SetBytes(newHash[:]) @@ -140,7 +138,7 @@ func (hash *Hash) UnmarshalJSON(input []byte) error { func NewHash(newHash []byte) (*Hash, error) { var sh Hash err := sh.SetBytes(newHash) - if chk.E(err) { + if err != nil { return nil, err } return &sh, err @@ -176,7 +174,7 @@ func TaggedHash(tag []byte, msgs ...[]byte) *Hash { func NewHashFromStr(hash string) (*Hash, error) { ret := new(Hash) err := Decode(ret, hash) - if chk.E(err) { + if err != nil { return nil, err } return ret, nil @@ -205,7 +203,7 @@ func Decode(dst *Hash, src string) error { reversedHash[HashSize-hex.DecLen(len(srcBytes)):], srcBytes, ) - if chk.E(err) { + if err != nil { return err } // Reverse copy from the temporary hash to destination. Because the @@ -221,7 +219,7 @@ func Decode(dst *Hash, src string) error { func decodeLegacy(dst *Hash, src []byte) error { var hashBytes []byte err := json.Unmarshal(src, &hashBytes) - if chk.E(err) { + if err != nil { return err } if len(hashBytes) != HashSize { diff --git a/ec/chainhash/hash_test.go b/ec/chainhash/hash_test.go index b5e47d8..92a4456 100644 --- a/ec/chainhash/hash_test.go +++ b/ec/chainhash/hash_test.go @@ -7,28 +7,23 @@ package chainhash import ( "bytes" "testing" - - "orly.dev/chk" ) // mainNetGenesisHash is the hash of the first block in the block chain for the // main network (genesis block). -var mainNetGenesisHash = Hash( - [HashSize]byte{ - // Make go vet happy. - 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, - 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, - 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, - 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, - }, -) +var mainNetGenesisHash = Hash([HashSize]byte{ // Make go vet happy. + 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, + 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, + 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, + 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, +}) // TestHash tests the Hash API. func TestHash(t *testing.T) { // Hash of block 234439. blockHashStr := "14a0810ac680a3eb3f82edc878cea25ec41d6b790744e5daeef" blockHash, err := NewHashFromStr(blockHashStr) - if chk.E(err) { + if err != nil { t.Errorf("NewHashFromStr: %v", err) } // Hash of block 234440 as byte slice. @@ -39,40 +34,32 @@ func TestHash(t *testing.T) { 0xa6, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, } hash, err := NewHash(buf) - if chk.E(err) { + if err != nil { t.Errorf("NewHash: unexpected error %v", err) } // Ensure proper size. 
if len(hash) != HashSize { - t.Errorf( - "NewHash: hash length mismatch - got: %v, want: %v", - len(hash), HashSize, - ) + t.Errorf("NewHash: hash length mismatch - got: %v, want: %v", + len(hash), HashSize) } // Ensure contents match. if !bytes.Equal(hash[:], buf) { - t.Errorf( - "NewHash: hash contents mismatch - got: %v, want: %v", - hash[:], buf, - ) + t.Errorf("NewHash: hash contents mismatch - got: %v, want: %v", + hash[:], buf) } // Ensure contents of hash of block 234440 don't match 234439. if hash.IsEqual(blockHash) { - t.Errorf( - "IsEqual: hash contents should not match - got: %v, want: %v", - hash, blockHash, - ) + t.Errorf("IsEqual: hash contents should not match - got: %v, want: %v", + hash, blockHash) } // Set hash from byte slice and ensure contents match. err = hash.SetBytes(blockHash.CloneBytes()) - if chk.E(err) { + if err != nil { t.Errorf("SetBytes: %v", err) } if !hash.IsEqual(blockHash) { - t.Errorf( - "IsEqual: hash contents mismatch - got: %v, want: %v", - hash, blockHash, - ) + t.Errorf("IsEqual: hash contents mismatch - got: %v, want: %v", + hash, blockHash) } // Ensure nil hashes are handled properly. if !(*Hash)(nil).IsEqual(nil) { @@ -83,13 +70,13 @@ func TestHash(t *testing.T) { } // Invalid size for SetBytes. err = hash.SetBytes([]byte{0x00}) - if !chk.E(err) { + if err == nil { t.Errorf("SetBytes: failed to received expected err - got: nil") } // Invalid size for NewHash. invalidHash := make([]byte, HashSize+1) _, err = NewHash(invalidHash) - if !chk.E(err) { + if err == nil { t.Errorf("NewHash: failed to received expected err - got: nil") } } @@ -98,21 +85,16 @@ func TestHash(t *testing.T) { func TestHashString(t *testing.T) { // Block 100000 hash. wantStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" - hash := Hash( - [HashSize]byte{ - // Make go vet happy. - 0x06, 0xe5, 0x33, 0xfd, 0x1a, 0xda, 0x86, 0x39, - 0x1f, 0x3f, 0x6c, 0x34, 0x32, 0x04, 0xb0, 0xd2, - 0x78, 0xd4, 0xaa, 0xec, 0x1c, 0x0b, 0x20, 0xaa, - 0x27, 0xba, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, - }, - ) + hash := Hash([HashSize]byte{ // Make go vet happy. + 0x06, 0xe5, 0x33, 0xfd, 0x1a, 0xda, 0x86, 0x39, + 0x1f, 0x3f, 0x6c, 0x34, 0x32, 0x04, 0xb0, 0xd2, + 0x78, 0xd4, 0xaa, 0xec, 0x1c, 0x0b, 0x20, 0xaa, + 0x27, 0xba, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + }) hashStr := hash.String() if hashStr != wantStr { - t.Errorf( - "String: wrong hash string - got %v, want %v", - hashStr, wantStr, - ) + t.Errorf("String: wrong hash string - got %v, want %v", + hashStr, wantStr) } } @@ -186,7 +168,7 @@ func TestHashString(t *testing.T) { // if err != test.err { // t.Errorf(unexpectedErrStr, i, err, test.err) // continue -// } else if chk.E(err) { +// } else if err != nil { // // Got expected error. Move on to the next test. 
// continue // } @@ -202,16 +184,16 @@ func TestHashString(t *testing.T) { // hashStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506" // legacyHashStr := []byte("[6,229,51,253,26,218,134,57,31,63,108,52,50,4,176,210,120,212,170,236,28,11,32,170,39,186,3,0,0,0,0,0]") // hash, err := NewHashFromStr(hashStr) -// if chk.E(err) { +// if err != nil { // t.Errorf("NewHashFromStr error:%v, hashStr:%s", err, hashStr) // } // hashBytes, err := json.Marshal(hash) -// if chk.E(err) { +// if err != nil { // t.Errorf("Marshal json error:%v, hash:%v", err, hashBytes) // } // var newHash Hash // err = json.Unmarshal(hashBytes, &newHash) -// if chk.E(err) { +// if err != nil { // t.Errorf("Unmarshal json error:%v, hash:%v", err, hashBytes) // } // if !hash.IsEqual(&newHash) { @@ -219,7 +201,7 @@ func TestHashString(t *testing.T) { // newHash.String(), hashStr) // } // err = newHash.Unmarshal(legacyHashStr) -// if chk.E(err) { +// if err != nil { // t.Errorf("Unmarshal legacy json error:%v, hash:%v", err, legacyHashStr) // } // if !hash.IsEqual(&newHash) { diff --git a/ec/chainhash/hashfuncs.go b/ec/chainhash/hashfuncs.go index c4321d1..a21b719 100644 --- a/ec/chainhash/hashfuncs.go +++ b/ec/chainhash/hashfuncs.go @@ -5,7 +5,7 @@ package chainhash -import "github.com/minio/sha256-simd" +import "orly.dev/sha256" // HashB calculates hash(b) and returns the resulting bytes. func HashB(b []byte) []byte { diff --git a/ec/ciphering_test.go b/ec/ciphering_test.go index 24ea84a..b2a07c2 100644 --- a/ec/ciphering_test.go +++ b/ec/ciphering_test.go @@ -7,27 +7,23 @@ package btcec import ( "bytes" "testing" - - "orly.dev/chk" ) func TestGenerateSharedSecret(t *testing.T) { privKey1, err := NewSecretKey() - if chk.E(err) { + if err != nil { t.Errorf("secret key generation error: %s", err) return } privKey2, err := NewSecretKey() - if chk.E(err) { + if err != nil { t.Errorf("secret key generation error: %s", err) return } secret1 := GenerateSharedSecret(privKey1, privKey2.PubKey()) secret2 := GenerateSharedSecret(privKey2, privKey1.PubKey()) if !bytes.Equal(secret1, secret2) { - t.Errorf( - "ECDH failed, secrets mismatch - first: %x, second: %x", - secret1, secret2, - ) + t.Errorf("ECDH failed, secrets mismatch - first: %x, second: %x", + secret1, secret2) } } diff --git a/ec/curve.go b/ec/curve.go index e17aee9..4423f32 100644 --- a/ec/curve.go +++ b/ec/curve.go @@ -6,7 +6,6 @@ package btcec import ( "fmt" - "orly.dev/chk" "orly.dev/ec/secp256k1" ) @@ -84,7 +83,7 @@ func ParseJacobian(point []byte) (JacobianPoint, error) { return infinityPoint, nil } noncePk, err := secp256k1.ParsePubKey(point) - if chk.E(err) { + if err != nil { return JacobianPoint{}, err } noncePk.AsJacobian(&result) diff --git a/ec/ecdsa/bench_test.go b/ec/ecdsa/bench_test.go index 0ddd996..afb2e17 100644 --- a/ec/ecdsa/bench_test.go +++ b/ec/ecdsa/bench_test.go @@ -8,7 +8,6 @@ package ecdsa import ( "testing" - "orly.dev/chk" "orly.dev/ec/secp256k1" "orly.dev/hex" ) @@ -19,7 +18,7 @@ import ( // must only) be called with hard-coded values. func hexToModNScalar(s string) *secp256k1.ModNScalar { b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } var scalar secp256k1.ModNScalar @@ -35,7 +34,7 @@ func hexToModNScalar(s string) *secp256k1.ModNScalar { // called with hard-coded values. 
func hexToFieldVal(s string) *secp256k1.FieldVal { b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } var f secp256k1.FieldVal @@ -153,7 +152,7 @@ func BenchmarkRecoverCompact(b *testing.B) { msgHash := hexToBytes("c301ba9de5d6053caad9f5eb46523f007702add2c62fa39de03146a36b8026b7") // Ensure a valid compact signature is being benchmarked. pubKey, wasCompressed, err := RecoverCompact(compactSig, msgHash) - if chk.E(err) { + if err != nil { b.Fatalf("unexpected err: %v", err) } if !wasCompressed { diff --git a/ec/ecdsa/example_test.go b/ec/ecdsa/example_test.go index 30152f3..3b4ca8b 100644 --- a/ec/ecdsa/example_test.go +++ b/ec/ecdsa/example_test.go @@ -13,7 +13,7 @@ package ecdsa_test // // Decode a hex-encoded secret key. // pkBytes, err := hex.Dec("22a47fa09a223f2aa079edf85a7c2d4f87" + // "20ee63e502ee2869afab7de234b80c") -// if chk.E(err) { +// if err != nil { // fmt.Println(err) // return // } @@ -44,12 +44,12 @@ package ecdsa_test // // Decode hex-encoded serialized public key. // pubKeyBytes, err := hex.Dec("02a673638cb9587cb68ea08dbef685c" + // "6f2d2a751a8b3c6f2a7e9a4999e6e4bfaf5") -// if chk.E(err) { +// if err != nil { // fmt.Println(err) // return // } // pubKey, err := secp256k1.ParsePubKey(pubKeyBytes) -// if chk.E(err) { +// if err != nil { // fmt.Println(err) // return // } @@ -58,12 +58,12 @@ package ecdsa_test // sigBytes, err := hex.Dec("3045022100fcc0a8768cfbcefcf2cadd7cfb0" + // "fb18ed08dd2e2ae84bef1a474a3d351b26f0302200fc1a350b45f46fa0010139130" + // "2818d748c2b22615511a3ffd5bb638bd777207") -// if chk.E(err) { +// if err != nil { // fmt.Println(err) // return // } // signature, err := ecdsa.ParseDERSignature(sigBytes) -// if chk.E(err) { +// if err != nil { // fmt.Println(err) // return // } diff --git a/ec/ecdsa/signature_test.go b/ec/ecdsa/signature_test.go index 4121723..355f57c 100644 --- a/ec/ecdsa/signature_test.go +++ b/ec/ecdsa/signature_test.go @@ -12,10 +12,10 @@ import ( "bytes" "errors" "math/rand" + "orly.dev/chk" "testing" "time" - "orly.dev/chk" "orly.dev/ec/secp256k1" "orly.dev/hex" ) @@ -26,7 +26,7 @@ import ( // hard-coded values. func hexToBytes(s string) []byte { b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } return b @@ -880,7 +880,7 @@ func TestSignatureIsEqual(t *testing.T) { // // or not the signature was for a compressed public key are the // // expected values. 
// gotPubKey, gotCompressed, err := RecoverCompact(gotSig, hash) -// if chk.E(err) { +// if err != nil { // t.Errorf("%s: unexpected error when recovering: %v", test.name, // err) // continue @@ -1092,7 +1092,7 @@ func TestSignAndRecoverCompactRandom(t *testing.T) { gotSig := SignCompact(secKey, hash[:], compressed) gotPubKey, gotCompressed, err := RecoverCompact(gotSig, hash[:]) - if chk.E(err) { + if err != nil { t.Fatalf( "unexpected err: %v\nsig: %x\nhash: %x\nsecret key: %x", err, gotSig, hash, secKey.Serialize(), @@ -1120,7 +1120,7 @@ func TestSignAndRecoverCompactRandom(t *testing.T) { randBit := rng.Intn(7) badSig[randByte] ^= 1 << randBit badPubKey, _, err := RecoverCompact(badSig, hash[:]) - if !chk.E(err) && badPubKey.IsEqual(wantPubKey) { + if err == nil && badPubKey.IsEqual(wantPubKey) { t.Fatalf( "recovered public key for bad sig: %x\nhash: %x\n"+ "secret key: %x", badSig, hash, secKey.Serialize(), @@ -1135,7 +1135,7 @@ func TestSignAndRecoverCompactRandom(t *testing.T) { randBit = rng.Intn(7) badHash[randByte] ^= 1 << randBit badPubKey, _, err = RecoverCompact(gotSig, badHash[:]) - if !chk.E(err) && badPubKey.IsEqual(wantPubKey) { + if err == nil && badPubKey.IsEqual(wantPubKey) { t.Fatalf( "recovered public key for bad hash: %x\nsig: %x\n"+ "secret key: %x", badHash, gotSig, secKey.Serialize(), diff --git a/ec/ecdsa/util_test.go b/ec/ecdsa/util_test.go new file mode 100644 index 0000000..444d26b --- /dev/null +++ b/ec/ecdsa/util_test.go @@ -0,0 +1,9 @@ +package ecdsa_test + +import ( + "orly.dev/lol" +) + +var ( + log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf +) diff --git a/ec/field_test.go b/ec/field_test.go index 6c3b545..9e5d164 100644 --- a/ec/field_test.go +++ b/ec/field_test.go @@ -7,9 +7,9 @@ package btcec import ( "math/rand" + "orly.dev/chk" "testing" - "orly.dev/chk" "orly.dev/hex" ) @@ -1189,7 +1189,7 @@ func TestFieldSquareRoot(t *testing.T) { // hard-coded values. 
func hexToBytes(s string) []byte { b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } return b diff --git a/ec/fuzz_test.go b/ec/fuzz_test.go index cf94010..df143e1 100644 --- a/ec/fuzz_test.go +++ b/ec/fuzz_test.go @@ -11,7 +11,6 @@ package btcec import ( "testing" - "orly.dev/chk" "orly.dev/hex" ) @@ -29,7 +28,7 @@ func FuzzParsePubKey(f *testing.F) { } for _, pubKey := range recoveryTestPubKeys { seed, err := hex.Dec(pubKey) - if chk.E(err) { + if err != nil { f.Fatal(err) } f.Add(seed) @@ -38,10 +37,10 @@ func FuzzParsePubKey(f *testing.F) { f.Fuzz( func(t *testing.T, input []byte) { key, err := ParsePubKey(input) - if key == nil && !chk.E(err) { + if key == nil && err == nil { panic("key==nil && err==nil") } - if key != nil && chk.E(err) { + if key != nil && err != nil { panic("key!=nil yet err!=nil") } }, diff --git a/ec/musig2/bench_test.go b/ec/musig2/bench_test.go index b4b06f6..8cadcbd 100644 --- a/ec/musig2/bench_test.go +++ b/ec/musig2/bench_test.go @@ -8,7 +8,6 @@ import ( "fmt" "testing" - "orly.dev/chk" "orly.dev/ec" "orly.dev/ec/schnorr" "orly.dev/hex" @@ -21,7 +20,7 @@ var ( func hexToBytes(s string) []byte { b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } return b @@ -29,7 +28,7 @@ func hexToBytes(s string) []byte { func hexToModNScalar(s string) *btcec.ModNScalar { b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } var scalar btcec.ModNScalar @@ -41,12 +40,12 @@ func hexToModNScalar(s string) *btcec.ModNScalar { func genSigner(t *testing.B) signer { privKey, err := btcec.NewSecretKey() - if chk.E(err) { + if err != nil { t.Fatalf("unable to gen priv key: %v", err) } pubKey := privKey.PubKey() nonces, err := GenNonces(WithPublicKey(pubKey)) - if chk.E(err) { + if err != nil { t.Fatalf("unable to gen nonces: %v", err) } return signer{ @@ -77,7 +76,7 @@ func BenchmarkPartialSign(b *testing.B) { signers[i] = genSigner(b) } combinedNonce, err := AggregateNonces(signers.pubNonces()) - if chk.E(err) { + if err != nil { b.Fatalf("unable to generate combined nonce: %v", err) } var sig *PartialSignature @@ -100,7 +99,7 @@ func BenchmarkPartialSign(b *testing.B) { signers[0].nonces.SecNonce, signers[0].privKey, combinedNonce, keys, msg, signOpts..., ) - if chk.E(err) { + if err != nil { b.Fatalf("unable to generate sig: %v", err) } } @@ -132,7 +131,7 @@ func BenchmarkPartialSign(b *testing.B) { // combinedNonce, err := AggregateNonces( // signers.pubNonces(), // ) -// if chk.E(err) { +// if err != nil { // b.Fatalf("unable to generate combined "+ // "nonce: %v", err) // } @@ -145,7 +144,7 @@ func BenchmarkPartialSign(b *testing.B) { // signers[0].nonces.SecNonce, signers[0].privKey, // combinedNonce, signers.keys(), msg, // ) -// if chk.E(err) { +// if err != nil { // b.Fatalf("unable to generate sig: %v", err) // } // keys := signers.keys() @@ -187,7 +186,7 @@ func BenchmarkCombineSigs(b *testing.B) { signers[i] = genSigner(b) } combinedNonce, err := AggregateNonces(signers.pubNonces()) - if chk.E(err) { + if err != nil { b.Fatalf("unable to generate combined nonce: %v", err) } var msg [32]byte @@ -199,7 +198,7 @@ func BenchmarkCombineSigs(b *testing.B) { signer.nonces.SecNonce, signer.privKey, combinedNonce, signers.keys(), msg, ) - if chk.E(err) { + if err != nil { b.Fatalf( "unable to generate partial sig: %v", err, @@ -239,7 +238,7 @@ func BenchmarkAggregateNonces(b *testing.B) { b.ResetTimer() b.ReportAllocs() pubNonce, err 
:= AggregateNonces(nonces) - if chk.E(err) { + if err != nil { b.Fatalf("unable to generate nonces: %v", err) } testNonce = pubNonce diff --git a/ec/musig2/context.go b/ec/musig2/context.go index 6b8cc29..4c2d5f1 100644 --- a/ec/musig2/context.go +++ b/ec/musig2/context.go @@ -4,8 +4,8 @@ package musig2 import ( "fmt" - "orly.dev/chk" + "orly.dev/ec" "orly.dev/ec/schnorr" ) @@ -214,7 +214,7 @@ func NewContext( WithPublicKey(ctx.pubKey), WithNonceSecretKeyAux(signingKey), ) - if chk.E(err) { + if err != nil { return nil, err } } @@ -266,7 +266,7 @@ func (c *Context) combineSignerKeys() error { c.combinedKey, _, _, err = AggregateKeys( c.opts.keySet, c.shouldSort, keyAggOpts..., ) - if chk.E(err) { + if err != nil { return err } return nil @@ -425,7 +425,7 @@ func (c *Context) NewSession(options ...SessionOption) (*Session, error) { WithNonceSecretKeyAux(c.signingKey), WithNonceCombinedKeyAux(c.combinedKey.FinalKey), ) - if chk.E(err) { + if err != nil { return nil, err } } @@ -469,7 +469,7 @@ func (s *Session) RegisterPubNonce(nonce [PubNonceSize]byte) (bool, error) { // now. if haveAllNonces { combinedNonce, err := AggregateNonces(s.pubNonces) - if chk.E(err) { + if err != nil { return false, err } s.combinedNonce = &combinedNonce @@ -514,7 +514,7 @@ func (s *Session) Sign( // Now that we've generated our signature, we'll make sure to blank out // our signing nonce. s.localNonces = nil - if chk.E(err) { + if err != nil { return nil, err } s.msg = msg diff --git a/ec/musig2/keys.go b/ec/musig2/keys.go index 61c9a7f..9a76ed2 100644 --- a/ec/musig2/keys.go +++ b/ec/musig2/keys.go @@ -7,7 +7,6 @@ import ( "fmt" "sort" - "orly.dev/chk" "orly.dev/ec" "orly.dev/ec/chainhash" "orly.dev/ec/schnorr" @@ -400,7 +399,7 @@ func AggregateKeys( finalKeyJ, parityAcc, opts.tweaks[i-1].Tweak, tweakAcc, opts.tweaks[i-1].IsXOnly, ) - if chk.E(err) { + if err != nil { return nil, nil, nil, err } } diff --git a/ec/musig2/keys_test.go b/ec/musig2/keys_test.go index ee15be2..bde52f0 100644 --- a/ec/musig2/keys_test.go +++ b/ec/musig2/keys_test.go @@ -11,7 +11,6 @@ import ( "testing" "github.com/stretchr/testify/require" - "orly.dev/chk" "orly.dev/ec" "orly.dev/ec/schnorr" @@ -93,7 +92,7 @@ func keysFromIndices( inputKeys[i], err = btcec.ParsePubKey( mustParseHex(pubKeys[keyIdx]), ) - if chk.E(err) { + if err != nil { return nil, err } } @@ -175,7 +174,7 @@ func TestMuSig2KeyAggTestVectors(t *testing.T) { ) // In this set of test cases, we should only get this // for the very first vector. 
- if chk.E(err) { + if err != nil { switch testCase.Comment { case "Invalid public key": require.ErrorIs( diff --git a/ec/musig2/musig2_test.go b/ec/musig2/musig2_test.go index 9fd6d66..2ad309d 100644 --- a/ec/musig2/musig2_test.go +++ b/ec/musig2/musig2_test.go @@ -8,11 +8,9 @@ import ( "sync" "testing" - "github.com/minio/sha256-simd" - "orly.dev/chk" - "orly.dev/ec" "orly.dev/hex" + "orly.dev/sha256" ) const ( @@ -21,7 +19,7 @@ const ( func mustParseHex(str string) []byte { b, err := hex.Dec(str) - if chk.E(err) { + if err != nil { panic(fmt.Errorf("unable to parse hex: %v", err)) } return b @@ -80,7 +78,7 @@ func testMultiPartySign( signSet := make([]*btcec.PublicKey, numSigners) for i := 0; i < numSigners; i++ { privKey, err := btcec.NewSecretKey() - if chk.E(err) { + if err != nil { t.Fatalf("unable to gen priv key: %v", err) } pubKey := privKey.PubKey() @@ -106,17 +104,17 @@ func testMultiPartySign( signCtx, err := NewContext( signerKey, false, ctxOpts..., ) - if chk.E(err) { + if err != nil { t.Fatalf("unable to generate context: %v", err) } if combinedKey == nil { combinedKey, err = signCtx.CombinedKey() - if chk.E(err) { + if err != nil { t.Fatalf("combined key not available: %v", err) } } session, err := signCtx.NewSession() - if chk.E(err) { + if err != nil { t.Fatalf("unable to generate new session: %v", err) } signers[i] = session @@ -135,7 +133,7 @@ func testMultiPartySign( } nonce := otherCtx.PublicNonce() haveAll, err := signer.RegisterPubNonce(nonce) - if chk.E(err) { + if err != nil { t.Fatalf("unable to add public nonce") } if j == len(signers)-1 && !haveAll { @@ -153,14 +151,14 @@ func testMultiPartySign( for i := range signers { signer := signers[i] partialSig, err := signer.Sign(msg) - if chk.E(err) { + if err != nil { t.Fatalf("unable to generate partial sig: %v", err) } // We don't need to combine the signature for the very first // signer, as it already has that partial signature. if i != 0 { haveAll, err := combiner.CombineSig(partialSig) - if chk.E(err) { + if err != nil { t.Fatalf("unable to combine sigs: %v", err) } @@ -248,11 +246,11 @@ func TestMuSigMultiParty(t *testing.T) { func TestMuSigEarlyNonce(t *testing.T) { t.Parallel() privKey1, err := btcec.NewSecretKey() - if chk.E(err) { + if err != nil { t.Fatalf("unable to gen priv key: %v", err) } privKey2, err := btcec.NewSecretKey() - if chk.E(err) { + if err != nil { t.Fatalf("unable to gen priv key: %v", err) } // If we try to make a context, with just the secret key and sorting @@ -266,14 +264,14 @@ func TestMuSigEarlyNonce(t *testing.T) { ctx1, err := NewContext( privKey1, true, WithNumSigners(numSigners), WithEarlyNonceGen(), ) - if chk.E(err) { + if err != nil { t.Fatalf("unable to make ctx: %v", err) } pubKey1 := ctx1.PubKey() ctx2, err := NewContext( privKey2, true, WithKnownSigners(signers), WithEarlyNonceGen(), ) - if chk.E(err) { + if err != nil { t.Fatalf("unable to make ctx: %v", err) } pubKey2 := ctx2.PubKey() @@ -283,16 +281,16 @@ func TestMuSigEarlyNonce(t *testing.T) { t.Fatalf("unepxected error: %v", err) } _, err = ctx2.CombinedKey() - if chk.E(err) { + if err != nil { t.Fatalf("unable to get combined key: %v", err) } // The early nonces _should_ be available at this point. 
nonce1, err := ctx1.EarlySessionNonce() - if chk.E(err) { + if err != nil { t.Fatalf("session nonce not available: %v", err) } nonce2, err := ctx2.EarlySessionNonce() - if chk.E(err) { + if err != nil { t.Fatalf("session nonce not available: %v", err) } // The number of registered signers should still be 1 for both parties. @@ -319,7 +317,7 @@ func TestMuSigEarlyNonce(t *testing.T) { } // We'll now register the other signer for party 1. done, err := ctx1.RegisterSigner(&pubKey2) - if chk.E(err) { + if err != nil { t.Fatalf("unable to register signer: %v", err) } if !done { @@ -332,11 +330,11 @@ func TestMuSigEarlyNonce(t *testing.T) { } // We should be able to create the session at this point. session1, err := ctx1.NewSession() - if chk.E(err) { + if err != nil { t.Fatalf("unable to create new session: %v", err) } session2, err := ctx2.NewSession() - if chk.E(err) { + if err != nil { t.Fatalf("unable to create new session: %v", err) } msg := sha256.Sum256([]byte("let's get taprooty, LN style")) @@ -349,14 +347,14 @@ func TestMuSigEarlyNonce(t *testing.T) { // Now we can exchange nonces to continue with the rest of the signing // process as normal. done, err = session1.RegisterPubNonce(nonce2.PubNonce) - if chk.E(err) { + if err != nil { t.Fatalf("unable to register nonce: %v", err) } if !done { t.Fatalf("signer 1 doesn't have all nonces") } done, err = session2.RegisterPubNonce(nonce1.PubNonce) - if chk.E(err) { + if err != nil { t.Fatalf("unable to register nonce: %v", err) } if !done { @@ -369,15 +367,15 @@ func TestMuSigEarlyNonce(t *testing.T) { } // Sign the message and combine the two partial sigs into one. _, err = session1.Sign(msg) - if chk.E(err) { + if err != nil { t.Fatalf("unable to gen sig: %v", err) } sig2, err := session2.Sign(msg) - if chk.E(err) { + if err != nil { t.Fatalf("unable to gen sig: %v", err) } done, err = session1.CombineSig(sig2) - if chk.E(err) { + if err != nil { t.Fatalf("unable to combine sig: %v", err) } if !done { @@ -390,7 +388,7 @@ func TestMuSigEarlyNonce(t *testing.T) { } // Finally, verify that the final signature is valid. combinedKey, err := ctx1.CombinedKey() - if chk.E(err) { + if err != nil { t.Fatalf("unexpected combined key error: %v", err) } finalSig := session1.FinalSig() diff --git a/ec/musig2/nonces.go b/ec/musig2/nonces.go index 55ace36..cdb06d7 100644 --- a/ec/musig2/nonces.go +++ b/ec/musig2/nonces.go @@ -8,8 +8,8 @@ import ( "encoding/binary" "errors" "io" - "orly.dev/chk" + "orly.dev/ec" "orly.dev/ec/chainhash" "orly.dev/ec/schnorr" @@ -244,12 +244,12 @@ func genNonceAuxBytes( } // Next, we'll write out: len(pk) || pk err := writeBytesPrefix(&w, pubkey, uint8Writer) - if chk.E(err) { + if err != nil { return nil, err } // Next, we'll write out: len(aggpk) || aggpk. err = writeBytesPrefix(&w, opts.combinedKey, uint8Writer) - if chk.E(err) { + if err != nil { return nil, err } switch { @@ -269,13 +269,13 @@ func genNonceAuxBytes( return nil, err } err = writeBytesPrefix(&w, opts.msg, uint64Writer) - if chk.E(err) { + if err != nil { return nil, err } } // Finally we'll write out the auxiliary input. err = writeBytesPrefix(&w, opts.auxInput, uint32Writer) - if chk.E(err) { + if err != nil { return nil, err } // Next we'll write out the interaction/index number which will @@ -318,11 +318,11 @@ func GenNonces(options ...NonceGenOption) (*Nonces, error) { // Using our randomness, pubkey and the set of optional params, generate our // two secret nonces: k1 and k2. 
k1, err := genNonceAuxBytes(randBytes[:], opts.publicKey, 0, opts) - if chk.E(err) { + if err != nil { return nil, err } k2, err := genNonceAuxBytes(randBytes[:], opts.publicKey, 1, opts) - if chk.E(err) { + if err != nil { return nil, err } var k1Mod, k2Mod btcec.ModNScalar @@ -362,7 +362,7 @@ func AggregateNonces(pubNonces [][PubNonceSize]byte) ( // decode. var nonceJ btcec.JacobianPoint nonceJ, err := btcec.ParseJacobian(slicer(pubNonceBytes)) - if chk.E(err) { + if err != nil { return btcec.JacobianPoint{}, err } pubNonceJs[i] = &nonceJ @@ -387,7 +387,7 @@ func AggregateNonces(pubNonces [][PubNonceSize]byte) ( return n[:btcec.PubKeyBytesLenCompressed] }, ) - if chk.E(err) { + if err != nil { return finalNonce, err } combinedNonce2, err := combineNonces( @@ -395,7 +395,7 @@ func AggregateNonces(pubNonces [][PubNonceSize]byte) ( return n[btcec.PubKeyBytesLenCompressed:] }, ) - if chk.E(err) { + if err != nil { return finalNonce, err } copy(finalNonce[:], btcec.JacobianToByteSlice(combinedNonce1)) diff --git a/ec/musig2/nonces_test.go b/ec/musig2/nonces_test.go index 7aadd5e..574d876 100644 --- a/ec/musig2/nonces_test.go +++ b/ec/musig2/nonces_test.go @@ -11,7 +11,6 @@ import ( "testing" "github.com/stretchr/testify/require" - "orly.dev/chk" "orly.dev/hex" ) @@ -61,7 +60,7 @@ func TestMusig2NonceGenTestVectors(t *testing.T) { t.Run( fmt.Sprintf("test_case=%v", i), func(t *testing.T) { nonce, err := GenNonces(withCustomOptions(customOpts)) - if chk.E(err) { + if err != nil { t.Fatalf("err gen nonce aux bytes %v", err) } expectedBytes, _ := hex.Dec(testCase.Expected) @@ -143,7 +142,7 @@ func TestMusig2AggregateNoncesTestVectors(t *testing.T) { t.Run( fmt.Sprintf("invalid_case=%v", i), func(t *testing.T) { _, err := AggregateNonces(testNonces) - require.True(t, chk.E(err)) + require.True(t, err != nil) require.Equal(t, testCase.ExpectedErr, err.Error()) }, ) diff --git a/ec/musig2/sign.go b/ec/musig2/sign.go index 624d9da..6b48329 100644 --- a/ec/musig2/sign.go +++ b/ec/musig2/sign.go @@ -6,8 +6,8 @@ import ( "bytes" "fmt" "io" - "orly.dev/chk" + "orly.dev/ec" "orly.dev/ec/chainhash" "orly.dev/ec/schnorr" @@ -228,13 +228,13 @@ func computeSigningNonce( r1J, err := btcec.ParseJacobian( combinedNonce[:btcec.PubKeyBytesLenCompressed], ) - if chk.E(err) { + if err != nil { return nil, nil, err } r2J, err := btcec.ParseJacobian( combinedNonce[btcec.PubKeyBytesLenCompressed:], ) - if chk.E(err) { + if err != nil { return nil, nil, err } @@ -318,7 +318,7 @@ func Sign( combinedKey, parityAcc, _, err := AggregateKeys( pubKeys, opts.sortKeys, keyAggOpts..., ) - if chk.E(err) { + if err != nil { return nil, err } @@ -328,7 +328,7 @@ func Sign( nonce, nonceBlinder, err := computeSigningNonce( combinedNonce, combinedKey.FinalKey, msg, ) - if chk.E(err) { + if err != nil { return nil, err } @@ -479,7 +479,7 @@ func verifyPartialSig( combinedKey, parityAcc, _, err := AggregateKeys( keySet, opts.sortKeys, keyAggOpts..., ) - if chk.E(err) { + if err != nil { return err } @@ -499,13 +499,13 @@ func verifyPartialSig( r1J, err := btcec.ParseJacobian( combinedNonce[:btcec.PubKeyBytesLenCompressed], ) - if chk.E(err) { + if err != nil { return err } r2J, err := btcec.ParseJacobian( combinedNonce[btcec.PubKeyBytesLenCompressed:], ) - if chk.E(err) { + if err != nil { return err } @@ -521,13 +521,13 @@ func verifyPartialSig( pubNonce1J, err := btcec.ParseJacobian( pubNonce[:btcec.PubKeyBytesLenCompressed], ) - if chk.E(err) { + if err != nil { return err } pubNonce2J, err := btcec.ParseJacobian( 
pubNonce[btcec.PubKeyBytesLenCompressed:], ) - if chk.E(err) { + if err != nil { return err } @@ -574,7 +574,7 @@ func verifyPartialSig( e.SetByteSlice(challengeBytes[:]) signingKey, err := btcec.ParsePubKey(pubKey) - if chk.E(err) { + if err != nil { return err } diff --git a/ec/musig2/sign_test.go b/ec/musig2/sign_test.go index 6e8fd06..29e5196 100644 --- a/ec/musig2/sign_test.go +++ b/ec/musig2/sign_test.go @@ -12,7 +12,6 @@ import ( "testing" "github.com/stretchr/testify/require" - "orly.dev/chk" "orly.dev/ec" "orly.dev/ec/secp256k1" @@ -125,7 +124,7 @@ func TestMusig2SignVerify(t *testing.T) { pubKeys, err := keysFromIndices( t, testCase.Indices, testCases.PubKeys, ) - if chk.E(err) { + if err != nil { require.ErrorIs(t, err, secp256k1.ErrPubKeyNotOnCurve) return } @@ -182,7 +181,7 @@ func TestMusig2SignVerify(t *testing.T) { err = partialSig.Decode( bytes.NewReader(mustParseHex(testCase.Sig)), ) - if chk.E(err) && strings.Contains( + if err != nil && strings.Contains( testCase.Comment, "group size", ) { require.ErrorIs(t, err, ErrPartialSigInvalid) diff --git a/ec/pubkey_test.go b/ec/pubkey_test.go index 8760a14..e2cc519 100644 --- a/ec/pubkey_test.go +++ b/ec/pubkey_test.go @@ -9,7 +9,6 @@ import ( "testing" "github.com/davecgh/go-spew/spew" - "orly.dev/chk" ) type pubKeyTest struct { @@ -24,8 +23,7 @@ var pubKeyTests = []pubKeyTest{ // 0437cd7f8525ceed2324359c2d0ba26006d92d85 { name: "uncompressed ok", - key: []byte{ - 0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, + key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, @@ -39,8 +37,7 @@ var pubKeyTests = []pubKeyTest{ }, { name: "uncompressed x changed", - key: []byte{ - 0x04, 0x15, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, + key: []byte{0x04, 0x15, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, @@ -53,8 +50,7 @@ var pubKeyTests = []pubKeyTest{ }, { name: "uncompressed y changed", - key: []byte{ - 0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, + key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, @@ -67,8 +63,7 @@ var pubKeyTests = []pubKeyTest{ }, { name: "uncompressed claims compressed", - key: []byte{ - 0x03, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, + key: []byte{0x03, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, @@ -81,8 +76,7 @@ var pubKeyTests = []pubKeyTest{ }, { name: "uncompressed as hybrid ok", - key: []byte{ - 0x07, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, + key: []byte{0x07, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, @@ -96,8 +90,7 @@ var pubKeyTests = []pubKeyTest{ }, { name: "uncompressed as hybrid wrong", - key: []byte{ - 0x06, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, + key: []byte{0x06, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 
0x2e, 0xca, 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0, @@ -111,8 +104,7 @@ var pubKeyTests = []pubKeyTest{ // from tx 0b09c51c51ff762f00fb26217269d2a18e77a4fa87d69b3c363ab4df16543f20 { name: "compressed ok (ybit = 0)", - key: []byte{ - 0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b, + key: []byte{0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b, 0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1, 0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21, 0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d, @@ -123,8 +115,7 @@ var pubKeyTests = []pubKeyTest{ // from tx fdeb8e72524e8dab0da507ddbaf5f88fe4a933eb10a66bc4745bb0aa11ea393c { name: "compressed ok (ybit = 1)", - key: []byte{ - 0x03, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33, + key: []byte{0x03, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33, 0x09, 0xfb, 0x14, 0x3e, 0x0e, 0x8f, 0xe3, 0x96, 0x34, 0x25, 0x21, 0x88, 0x7e, 0x97, 0x66, 0x90, 0xb6, 0xb4, 0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e, @@ -134,8 +125,7 @@ var pubKeyTests = []pubKeyTest{ }, { name: "compressed claims uncompressed (ybit = 0)", - key: []byte{ - 0x04, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b, + key: []byte{0x04, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b, 0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1, 0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21, 0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d, @@ -144,8 +134,7 @@ var pubKeyTests = []pubKeyTest{ }, { name: "compressed claims uncompressed (ybit = 1)", - key: []byte{ - 0x05, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33, + key: []byte{0x05, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33, 0x09, 0xfb, 0x14, 0x3e, 0x0e, 0x8f, 0xe3, 0x96, 0x34, 0x25, 0x21, 0x88, 0x7e, 0x97, 0x66, 0x90, 0xb6, 0xb4, 0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e, @@ -159,8 +148,7 @@ var pubKeyTests = []pubKeyTest{ }, { name: "X == P", - key: []byte{ - 0x04, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + key: []byte{0x04, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x2F, 0xb2, 0xe0, @@ -173,8 +161,7 @@ var pubKeyTests = []pubKeyTest{ }, { name: "X > P", - key: []byte{ - 0x04, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + key: []byte{0x04, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFD, 0x2F, 0xb2, 0xe0, @@ -187,8 +174,7 @@ var pubKeyTests = []pubKeyTest{ }, { name: "Y == P", - key: []byte{ - 0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, + key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xFF, 0xFF, @@ -201,8 +187,7 @@ var pubKeyTests = []pubKeyTest{ }, { name: "Y > P", - key: []byte{ - 0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, + key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a, 0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e, 0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca, 0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xFF, 0xFF, @@ -215,8 +200,7 @@ var pubKeyTests = []pubKeyTest{ }, { name: "hybrid", - key: []byte{ - 0x06, 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, + key: []byte{0x06, 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb, 0xac, 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07, 0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9, 0x59, 0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98, 0x48, 0x3a, @@ -233,20 +217,16 @@ var 
pubKeyTests = []pubKeyTest{ func TestPubKeys(t *testing.T) { for _, test := range pubKeyTests { pk, err := ParsePubKey(test.key) - if chk.E(err) { + if err != nil { if test.isValid { - t.Errorf( - "%s pubkey failed when shouldn't %v", - test.name, err, - ) + t.Errorf("%s pubkey failed when shouldn't %v", + test.name, err) } continue } if !test.isValid { - t.Errorf( - "%s counted as valid when it should fail", - test.name, - ) + t.Errorf("%s counted as valid when it should fail", + test.name) continue } var pkStr []byte @@ -259,10 +239,8 @@ func TestPubKeys(t *testing.T) { pkStr = test.key } if !bytes.Equal(test.key, pkStr) { - t.Errorf( - "%s pubkey: serialized keys do not match.", - test.name, - ) + t.Errorf("%s pubkey: serialized keys do not match.", + test.name) spew.Dump(test.key) spew.Dump(pkStr) } @@ -271,38 +249,32 @@ func TestPubKeys(t *testing.T) { func TestPublicKeyIsEqual(t *testing.T) { pubKey1, err := ParsePubKey( - []byte{ - 0x03, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33, + []byte{0x03, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33, 0x09, 0xfb, 0x14, 0x3e, 0x0e, 0x8f, 0xe3, 0x96, 0x34, 0x25, 0x21, 0x88, 0x7e, 0x97, 0x66, 0x90, 0xb6, 0xb4, 0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e, }, ) - if chk.E(err) { + if err != nil { t.Fatalf("failed to parse raw bytes for pubKey1: %v", err) } pubKey2, err := ParsePubKey( - []byte{ - 0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b, + []byte{0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b, 0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1, 0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21, 0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d, }, ) - if chk.E(err) { + if err != nil { t.Fatalf("failed to parse raw bytes for pubKey2: %v", err) } if !pubKey1.IsEqual(pubKey1) { - t.Fatalf( - "value of IsEqual is incorrect, %v is "+ - "equal to %v", pubKey1, pubKey1, - ) + t.Fatalf("value of IsEqual is incorrect, %v is "+ + "equal to %v", pubKey1, pubKey1) } if pubKey1.IsEqual(pubKey2) { - t.Fatalf( - "value of IsEqual is incorrect, %v is not "+ - "equal to %v", pubKey1, pubKey2, - ) + t.Fatalf("value of IsEqual is incorrect, %v is not "+ + "equal to %v", pubKey1, pubKey2) } } @@ -311,11 +283,9 @@ func TestIsCompressed(t *testing.T) { isCompressed := IsCompressedPubKey(test.key) wantCompressed := (test.format == pubkeyCompressed) if isCompressed != wantCompressed { - t.Fatalf( - "%s (%x) pubkey: unexpected compressed result, "+ - "got %v, want %v", test.name, test.key, - isCompressed, wantCompressed, - ) + t.Fatalf("%s (%x) pubkey: unexpected compressed result, "+ + "got %v, want %v", test.name, test.key, + isCompressed, wantCompressed) } } } diff --git a/ec/schnorr/bench_test.go b/ec/schnorr/bench_test.go index fef6100..bb05584 100644 --- a/ec/schnorr/bench_test.go +++ b/ec/schnorr/bench_test.go @@ -9,12 +9,10 @@ import ( "math/big" "testing" - "github.com/minio/sha256-simd" - "orly.dev/chk" - "orly.dev/ec" "orly.dev/ec/secp256k1" "orly.dev/hex" + "orly.dev/sha256" ) // hexToBytes converts the passed hex string into bytes and will panic if there @@ -23,7 +21,7 @@ import ( // hard-coded values. func hexToBytes(s string) []byte { b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } return b @@ -35,7 +33,7 @@ func hexToBytes(s string) []byte { // must only) be called with hard-coded values. 
func hexToModNScalar(s string) *btcec.ModNScalar { b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } var scalar btcec.ModNScalar @@ -51,7 +49,7 @@ func hexToModNScalar(s string) *btcec.ModNScalar { // called with hard-coded values. func hexToFieldVal(s string) *btcec.FieldVal { b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } var f btcec.FieldVal @@ -113,7 +111,7 @@ func BenchmarkSigVerify(b *testing.B) { // Double sha256 of by{0x01, 0x02, 0x03, 0x04} msgHash := sha256.Sum256([]byte("benchmark")) sig, err := Sign(privKey, msgHash[:]) - if chk.E(err) { + if err != nil { b.Fatalf("unable to sign: %v", err) } if !sig.Verify(msgHash[:], pubKey) { diff --git a/ec/schnorr/signature.go b/ec/schnorr/signature.go index 670f386..2b4abfc 100644 --- a/ec/schnorr/signature.go +++ b/ec/schnorr/signature.go @@ -4,8 +4,8 @@ package schnorr import ( "fmt" - "orly.dev/chk" + "orly.dev/ec" "orly.dev/ec/chainhash" "orly.dev/ec/secp256k1" @@ -143,7 +143,7 @@ func schnorrVerify(sig *Signature, hash []byte, pubKeyBytes []byte) error { // // Fail if P is not a point on the curve pubKey, err := ParsePubKey(pubKeyBytes) - if chk.E(err) { + if err != nil { return err } if !pubKey.IsOnCurve() { @@ -482,7 +482,7 @@ func Sign( } sig, err := schnorrSign(&privKeyScalar, &kPrime, pub, hash, opts) kPrime.Zero() - if chk.E(err) { + if err != nil { return nil, err } return sig, nil @@ -503,7 +503,7 @@ func Sign( // Steps 10-15. sig, err := schnorrSign(&privKeyScalar, k, pub, hash, opts) k.Zero() - if chk.E(err) { + if err != nil { // Try again with a new nonce. continue } diff --git a/ec/schnorr/signature_test.go b/ec/schnorr/signature_test.go index 49a7d14..36dafb4 100644 --- a/ec/schnorr/signature_test.go +++ b/ec/schnorr/signature_test.go @@ -7,13 +7,13 @@ package schnorr import ( "errors" + "orly.dev/chk" "strings" "testing" "testing/quick" "github.com/davecgh/go-spew/spew" - "orly.dev/chk" "orly.dev/ec" "orly.dev/ec/secp256k1" "orly.dev/hex" @@ -192,7 +192,7 @@ var bip340TestVectors = []bip340Test{ // the only way it can fail is if there is an error in the test source code. func decodeHex(hexStr string) []byte { b, err := hex.Dec(hexStr) - if chk.E(err) { + if err != nil { panic( "invalid hex string in test source: err " + err.Error() + ", hex: " + hexStr, @@ -218,7 +218,7 @@ func TestSchnorrSign(t *testing.T) { signOpts = []SignOption{CustomNonce(auxBytes)} } sig, err := Sign(privKey, msg, signOpts...) 
- if chk.E(err) { + if err != nil { t.Fatalf("test #%v: sig generation failed: %v", i+1, err) } if strings.ToUpper(hex.Enc(sig.Serialize())) != test.signature { @@ -229,10 +229,10 @@ func TestSchnorrSign(t *testing.T) { } pubKeyBytes := decodeHex(test.publicKey) err = schnorrVerify(sig, msg, pubKeyBytes) - if chk.E(err) { + if err != nil { t.Fail() } - verify := !chk.E(err) + verify := err == nil if test.verifyResult != verify { t.Fatalf( "test #%v: verification mismatch: "+ @@ -248,7 +248,7 @@ func TestSchnorrVerify(t *testing.T) { pubKeyBytes := decodeHex(test.publicKey) _, err := ParsePubKey(pubKeyBytes) switch { - case !test.validPubKey && chk.E(err): + case !test.validPubKey && err != nil: if !errors.Is(err, test.expectErr) { t.Fatalf( "test #%v: pubkey validation should "+ @@ -257,22 +257,22 @@ func TestSchnorrVerify(t *testing.T) { ) } continue - case chk.E(err): + case err != nil: t.Fatalf("test #%v: unable to parse pubkey: %v", i, err) } msg := decodeHex(test.message) sig, err := ParseSignature(decodeHex(test.signature)) - if chk.E(err) { + if err != nil { t.Fatalf("unable to parse sig: %v", err) } err = schnorrVerify(sig, msg, pubKeyBytes) - if chk.E(err) && test.verifyResult { + if err != nil && test.verifyResult { t.Fatalf( "test #%v: verification shouldn't have failed: %v", i+1, err, ) } - verify := !chk.E(err) + verify := err == nil if test.verifyResult != verify { t.Fatalf( "test #%v: verificaiton mismatch: expected "+ @@ -301,7 +301,7 @@ func TestSchnorrSignNoMutate(t *testing.T) { privKey, _ := btcec.SecKeyFromBytes(privBytesCopy[:]) // Generate a signature for secret key with our message. _, err := Sign(privKey, msg[:]) - if chk.E(err) { + if err != nil { t.Logf("unable to gen sig: %v", err) return false } diff --git a/ec/secp256k1/bench_test.go b/ec/secp256k1/bench_test.go index 893a100..6862204 100644 --- a/ec/secp256k1/bench_test.go +++ b/ec/secp256k1/bench_test.go @@ -158,7 +158,7 @@ func BenchmarkParsePubKeyCompressed(b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - _, _ = ParsePubKey(pubKeyBytes) + ParsePubKey(pubKeyBytes) } } @@ -172,6 +172,6 @@ func BenchmarkParsePubKeyUncompressed(b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - _, _ = ParsePubKey(pubKeyBytes) + ParsePubKey(pubKeyBytes) } } diff --git a/ec/secp256k1/curve.go b/ec/secp256k1/curve.go index 19c9323..ff808d0 100644 --- a/ec/secp256k1/curve.go +++ b/ec/secp256k1/curve.go @@ -8,7 +8,6 @@ package secp256k1 import ( "math/bits" - "orly.dev/chk" "orly.dev/hex" ) @@ -34,7 +33,7 @@ import ( // called with hard-coded values. 
func hexToFieldVal(s string) *FieldVal { b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } var f FieldVal @@ -58,7 +57,7 @@ func hexToModNScalar(s string) *ModNScalar { s = "0" + s } b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } var scalar ModNScalar diff --git a/ec/secp256k1/curve_test.go b/ec/secp256k1/curve_test.go index 1910068..a09dc8b 100644 --- a/ec/secp256k1/curve_test.go +++ b/ec/secp256k1/curve_test.go @@ -10,10 +10,9 @@ import ( "math/big" "math/bits" "math/rand" + "orly.dev/chk" "testing" "time" - - "orly.dev/chk" ) var ( diff --git a/ec/secp256k1/ecdh_test.go b/ec/secp256k1/ecdh_test.go index 03a0954..eb40f2b 100644 --- a/ec/secp256k1/ecdh_test.go +++ b/ec/secp256k1/ecdh_test.go @@ -8,18 +8,16 @@ package secp256k1 import ( "bytes" "testing" - - "orly.dev/chk" ) func TestGenerateSharedSecret(t *testing.T) { secKey1, err := GenerateSecretKey() - if chk.E(err) { + if err != nil { t.Errorf("secret key generation error: %s", err) return } secKey2, err := GenerateSecretKey() - if chk.E(err) { + if err != nil { t.Errorf("secret key generation error: %s", err) return } @@ -28,9 +26,7 @@ func TestGenerateSharedSecret(t *testing.T) { secret1 := GenerateSharedSecret(secKey1, pubKey2) secret2 := GenerateSharedSecret(secKey2, pubKey1) if !bytes.Equal(secret1, secret2) { - t.Errorf( - "ECDH failed, secrets mismatch - first: %x, second: %x", - secret1, secret2, - ) + t.Errorf("ECDH failed, secrets mismatch - first: %x, second: %x", + secret1, secret2) } } diff --git a/ec/secp256k1/ellipticadaptor_test.go b/ec/secp256k1/ellipticadaptor_test.go index 2ab7c5f..b494501 100644 --- a/ec/secp256k1/ellipticadaptor_test.go +++ b/ec/secp256k1/ellipticadaptor_test.go @@ -7,10 +7,9 @@ package secp256k1 import ( "math/big" "math/rand" + "orly.dev/chk" "testing" "time" - - "orly.dev/chk" ) // randBytes returns a byte slice of the required size created from a random diff --git a/ec/secp256k1/example_test.go b/ec/secp256k1/example_test.go index 0471f68..a5e05bb 100644 --- a/ec/secp256k1/example_test.go +++ b/ec/secp256k1/example_test.go @@ -11,11 +11,9 @@ import ( "encoding/binary" "fmt" - "github.com/minio/sha256-simd" - "orly.dev/chk" - "orly.dev/ec/secp256k1" "orly.dev/hex" + "orly.dev/sha256" ) // This example demonstrates use of GenerateSharedSecret to encrypt a message @@ -24,7 +22,7 @@ import ( func Example_encryptDecryptMessage() { newAEAD := func(key []byte) (cipher.AEAD, error) { block, err := aes.NewCipher(key) - if chk.E(err) { + if err != nil { return nil, err } return cipher.NewGCM(block) @@ -34,19 +32,19 @@ func Example_encryptDecryptMessage() { "04115c42e757b2efb7671c578530ec191a1359381e6a71127a9d37c486fd30da" + "e57e76dc58f693bd7e7010358ce6b165e483a2921010db67ac11b1b51b651953d2", ) // uncompressed pubkey - if chk.E(err) { + if err != nil { fmt.Println(err) return } pubKey, err := secp256k1.ParsePubKey(pubKeyBytes) - if chk.E(err) { + if err != nil { fmt.Println(err) return } // Derive an ephemeral public/secret keypair for performing ECDHE with // the recipient. ephemeralSecKey, err := secp256k1.GenerateSecretKey() - if chk.E(err) { + if err != nil { fmt.Println(err) return } @@ -74,7 +72,7 @@ func Example_encryptDecryptMessage() { // first (and only) use of a counter. 
plaintext := []byte("test message") aead, err := newAEAD(cipherKey[:]) - if chk.E(err) { + if err != nil { fmt.Println(err) return } @@ -90,7 +88,7 @@ func Example_encryptDecryptMessage() { pkBytes, err := hex.Dec( "a11b0a4e1a132305652ee7a8eb7848f6ad5ea381e3ce20a2c086a2e388230811", ) - if chk.E(err) { + if err != nil { fmt.Println(err) return } @@ -101,7 +99,7 @@ func Example_encryptDecryptMessage() { pubKeyLen := binary.LittleEndian.Uint32(ciphertext[:4]) senderPubKeyBytes := ciphertext[4 : 4+pubKeyLen] senderPubKey, err := secp256k1.ParsePubKey(senderPubKeyBytes) - if chk.E(err) { + if err != nil { fmt.Println(err) return } @@ -115,7 +113,7 @@ func Example_encryptDecryptMessage() { ) // Open the sealed message. aead, err = newAEAD(recoveredCipherKey[:]) - if chk.E(err) { + if err != nil { fmt.Println(err) return } @@ -124,7 +122,7 @@ func Example_encryptDecryptMessage() { nil, nonce, ciphertext[4+pubKeyLen:], senderPubKeyBytes, ) - if chk.E(err) { + if err != nil { fmt.Println(err) return } diff --git a/ec/secp256k1/field_test.go b/ec/secp256k1/field_test.go index 5507682..55a04d3 100644 --- a/ec/secp256k1/field_test.go +++ b/ec/secp256k1/field_test.go @@ -11,11 +11,11 @@ import ( "fmt" "math/big" "math/rand" + "orly.dev/chk" "reflect" "testing" "time" - "orly.dev/chk" "orly.dev/hex" ) diff --git a/ec/secp256k1/loadprecomputed.go b/ec/secp256k1/loadprecomputed.go index acbfdb1..fe35a3e 100644 --- a/ec/secp256k1/loadprecomputed.go +++ b/ec/secp256k1/loadprecomputed.go @@ -11,8 +11,6 @@ import ( "io" "strings" "sync" - - "orly.dev/chk" ) //go:generate go run genprecomps.go @@ -52,11 +50,11 @@ var s256BytePoints = func() func() *bytePointTable { // multiplication. decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(bp)) r, err := zlib.NewReader(decoder) - if chk.E(err) { + if err != nil { panic(err) } serialized, err := io.ReadAll(r) - if chk.E(err) { + if err != nil { panic(err) } // Deserialize the precomputed byte points and set the memory table to diff --git a/ec/secp256k1/modnscalar_test.go b/ec/secp256k1/modnscalar_test.go index 67cf190..ff5a9fe 100644 --- a/ec/secp256k1/modnscalar_test.go +++ b/ec/secp256k1/modnscalar_test.go @@ -9,11 +9,11 @@ import ( "fmt" "math/big" "math/rand" + "orly.dev/chk" "reflect" "testing" "time" - "orly.dev/chk" "orly.dev/hex" ) diff --git a/ec/secp256k1/nonce.go b/ec/secp256k1/nonce.go index b30dd30..e838d11 100644 --- a/ec/secp256k1/nonce.go +++ b/ec/secp256k1/nonce.go @@ -9,7 +9,7 @@ import ( "bytes" "hash" - "github.com/minio/sha256-simd" + "orly.dev/sha256" ) // References: diff --git a/ec/secp256k1/nonce_test.go b/ec/secp256k1/nonce_test.go index 64998a2..c0eccc0 100644 --- a/ec/secp256k1/nonce_test.go +++ b/ec/secp256k1/nonce_test.go @@ -9,10 +9,8 @@ import ( "bytes" "testing" - "github.com/minio/sha256-simd" - "orly.dev/chk" - "orly.dev/hex" + "orly.dev/sha256" ) // hexToBytes converts the passed hex string into bytes and will panic if there @@ -21,7 +19,7 @@ import ( // hard-coded values. 
func hexToBytes(s string) []byte { b, err := hex.Dec(s) - if chk.E(err) { + if err != nil { panic("invalid hex in source file: " + s) } return b diff --git a/ec/secp256k1/precomps/genprecomps.go b/ec/secp256k1/precomps/genprecomps.go index 733d129..7099704 100644 --- a/ec/secp256k1/precomps/genprecomps.go +++ b/ec/secp256k1/precomps/genprecomps.go @@ -11,11 +11,11 @@ package main import ( "fmt" "math/big" + "orly.dev/chk" + "orly.dev/log" "os" - "orly.dev/chk" "orly.dev/ec/secp256k1" - "orly.dev/log" ) // curveParams houses the secp256k1 curve parameters for convenient access. @@ -192,7 +192,7 @@ func endomorphismVectors(lambda *big.Int) (a1, b1, a2, b2 *big.Int) { } // deriveEndomorphismParams calculates and returns parameters needed to make use -// of the secp256k1 endomorphism. TODO: this is unused +// of the secp256k1 endomorphism. func deriveEndomorphismParams() [2]endomorphismParams { // roots returns the solutions of the characteristic polynomial of the // secp256k1 endomorphism. @@ -321,12 +321,12 @@ func main() { } serialized := serializedBytePoints() embedded, err := os.Create("secp256k1/rawbytepoints.bin") - if chk.E(err) { + if err != nil { log.F.Ln(err) os.Exit(1) } n, err := embedded.Write(serialized) - if chk.E(err) { + if err != nil { panic(err) } if n != len(serialized) { diff --git a/ec/secp256k1/pubkey_test.go b/ec/secp256k1/pubkey_test.go index df83000..6480d57 100644 --- a/ec/secp256k1/pubkey_test.go +++ b/ec/secp256k1/pubkey_test.go @@ -9,8 +9,6 @@ import ( "bytes" "errors" "testing" - - "orly.dev/chk" ) // TestParsePubKey ensures that public keys are properly parsed according @@ -22,217 +20,209 @@ func TestParsePubKey(t *testing.T) { err error // expected error wantX string // expected x coordinate wantY string // expected y coordinate - }{ - { - name: "uncompressed ok", - key: "04" + - "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + - "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - err: nil, - wantX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", - wantY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - }, { - name: "uncompressed x changed (not on curve)", - key: "04" + - "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + - "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - err: ErrPubKeyNotOnCurve, - }, { - name: "uncompressed y changed (not on curve)", - key: "04" + - "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + - "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4", - err: ErrPubKeyNotOnCurve, - }, { - name: "uncompressed claims compressed", - key: "03" + - "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + - "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - err: ErrPubKeyInvalidFormat, - }, { - name: "uncompressed as hybrid ok (ybit = 0)", - key: "06" + - "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + - "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c", - err: nil, - wantX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", - wantY: "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c", - }, { - name: "uncompressed as hybrid ok (ybit = 1)", - key: "07" + - "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + - "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - err: nil, - wantX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", - wantY: 
"b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - }, { - name: "uncompressed as hybrid wrong oddness", - key: "06" + - "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + - "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - err: ErrPubKeyMismatchedOddness, - }, { - name: "compressed ok (ybit = 0)", - key: "02" + - "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d", - err: nil, - wantX: "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d", - wantY: "0890ff84d7999d878a57bee170e19ef4b4803b4bdede64503a6ac352b03c8032", - }, { - name: "compressed ok (ybit = 1)", - key: "03" + - "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e", - err: nil, - wantX: "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e", - wantY: "499dd7852849a38aa23ed9f306f07794063fe7904e0f347bc209fdddaf37691f", - }, { - name: "compressed claims uncompressed (ybit = 0)", - key: "04" + - "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d", - err: ErrPubKeyInvalidFormat, - }, { - name: "compressed claims uncompressed (ybit = 1)", - key: "04" + - "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e", - err: ErrPubKeyInvalidFormat, - }, { - name: "compressed claims hybrid (ybit = 0)", - key: "06" + - "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d", - err: ErrPubKeyInvalidFormat, - }, { - name: "compressed claims hybrid (ybit = 1)", - key: "07" + - "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e", - err: ErrPubKeyInvalidFormat, - }, { - name: "compressed with invalid x coord (ybit = 0)", - key: "03" + - "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4c", - err: ErrPubKeyNotOnCurve, - }, { - name: "compressed with invalid x coord (ybit = 1)", - key: "03" + - "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448d", - err: ErrPubKeyNotOnCurve, - }, { - name: "empty", - key: "", - err: ErrPubKeyInvalidLen, - }, { - name: "wrong length", - key: "05", - err: ErrPubKeyInvalidLen, - }, { - name: "uncompressed x == p", - key: "04" + - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f" + - "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - err: ErrPubKeyXTooBig, - }, { - // The y coordinate produces a valid point for x == 1 (mod p), but it - // should fail to parse instead of wrapping around. - name: "uncompressed x > p (p + 1 -- aka 1)", - key: "04" + - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30" + - "bde70df51939b94c9c24979fa7dd04ebd9b3572da7802290438af2a681895441", - err: ErrPubKeyXTooBig, - }, { - name: "uncompressed y == p", - key: "04" + - "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - err: ErrPubKeyYTooBig, - }, { - // The x coordinate produces a valid point for y == 1 (mod p), but it - // should fail to parse instead of wrapping around. 
- name: "uncompressed y > p (p + 1 -- aka 1)", - key: "04" + - "1fe1e5ef3fceb5c135ab7741333ce5a6e80d68167653f6b2b24bcbcfaaaff507" + - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - err: ErrPubKeyYTooBig, - }, { - name: "compressed x == p (ybit = 0)", - key: "02" + - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - err: ErrPubKeyXTooBig, - }, { - name: "compressed x == p (ybit = 1)", - key: "03" + - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - err: ErrPubKeyXTooBig, - }, { - // This would be valid for x == 2 (mod p), but it should fail to parse - // instead of wrapping around. - name: "compressed x > p (p + 2 -- aka 2) (ybit = 0)", - key: "02" + - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc31", - err: ErrPubKeyXTooBig, - }, { - // This would be valid for x == 1 (mod p), but it should fail to parse - // instead of wrapping around. - name: "compressed x > p (p + 1 -- aka 1) (ybit = 1)", - key: "03" + - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - err: ErrPubKeyXTooBig, - }, { - name: "hybrid x == p (ybit = 1)", - key: "07" + - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f" + - "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - err: ErrPubKeyXTooBig, - }, { - // The y coordinate produces a valid point for x == 1 (mod p), but it - // should fail to parse instead of wrapping around. - name: "hybrid x > p (p + 1 -- aka 1) (ybit = 0)", - key: "06" + - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30" + - "bde70df51939b94c9c24979fa7dd04ebd9b3572da7802290438af2a681895441", - err: ErrPubKeyXTooBig, - }, { - name: "hybrid y == p (ybit = 0 when mod p)", - key: "06" + - "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", - err: ErrPubKeyYTooBig, - }, { - // The x coordinate produces a valid point for y == 1 (mod p), but it - // should fail to parse instead of wrapping around. 
- name: "hybrid y > p (p + 1 -- aka 1) (ybit = 1 when mod p)", - key: "07" + - "1fe1e5ef3fceb5c135ab7741333ce5a6e80d68167653f6b2b24bcbcfaaaff507" + - "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", - err: ErrPubKeyYTooBig, - }, - } + }{{ + name: "uncompressed ok", + key: "04" + + "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + + "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + err: nil, + wantX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", + wantY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + }, { + name: "uncompressed x changed (not on curve)", + key: "04" + + "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + + "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + err: ErrPubKeyNotOnCurve, + }, { + name: "uncompressed y changed (not on curve)", + key: "04" + + "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + + "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4", + err: ErrPubKeyNotOnCurve, + }, { + name: "uncompressed claims compressed", + key: "03" + + "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + + "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + err: ErrPubKeyInvalidFormat, + }, { + name: "uncompressed as hybrid ok (ybit = 0)", + key: "06" + + "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + + "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c", + err: nil, + wantX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", + wantY: "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c", + }, { + name: "uncompressed as hybrid ok (ybit = 1)", + key: "07" + + "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + + "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + err: nil, + wantX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", + wantY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + }, { + name: "uncompressed as hybrid wrong oddness", + key: "06" + + "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + + "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + err: ErrPubKeyMismatchedOddness, + }, { + name: "compressed ok (ybit = 0)", + key: "02" + + "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d", + err: nil, + wantX: "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d", + wantY: "0890ff84d7999d878a57bee170e19ef4b4803b4bdede64503a6ac352b03c8032", + }, { + name: "compressed ok (ybit = 1)", + key: "03" + + "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e", + err: nil, + wantX: "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e", + wantY: "499dd7852849a38aa23ed9f306f07794063fe7904e0f347bc209fdddaf37691f", + }, { + name: "compressed claims uncompressed (ybit = 0)", + key: "04" + + "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d", + err: ErrPubKeyInvalidFormat, + }, { + name: "compressed claims uncompressed (ybit = 1)", + key: "04" + + "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e", + err: ErrPubKeyInvalidFormat, + }, { + name: "compressed claims hybrid (ybit = 0)", + key: "06" + + "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d", + err: ErrPubKeyInvalidFormat, + }, { + name: "compressed claims hybrid (ybit = 1)", + key: "07" + + 
"2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e", + err: ErrPubKeyInvalidFormat, + }, { + name: "compressed with invalid x coord (ybit = 0)", + key: "03" + + "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4c", + err: ErrPubKeyNotOnCurve, + }, { + name: "compressed with invalid x coord (ybit = 1)", + key: "03" + + "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448d", + err: ErrPubKeyNotOnCurve, + }, { + name: "empty", + key: "", + err: ErrPubKeyInvalidLen, + }, { + name: "wrong length", + key: "05", + err: ErrPubKeyInvalidLen, + }, { + name: "uncompressed x == p", + key: "04" + + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f" + + "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + err: ErrPubKeyXTooBig, + }, { + // The y coordinate produces a valid point for x == 1 (mod p), but it + // should fail to parse instead of wrapping around. + name: "uncompressed x > p (p + 1 -- aka 1)", + key: "04" + + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30" + + "bde70df51939b94c9c24979fa7dd04ebd9b3572da7802290438af2a681895441", + err: ErrPubKeyXTooBig, + }, { + name: "uncompressed y == p", + key: "04" + + "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + err: ErrPubKeyYTooBig, + }, { + // The x coordinate produces a valid point for y == 1 (mod p), but it + // should fail to parse instead of wrapping around. + name: "uncompressed y > p (p + 1 -- aka 1)", + key: "04" + + "1fe1e5ef3fceb5c135ab7741333ce5a6e80d68167653f6b2b24bcbcfaaaff507" + + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + err: ErrPubKeyYTooBig, + }, { + name: "compressed x == p (ybit = 0)", + key: "02" + + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + err: ErrPubKeyXTooBig, + }, { + name: "compressed x == p (ybit = 1)", + key: "03" + + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + err: ErrPubKeyXTooBig, + }, { + // This would be valid for x == 2 (mod p), but it should fail to parse + // instead of wrapping around. + name: "compressed x > p (p + 2 -- aka 2) (ybit = 0)", + key: "02" + + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc31", + err: ErrPubKeyXTooBig, + }, { + // This would be valid for x == 1 (mod p), but it should fail to parse + // instead of wrapping around. + name: "compressed x > p (p + 1 -- aka 1) (ybit = 1)", + key: "03" + + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + err: ErrPubKeyXTooBig, + }, { + name: "hybrid x == p (ybit = 1)", + key: "07" + + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f" + + "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + err: ErrPubKeyXTooBig, + }, { + // The y coordinate produces a valid point for x == 1 (mod p), but it + // should fail to parse instead of wrapping around. 
+ name: "hybrid x > p (p + 1 -- aka 1) (ybit = 0)", + key: "06" + + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30" + + "bde70df51939b94c9c24979fa7dd04ebd9b3572da7802290438af2a681895441", + err: ErrPubKeyXTooBig, + }, { + name: "hybrid y == p (ybit = 0 when mod p)", + key: "06" + + "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + err: ErrPubKeyYTooBig, + }, { + // The x coordinate produces a valid point for y == 1 (mod p), but it + // should fail to parse instead of wrapping around. + name: "hybrid y > p (p + 1 -- aka 1) (ybit = 1 when mod p)", + key: "07" + + "1fe1e5ef3fceb5c135ab7741333ce5a6e80d68167653f6b2b24bcbcfaaaff507" + + "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30", + err: ErrPubKeyYTooBig, + }} for _, test := range tests { pubKeyBytes := hexToBytes(test.key) pubKey, err := ParsePubKey(pubKeyBytes) if !errors.Is(err, test.err) { - t.Errorf( - "%s mismatched e -- got %v, want %v", test.name, err, - test.err, - ) + t.Errorf("%s mismatched e -- got %v, want %v", test.name, err, + test.err) continue } - if chk.E(err) { + if err != nil { continue } // Ensure the x and y coordinates match the expected values upon // successful parse. wantX, wantY := hexToFieldVal(test.wantX), hexToFieldVal(test.wantY) if !pubKey.x.Equals(wantX) { - t.Errorf( - "%s: mismatched x coordinate -- got %v, want %v", - test.name, pubKey.x, wantX, - ) + t.Errorf("%s: mismatched x coordinate -- got %v, want %v", + test.name, pubKey.x, wantX) continue } if !pubKey.y.Equals(wantY) { - t.Errorf( - "%s: mismatched y coordinate -- got %v, want %v", - test.name, pubKey.y, wantY, - ) + t.Errorf("%s: mismatched y coordinate -- got %v, want %v", + test.name, pubKey.y, wantY) continue } } @@ -247,81 +237,79 @@ func TestPubKeySerialize(t *testing.T) { pubY string // hex encoded y coordinate for pubkey to serialize compress bool // whether to serialize compressed or uncompressed expected string // hex encoded expected pubkey serialization - }{ - { - name: "uncompressed (ybit = 0)", - pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", - pubY: "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c", - compress: false, - expected: "04" + - "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + - "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c", - }, { - name: "uncompressed (ybit = 1)", - pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", - pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - compress: false, - expected: "04" + - "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + - "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - }, { - // It's invalid to parse pubkeys that are not on the curve, however it - // is possible to manually create them and they should serialize - // correctly. - name: "uncompressed not on the curve due to x coord", - pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", - pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - compress: false, - expected: "04" + - "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + - "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - }, { - // It's invalid to parse pubkeys that are not on the curve, however it - // is possible to manually create them and they should serialize - // correctly. 
- name: "uncompressed not on the curve due to y coord", - pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", - pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4", - compress: false, - expected: "04" + - "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + - "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4", - }, { - name: "compressed (ybit = 0)", - pubX: "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d", - pubY: "0890ff84d7999d878a57bee170e19ef4b4803b4bdede64503a6ac352b03c8032", - compress: true, - expected: "02" + - "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d", - }, { - name: "compressed (ybit = 1)", - pubX: "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e", - pubY: "499dd7852849a38aa23ed9f306f07794063fe7904e0f347bc209fdddaf37691f", - compress: true, - expected: "03" + - "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e", - }, { - // It's invalid to parse pubkeys that are not on the curve, however it - // is possible to manually create them and they should serialize - // correctly. - name: "compressed not on curve (ybit = 0)", - pubX: "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4c", - pubY: "0890ff84d7999d878a57bee170e19ef4b4803b4bdede64503a6ac352b03c8032", - compress: true, - expected: "02" + - "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4c", - }, { - // It's invalid to parse pubkeys that are not on the curve, however it - // is possible to manually create them and they should serialize - // correctly. - name: "compressed not on curve (ybit = 1)", - pubX: "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448d", - pubY: "499dd7852849a38aa23ed9f306f07794063fe7904e0f347bc209fdddaf37691f", - compress: true, - expected: "03" + - "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448d", - }, - } + }{{ + name: "uncompressed (ybit = 0)", + pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", + pubY: "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c", + compress: false, + expected: "04" + + "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + + "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c", + }, { + name: "uncompressed (ybit = 1)", + pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", + pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + compress: false, + expected: "04" + + "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + + "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + }, { + // It's invalid to parse pubkeys that are not on the curve, however it + // is possible to manually create them and they should serialize + // correctly. + name: "uncompressed not on the curve due to x coord", + pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", + pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + compress: false, + expected: "04" + + "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + + "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + }, { + // It's invalid to parse pubkeys that are not on the curve, however it + // is possible to manually create them and they should serialize + // correctly. 
+ name: "uncompressed not on the curve due to y coord", + pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", + pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4", + compress: false, + expected: "04" + + "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" + + "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4", + }, { + name: "compressed (ybit = 0)", + pubX: "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d", + pubY: "0890ff84d7999d878a57bee170e19ef4b4803b4bdede64503a6ac352b03c8032", + compress: true, + expected: "02" + + "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d", + }, { + name: "compressed (ybit = 1)", + pubX: "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e", + pubY: "499dd7852849a38aa23ed9f306f07794063fe7904e0f347bc209fdddaf37691f", + compress: true, + expected: "03" + + "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e", + }, { + // It's invalid to parse pubkeys that are not on the curve, however it + // is possible to manually create them and they should serialize + // correctly. + name: "compressed not on curve (ybit = 0)", + pubX: "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4c", + pubY: "0890ff84d7999d878a57bee170e19ef4b4803b4bdede64503a6ac352b03c8032", + compress: true, + expected: "02" + + "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4c", + }, { + // It's invalid to parse pubkeys that are not on the curve, however it + // is possible to manually create them and they should serialize + // correctly. + name: "compressed not on curve (ybit = 1)", + pubX: "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448d", + pubY: "499dd7852849a38aa23ed9f306f07794063fe7904e0f347bc209fdddaf37691f", + compress: true, + expected: "03" + + "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448d", + }} for _, test := range tests { // Parse the test data. 
x, y := hexToFieldVal(test.pubX), hexToFieldVal(test.pubY) @@ -336,10 +324,8 @@ func TestPubKeySerialize(t *testing.T) { } expected := hexToBytes(test.expected) if !bytes.Equal(serialized, expected) { - t.Errorf( - "%s: mismatched serialized public key -- got %x, want %x", - test.name, serialized, expected, - ) + t.Errorf("%s: mismatched serialized public key -- got %x, want %x", + test.name, serialized, expected) continue } } @@ -362,23 +348,17 @@ func TestPublicKeyIsEqual(t *testing.T) { } if !pubKey1.IsEqual(pubKey1) { - t.Fatalf( - "bad self public key equality check: (%v, %v)", pubKey1.x, - pubKey1.y, - ) + t.Fatalf("bad self public key equality check: (%v, %v)", pubKey1.x, + pubKey1.y) } if !pubKey1.IsEqual(pubKey1Copy) { - t.Fatalf( - "bad public key equality check: (%v, %v) == (%v, %v)", - pubKey1.x, pubKey1.y, pubKey1Copy.x, pubKey1Copy.y, - ) + t.Fatalf("bad public key equality check: (%v, %v) == (%v, %v)", + pubKey1.x, pubKey1.y, pubKey1Copy.x, pubKey1Copy.y) } if pubKey1.IsEqual(pubKey2) { - t.Fatalf( - "bad public key equality check: (%v, %v) != (%v, %v)", - pubKey1.x, pubKey1.y, pubKey2.x, pubKey2.y, - ) + t.Fatalf("bad public key equality check: (%v, %v) != (%v, %v)", + pubKey1.x, pubKey1.y, pubKey2.x, pubKey2.y) } } @@ -390,31 +370,29 @@ func TestPublicKeyAsJacobian(t *testing.T) { pubKey string // hex encoded serialized compressed pubkey wantX string // hex encoded expected X coordinate wantY string // hex encoded expected Y coordinate - }{ - { - name: "public key for secret key 0x01", - pubKey: "0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", - wantX: "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", - wantY: "483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", - }, { - name: "public for secret key 0x03", - pubKey: "02f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9", - wantX: "f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9", - wantY: "388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672", - }, { - name: "public for secret key 0x06", - pubKey: "03fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556", - wantX: "fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556", - wantY: "ae12777aacfbb620f3be96017f45c560de80f0f6518fe4a03c870c36b075f297", - }, - } + }{{ + name: "public key for secret key 0x01", + pubKey: "0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", + wantX: "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", + wantY: "483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8", + }, { + name: "public for secret key 0x03", + pubKey: "02f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9", + wantX: "f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9", + wantY: "388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672", + }, { + name: "public for secret key 0x06", + pubKey: "03fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556", + wantX: "fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556", + wantY: "ae12777aacfbb620f3be96017f45c560de80f0f6518fe4a03c870c36b075f297", + }} for _, test := range tests { // Parse the test data. 
pubKeyBytes := hexToBytes(test.pubKey) wantX := hexToFieldVal(test.wantX) wantY := hexToFieldVal(test.wantY) pubKey, err := ParsePubKey(pubKeyBytes) - if chk.E(err) { + if err != nil { t.Errorf("%s: failed to parse public key: %v", test.name, err) continue } @@ -423,24 +401,18 @@ func TestPublicKeyAsJacobian(t *testing.T) { var point JacobianPoint pubKey.AsJacobian(&point) if !point.Z.IsOne() { - t.Errorf( - "%s: invalid Z coordinate -- got %v, want 1", test.name, - point.Z, - ) + t.Errorf("%s: invalid Z coordinate -- got %v, want 1", test.name, + point.Z) continue } if !point.X.Equals(wantX) { - t.Errorf( - "%s: invalid X coordinate - got %v, want %v", test.name, - point.X, wantX, - ) + t.Errorf("%s: invalid X coordinate - got %v, want %v", test.name, + point.X, wantX) continue } if !point.Y.Equals(wantY) { - t.Errorf( - "%s: invalid Y coordinate - got %v, want %v", test.name, - point.Y, wantY, - ) + t.Errorf("%s: invalid Y coordinate - got %v, want %v", test.name, + point.Y, wantY) continue } } @@ -454,29 +426,27 @@ func TestPublicKeyIsOnCurve(t *testing.T) { pubX string // hex encoded x coordinate for pubkey to serialize pubY string // hex encoded y coordinate for pubkey to serialize want bool // expected result - }{ - { - name: "valid with even y", - pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", - pubY: "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c", - want: true, - }, { - name: "valid with odd y", - pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", - pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - want: true, - }, { - name: "invalid due to x coord", - pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", - pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", - want: false, - }, { - name: "invalid due to y coord", - pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", - pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4", - want: false, - }, - } + }{{ + name: "valid with even y", + pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", + pubY: "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c", + want: true, + }, { + name: "valid with odd y", + pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", + pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + want: true, + }, { + name: "invalid due to x coord", + pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", + pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3", + want: false, + }, { + name: "invalid due to y coord", + pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c", + pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4", + want: false, + }} for _, test := range tests { // Parse the test data. 
x, y := hexToFieldVal(test.pubX), hexToFieldVal(test.pubY) @@ -484,10 +454,8 @@ func TestPublicKeyIsOnCurve(t *testing.T) { result := pubKey.IsOnCurve() if result != test.want { - t.Errorf( - "%s: mismatched is on curve result -- got %v, want %v", - test.name, result, test.want, - ) + t.Errorf("%s: mismatched is on curve result -- got %v, want %v", + test.name, result, test.want) continue } } diff --git a/ec/secp256k1/seckey.go b/ec/secp256k1/seckey.go index f340ea4..5c628a8 100644 --- a/ec/secp256k1/seckey.go +++ b/ec/secp256k1/seckey.go @@ -8,7 +8,6 @@ package secp256k1 import ( "crypto/rand" "io" - "orly.dev/chk" ) diff --git a/ec/secp256k1/seckey_bench_test.go b/ec/secp256k1/seckey_bench_test.go index 2895576..bbede9a 100644 --- a/ec/secp256k1/seckey_bench_test.go +++ b/ec/secp256k1/seckey_bench_test.go @@ -6,8 +6,6 @@ package secp256k1 import ( "testing" - - "orly.dev/chk" ) // BenchmarkSecretKeyGenerate benchmarks generating new cryptographically @@ -17,7 +15,7 @@ func BenchmarkSecretKeyGenerate(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { _, err := GenerateSecretKey() - if chk.E(err) { + if err != nil { b.Fatal(err) } } diff --git a/ec/secp256k1/seckey_test.go b/ec/secp256k1/seckey_test.go index 4899116..08cc3ad 100644 --- a/ec/secp256k1/seckey_test.go +++ b/ec/secp256k1/seckey_test.go @@ -11,14 +11,12 @@ import ( "errors" "math/big" "testing" - - "orly.dev/chk" ) // TestGenerateSecretKey ensures the key generation works as expected. func TestGenerateSecretKey(t *testing.T) { sec, err := GenerateSecretKey() - if chk.E(err) { + if err != nil { t.Errorf("failed to generate secret key: %s", err) return } @@ -32,7 +30,7 @@ func TestGenerateSecretKey(t *testing.T) { // entropy source works as expected. func TestGenerateSecretKeyFromRand(t *testing.T) { sec, err := GenerateSecretKeyFromRand(rand.Reader) - if chk.E(err) { + if err != nil { t.Errorf("failed to generate secret key: %s", err) return } @@ -63,35 +61,31 @@ func TestGenerateSecretKeyCorners(t *testing.T) { // 4th invocation: 1 (32-byte big endian) oneModN := hexToModNScalar("01") var numReads int - mockReader := mockSecretKeyReaderFunc( - func(p []byte) (int, error) { - numReads++ - switch numReads { - case 1: - return copy(p, bytes.Repeat([]byte{0x00}, len(p))), nil - case 2: - return copy(p, curveParams.N.Bytes()), nil - case 3: - nPlusOne := new(big.Int).Add(curveParams.N, big.NewInt(1)) - return copy(p, nPlusOne.Bytes()), nil - } - oneModNBytes := oneModN.Bytes() - return copy(p, oneModNBytes[:]), nil - }, - ) + mockReader := mockSecretKeyReaderFunc(func(p []byte) (int, error) { + numReads++ + switch numReads { + case 1: + return copy(p, bytes.Repeat([]byte{0x00}, len(p))), nil + case 2: + return copy(p, curveParams.N.Bytes()), nil + case 3: + nPlusOne := new(big.Int).Add(curveParams.N, big.NewInt(1)) + return copy(p, nPlusOne.Bytes()), nil + } + oneModNBytes := oneModN.Bytes() + return copy(p, oneModNBytes[:]), nil + }) // Generate a secret key using the mock reader and ensure the resulting key // is the expected one. It should be the value "1" since the other values // the sequence produces are invalid and thus should be rejected. 
sec, err := GenerateSecretKeyFromRand(mockReader) - if chk.E(err) { + if err != nil { t.Errorf("failed to generate secret key: %s", err) return } if !sec.Key.Equals(oneModN) { - t.Fatalf( - "unexpected secret key -- got: %x, want %x", sec.Serialize(), - oneModN.Bytes(), - ) + t.Fatalf("unexpected secret key -- got: %x, want %x", sec.Serialize(), + oneModN.Bytes()) } } @@ -100,11 +94,9 @@ func TestGenerateSecretKeyCorners(t *testing.T) { func TestGenerateSecretKeyError(t *testing.T) { // Create a mock reader that returns an error. errDisabled := errors.New("disabled") - mockReader := mockSecretKeyReaderFunc( - func(p []byte) (int, error) { - return 0, errDisabled - }, - ) + mockReader := mockSecretKeyReaderFunc(func(p []byte) (int, error) { + return 0, errDisabled + }) // Generate a secret key using the mock reader and ensure the expected // error is returned. _, err := GenerateSecretKeyFromRand(mockReader) @@ -121,17 +113,15 @@ func TestSecKeys(t *testing.T) { name string sec string // hex encoded secret key to test pub string // expected hex encoded serialized compressed public key - }{ - { - name: "random secret key 1", - sec: "eaf02ca348c524e6392655ba4d29603cd1a7347d9d65cfe93ce1ebffdca22694", - pub: "025ceeba2ab4a635df2c0301a3d773da06ac5a18a7c3e0d09a795d7e57d233edf1", - }, { - name: "random secret key 2", - sec: "24b860d0651db83feba821e7a94ba8b87162665509cefef0cbde6a8fbbedfe7c", - pub: "032a6e51bf218085647d330eac2fafaeee07617a777ad9e8e7141b4cdae92cb637", - }, - } + }{{ + name: "random secret key 1", + sec: "eaf02ca348c524e6392655ba4d29603cd1a7347d9d65cfe93ce1ebffdca22694", + pub: "025ceeba2ab4a635df2c0301a3d773da06ac5a18a7c3e0d09a795d7e57d233edf1", + }, { + name: "random secret key 2", + sec: "24b860d0651db83feba821e7a94ba8b87162665509cefef0cbde6a8fbbedfe7c", + pub: "032a6e51bf218085647d330eac2fafaeee07617a777ad9e8e7141b4cdae92cb637", + }} for _, test := range tests { // Parse test data. 
@@ -143,18 +133,14 @@ func TestSecKeys(t *testing.T) { serializedPubKey := pub.SerializeCompressed() if !bytes.Equal(serializedPubKey, wantPubKeyBytes) { - t.Errorf( - "%s unexpected serialized public key - got: %x, want: %x", - test.name, serializedPubKey, wantPubKeyBytes, - ) + t.Errorf("%s unexpected serialized public key - got: %x, want: %x", + test.name, serializedPubKey, wantPubKeyBytes) } serializedSecKey := sec.Serialize() if !bytes.Equal(serializedSecKey, secKeyBytes) { - t.Errorf( - "%s unexpected serialized secret key - got: %x, want: %x", - test.name, serializedSecKey, secKeyBytes, - ) + t.Errorf("%s unexpected serialized secret key - got: %x, want: %x", + test.name, serializedSecKey, secKeyBytes) } } } diff --git a/ec/secp256k1/util_test.go b/ec/secp256k1/util_test.go new file mode 100644 index 0000000..8ab9b0b --- /dev/null +++ b/ec/secp256k1/util_test.go @@ -0,0 +1,9 @@ +package secp256k1_test + +import ( + "orly.dev/lol" +) + +var ( + log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf +) diff --git a/ec/taproot/taproot.go b/ec/taproot/taproot.go index 2909902..032a837 100644 --- a/ec/taproot/taproot.go +++ b/ec/taproot/taproot.go @@ -6,8 +6,8 @@ import ( "bytes" "errors" "fmt" - "orly.dev/chk" + "orly.dev/ec/bech32" "orly.dev/ec/chaincfg" ) diff --git a/ec/util_test.go b/ec/util_test.go new file mode 100644 index 0000000..bcbd11a --- /dev/null +++ b/ec/util_test.go @@ -0,0 +1,9 @@ +package btcec_test + +import ( + "orly.dev/lol" +) + +var ( + log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf +) diff --git a/encryption/README.md b/encryption/README.md new file mode 100644 index 0000000..2ca8f06 --- /dev/null +++ b/encryption/README.md @@ -0,0 +1 @@ +Code copied from https://github.com/paulmillr/nip44/tree/e7aed61aaf77240ac10c325683eed14b22e7950f/go. diff --git a/encryption/doc.go b/encryption/doc.go new file mode 100644 index 0000000..07bdbbb --- /dev/null +++ b/encryption/doc.go @@ -0,0 +1,3 @@ +// Package encryption contains the message encryption schemes defined in NIP-04 +// and NIP-44, used for encrypting the content of nostr messages. +package encryption diff --git a/encryption/nip4.go b/encryption/nip4.go new file mode 100644 index 0000000..1ab585d --- /dev/null +++ b/encryption/nip4.go @@ -0,0 +1,116 @@ +package encryption + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "encoding/base64" + "orly.dev/chk" + "orly.dev/errorf" + "strings" + + "lukechampine.com/frand" + + "orly.dev/hex" + "orly.dev/p256k" +) + +// ComputeSharedSecret returns a shared secret key used to encrypt messages. The private and public keys should be hex +// encoded. Uses the Diffie-Hellman key exchange (ECDH) (RFC 4753). +func ComputeSharedSecret(pkh, skh string) (sharedSecret []byte, err error) { + var skb, pkb []byte + if skb, err = hex.Dec(skh); chk.E(err) { + return + } + if pkb, err = hex.Dec(pkh); chk.E(err) { + return + } + signer := new(p256k.Signer) + if err = signer.InitSec(skb); chk.E(err) { + return + } + if sharedSecret, err = signer.ECDH(pkb); chk.E(err) { + return + } + return +} + +// EncryptNip4 encrypts message with key using aes-256-cbc. key should be the shared secret generated by +// ComputeSharedSecret. +// +// Returns: base64(encrypted_bytes) + "?iv=" + base64(initialization_vector). +// +// Deprecated: upgrade to using Decrypt with the NIP-44 algorithm. 
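+//
+// A minimal round-trip sketch (illustrative only; the variable names are
+// placeholders, not part of this change):
+//
+//	shared, err := ComputeSharedSecret(peerPubHex, ourSecHex)
+//	if err != nil {
+//		// handle error
+//	}
+//	ct, err := EncryptNip4("hello", shared)
+//	if err != nil {
+//		// handle error
+//	}
+//	pt, err := DecryptNip4(string(ct), shared)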
+func EncryptNip4(msg string, key []byte) (ct []byte, err error) { + // block size is 16 bytes + iv := make([]byte, 16) + if _, err = frand.Read(iv); chk.E(err) { + err = errorf.E("error creating initialization vector: %w", err) + return + } + // automatically picks aes-256 based on key length (32 bytes) + var block cipher.Block + if block, err = aes.NewCipher(key); chk.E(err) { + err = errorf.E("error creating block cipher: %w", err) + return + } + mode := cipher.NewCBCEncrypter(block, iv) + plaintext := []byte(msg) + // add padding + base := len(plaintext) + // this will be a number between 1 and 16 (inclusive), never 0 + bs := block.BlockSize() + padding := bs - base%bs + // encode the padding in all the padding bytes themselves + padText := bytes.Repeat([]byte{byte(padding)}, padding) + paddedMsgBytes := append(plaintext, padText...) + ciphertext := make([]byte, len(paddedMsgBytes)) + mode.CryptBlocks(ciphertext, paddedMsgBytes) + return []byte(base64.StdEncoding.EncodeToString(ciphertext) + "?iv=" + + base64.StdEncoding.EncodeToString(iv)), nil +} + +// DecryptNip4 decrypts a content string using the shared secret key. The inverse operation to message -> +// EncryptNip4(message, key). +// +// Deprecated: upgrade to using Decrypt with the NIP-44 algorithm. +func DecryptNip4(content string, key []byte) (msg []byte, err error) { + parts := strings.Split(content, "?iv=") + if len(parts) < 2 { + return nil, errorf.E( + "error parsing encrypted message: no initialization vector", + ) + } + var ciphertext []byte + if ciphertext, err = base64.StdEncoding.DecodeString(parts[0]); chk.E(err) { + err = errorf.E("error decoding ciphertext from base64: %w", err) + return + } + var iv []byte + if iv, err = base64.StdEncoding.DecodeString(parts[1]); chk.E(err) { + err = errorf.E("error decoding iv from base64: %w", err) + return + } + var block cipher.Block + if block, err = aes.NewCipher(key); chk.E(err) { + err = errorf.E("error creating block cipher: %w", err) + return + } + mode := cipher.NewCBCDecrypter(block, iv) + msg = make([]byte, len(ciphertext)) + mode.CryptBlocks(msg, ciphertext) + // remove padding + var ( + plaintextLen = len(msg) + ) + if plaintextLen > 0 { + // the padding amount is encoded in the padding bytes themselves + padding := int(msg[plaintextLen-1]) + if padding > plaintextLen { + err = errorf.E("invalid padding amount: %d", padding) + return + } + msg = msg[0 : plaintextLen-padding] + } + return msg, nil +} diff --git a/encryption/nip44.go b/encryption/nip44.go new file mode 100644 index 0000000..0387a6d --- /dev/null +++ b/encryption/nip44.go @@ -0,0 +1,241 @@ +package encryption + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "encoding/binary" + "io" + "math" + "orly.dev/chk" + "orly.dev/errorf" + + "golang.org/x/crypto/chacha20" + "golang.org/x/crypto/hkdf" + + "orly.dev/sha256" +) + +const ( + version byte = 2 + MinPlaintextSize = 0x0001 // 1b msg => padded to 32b + MaxPlaintextSize = 0xffff // 65535 (64kb-1) => padded to 64kb +) + +type Opts struct { + err error + nonce []byte +} + +// Deprecated: use WithCustomNonce instead of WithCustomSalt, so the naming is less confusing +var WithCustomSalt = WithCustomNonce + +// WithCustomNonce enables using a custom nonce (salt) instead of using the +// system crypto/rand entropy source. 
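+//
+// This is primarily useful for deterministic test vectors, as in the tests
+// accompanying this package; for example (illustrative only):
+//
+//	ct, err := Encrypt(msg, conversationKey, WithCustomNonce(nonce32))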
+func WithCustomNonce(salt []byte) func(opts *Opts) { + return func(opts *Opts) { + if len(salt) != 32 { + opts.err = errorf.E("salt must be 32 bytes, got %d", len(salt)) + } + opts.nonce = salt + } +} + +// Encrypt data using a provided symmetric conversation key using NIP-44 +// encryption (chacha20 cipher stream and sha256 HMAC). +func Encrypt( + plaintext string, conversationKey []byte, + applyOptions ...func(opts *Opts), +) ( + cipherString string, + err error, +) { + + var o Opts + for _, apply := range applyOptions { + apply(&o) + } + if chk.E(o.err) { + err = o.err + return + } + if o.nonce == nil { + o.nonce = make([]byte, 32) + if _, err = rand.Read(o.nonce); chk.E(err) { + return + } + } + var enc, cc20nonce, auth []byte + if enc, cc20nonce, auth, err = getKeys( + conversationKey, o.nonce, + ); chk.E(err) { + return + } + plain := []byte(plaintext) + size := len(plain) + if size < MinPlaintextSize || size > MaxPlaintextSize { + err = errorf.E("plaintext should be between 1b and 64kB") + return + } + padding := CalcPadding(size) + padded := make([]byte, 2+padding) + binary.BigEndian.PutUint16(padded, uint16(size)) + copy(padded[2:], plain) + var cipher []byte + if cipher, err = encrypt(enc, cc20nonce, padded); chk.E(err) { + return + } + var mac []byte + if mac, err = sha256Hmac(auth, cipher, o.nonce); chk.E(err) { + return + } + ct := make([]byte, 0, 1+32+len(cipher)+32) + ct = append(ct, version) + ct = append(ct, o.nonce...) + ct = append(ct, cipher...) + ct = append(ct, mac...) + cipherString = base64.StdEncoding.EncodeToString(ct) + return +} + +// Decrypt data that has been encoded using a provided symmetric conversation +// key using NIP-44 encryption (chacha20 cipher stream and sha256 HMAC). +func Decrypt(b64ciphertextWrapped string, conversationKey []byte) ( + plaintext string, + err error, +) { + cLen := len(b64ciphertextWrapped) + if cLen < 132 || cLen > 87472 { + err = errorf.E("invalid payload length: %d", cLen) + return + } + if b64ciphertextWrapped[:1] == "#" { + err = errorf.E("unknown version") + return + } + var decoded []byte + if decoded, err = base64.StdEncoding.DecodeString(b64ciphertextWrapped); chk.E(err) { + return + } + if decoded[0] != version { + err = errorf.E("unknown version %d", decoded[0]) + return + } + dLen := len(decoded) + if dLen < 99 || dLen > 65603 { + err = errorf.E("invalid data length: %d", dLen) + return + } + nonce, ciphertext, givenMac := decoded[1:33], decoded[33:dLen-32], decoded[dLen-32:] + var enc, cc20nonce, auth []byte + if enc, cc20nonce, auth, err = getKeys(conversationKey, nonce); chk.E(err) { + return + } + var expectedMac []byte + if expectedMac, err = sha256Hmac(auth, ciphertext, nonce); chk.E(err) { + return + } + if !bytes.Equal(givenMac, expectedMac) { + err = errorf.E("invalid hmac") + return + } + var padded []byte + if padded, err = encrypt(enc, cc20nonce, ciphertext); chk.E(err) { + return + } + unpaddedLen := binary.BigEndian.Uint16(padded[0:2]) + if unpaddedLen < uint16(MinPlaintextSize) || unpaddedLen > uint16(MaxPlaintextSize) || + len(padded) != 2+CalcPadding(int(unpaddedLen)) { + err = errorf.E("invalid padding") + return + } + unpadded := padded[2:][:unpaddedLen] + if len(unpadded) == 0 || len(unpadded) != int(unpaddedLen) { + err = errorf.E("invalid padding") + return + } + plaintext = string(unpadded) + return +} + +// GenerateConversationKey performs an ECDH key generation hashed with the nip-44-v2 using hkdf. 
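+//
+// A minimal round-trip sketch (illustrative only; the variable names are
+// placeholders, assuming hex-encoded keys as used elsewhere in this package):
+//
+//	ck, err := GenerateConversationKey(recipientPubHex, senderSecHex)
+//	if err != nil {
+//		// handle error
+//	}
+//	ciphertext, err := Encrypt("hello", ck)
+//	if err != nil {
+//		// handle error
+//	}
+//	plaintext, err := Decrypt(ciphertext, ck)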
+func GenerateConversationKey(pkh, skh string) (ck []byte, err error) {
+	if skh >= "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141" ||
+		skh == "0000000000000000000000000000000000000000000000000000000000000000" {
+		err = errorf.E(
+			"invalid private key: x coordinate %s is not on the secp256k1 curve",
+			skh,
+		)
+		return
+	}
+	var shared []byte
+	if shared, err = ComputeSharedSecret(pkh, skh); chk.E(err) {
+		return
+	}
+	ck = hkdf.Extract(sha256.New, shared, []byte("nip44-v2"))
+	return
+}
+
+func encrypt(key, nonce, message []byte) (dst []byte, err error) {
+	var cipher *chacha20.Cipher
+	if cipher, err = chacha20.NewUnauthenticatedCipher(key, nonce); chk.E(err) {
+		return
+	}
+	dst = make([]byte, len(message))
+	cipher.XORKeyStream(dst, message)
+	return
+}
+
+func sha256Hmac(key, ciphertext, nonce []byte) (h []byte, err error) {
+	if len(nonce) != sha256.Size {
+		err = errorf.E("nonce aad must be 32 bytes")
+		return
+	}
+	hm := hmac.New(sha256.New, key)
+	hm.Write(nonce)
+	hm.Write(ciphertext)
+	h = hm.Sum(nil)
+	return
+}
+
+func getKeys(conversationKey, nonce []byte) (
+	enc, cc20nonce, auth []byte, err error,
+) {
+	if len(conversationKey) != 32 {
+		err = errorf.E("conversation key must be 32 bytes")
+		return
+	}
+	if len(nonce) != 32 {
+		err = errorf.E("nonce must be 32 bytes")
+		return
+	}
+	r := hkdf.Expand(sha256.New, conversationKey, nonce)
+	enc = make([]byte, 32)
+	if _, err = io.ReadFull(r, enc); chk.E(err) {
+		return
+	}
+	cc20nonce = make([]byte, 12)
+	if _, err = io.ReadFull(r, cc20nonce); chk.E(err) {
+		return
+	}
+	auth = make([]byte, 32)
+	if _, err = io.ReadFull(r, auth); chk.E(err) {
+		return
+	}
+	return
+}
+
+// CalcPadding computes the padded length for a message payload, rounding the
+// plaintext length up to a chunk size derived from the next power of two, per
+// the NIP-44 padding scheme, so the ciphertext reveals less about the exact
+// plaintext length. Note that this can inflate short messages considerably
+// compared to a simple length prefix with a few dozen random padding bytes.
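+//
+// For illustration, a few input/output pairs of this padding scheme:
+//
+//	CalcPadding(1)    == 32
+//	CalcPadding(33)   == 64
+//	CalcPadding(100)  == 128
+//	CalcPadding(1000) == 1024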
+func CalcPadding(sLen int) (l int) { + if sLen <= 32 { + return 32 + } + nextPower := 1 << int(math.Floor(math.Log2(float64(sLen-1)))+1) + chunk := int(math.Max(32, float64(nextPower/8))) + l = chunk * int(math.Floor(float64((sLen-1)/chunk))+1) + return +} diff --git a/encryption/nip44_test.go b/encryption/nip44_test.go new file mode 100644 index 0000000..4b16ea5 --- /dev/null +++ b/encryption/nip44_test.go @@ -0,0 +1,1368 @@ +package encryption + +import ( + "crypto/rand" + "fmt" + "hash" + "orly.dev/chk" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "orly.dev/hex" + "orly.dev/keys" + "orly.dev/sha256" +) + +func assertCryptPriv( + t *testing.T, + sk1, sk2, conversationKey, salt, plaintext, expected string, +) { + var ( + k1, s []byte + actual, decrypted string + ok bool + err error + ) + k1, err = hex.Dec(conversationKey) + if ok = assert.NoErrorf( + t, err, "hex decode failed for conversation key: %v", err, + ); !ok { + return + } + if ok = assertConversationKeyGenerationSec( + t, sk1, sk2, conversationKey, + ); !ok { + return + } + s, err = hex.Dec(salt) + if ok = assert.NoErrorf( + t, err, "hex decode failed for salt: %v", err, + ); !ok { + return + } + actual, err = Encrypt(plaintext, k1, WithCustomNonce(s)) + if ok = assert.NoError(t, err, "encryption failed: %v", err); !ok { + return + } + if ok = assert.Equalf(t, expected, actual, "wrong encryption"); !ok { + return + } + decrypted, err = Decrypt(expected, k1) + if ok = assert.NoErrorf(t, err, "decryption failed: %v", err); !ok { + return + } + assert.Equal(t, decrypted, plaintext, "wrong decryption") +} + +func assertDecryptFail( + t *testing.T, conversationKey, plaintext, ciphertext, msg string, +) { + var ( + k1 []byte + ok bool + err error + ) + k1, err = hex.Dec(conversationKey) + if ok = assert.NoErrorf( + t, err, "hex decode failed for conversation key: %v", err, + ); !ok { + return + } + _, err = Decrypt(ciphertext, k1) + assert.ErrorContains(t, err, msg) +} + +func assertConversationKeyFail( + t *testing.T, priv string, pub string, msg string, +) { + _, err := GenerateConversationKey(pub, priv) + assert.ErrorContains(t, err, msg) +} + +func assertConversationKeyGeneration( + t *testing.T, priv, pub, conversationKey string, +) bool { + var ( + actualConversationKey, + expectedConversationKey []byte + ok bool + err error + ) + expectedConversationKey, err = hex.Dec(conversationKey) + if ok = assert.NoErrorf( + t, err, "hex decode failed for conversation key: %v", err, + ); !ok { + return false + } + actualConversationKey, err = GenerateConversationKey(pub, priv) + if ok = assert.NoErrorf( + t, err, "conversation key generation failed: %v", err, + ); !ok { + return false + } + if ok = assert.Equalf( + t, expectedConversationKey, actualConversationKey, + "wrong conversation key", + ); !ok { + return false + } + return true +} + +func assertConversationKeyGenerationSec( + t *testing.T, sk1, sk2, conversationKey string, +) bool { + pub2, err := keys.GetPublicKeyHex(sk2) + if ok := assert.NoErrorf( + t, err, "failed to derive pubkey from sk2: %v", err, + ); !ok { + return false + } + return assertConversationKeyGeneration(t, sk1, pub2, conversationKey) +} + +func assertConversationKeyGenerationPub( + t *testing.T, sk, pub, conversationKey string, +) bool { + return assertConversationKeyGeneration(t, sk, pub, conversationKey) +} + +func assertMessageKeyGeneration( + t *testing.T, + conversationKey, salt, chachaKey, chachaSalt, hmacKey string, +) bool { + var ( + convKey, convSalt, actualChaChaKey, 
expectedChaChaKey, actualChaChaNonce, + expectedChaChaNonce, actualHmacKey, expectedHmacKey []byte + ok bool + err error + ) + convKey, err = hex.Dec(conversationKey) + if ok = assert.NoErrorf( + t, err, "hex decode failed for convKey: %v", err, + ); !ok { + return false + } + convSalt, err = hex.Dec(salt) + if ok = assert.NoErrorf( + t, err, "hex decode failed for salt: %v", err, + ); !ok { + return false + } + expectedChaChaKey, err = hex.Dec(chachaKey) + if ok = assert.NoErrorf( + t, err, "hex decode failed for encrypt key: %v", err, + ); !ok { + return false + } + expectedChaChaNonce, err = hex.Dec(chachaSalt) + if ok = assert.NoErrorf( + t, err, "hex decode failed for encrypt nonce: %v", err, + ); !ok { + return false + } + expectedHmacKey, err = hex.Dec(hmacKey) + if ok = assert.NoErrorf( + t, err, "hex decode failed for hmac key: %v", err, + ); !ok { + return false + } + actualChaChaKey, actualChaChaNonce, actualHmacKey, err = getKeys( + convKey, convSalt, + ) + if ok = assert.NoErrorf( + t, err, "message key generation failed: %v", err, + ); !ok { + return false + } + if ok = assert.Equalf( + t, expectedChaChaKey, actualChaChaKey, "wrong encrypt key", + ); !ok { + return false + } + if ok = assert.Equalf( + t, expectedChaChaNonce, actualChaChaNonce, + "wrong encrypt nonce", + ); !ok { + return false + } + if ok = assert.Equalf( + t, expectedHmacKey, actualHmacKey, "wrong hmac key", + ); !ok { + return false + } + return true +} + +func assertCryptLong( + t *testing.T, conversationKey, salt, pattern string, repeat int, + plaintextSha256, payloadSha256 string, +) { + var ( + convKey, convSalt []byte + plaintext, actualPlaintextSha256, actualPayload, actualPayloadSha256 string + h hash.Hash + ok bool + err error + ) + convKey, err = hex.Dec(conversationKey) + if ok = assert.NoErrorf( + t, err, "hex decode failed for convKey: %v", err, + ); !ok { + return + } + convSalt, err = hex.Dec(salt) + if ok = assert.NoErrorf( + t, err, "hex decode failed for salt: %v", err, + ); !ok { + return + } + plaintext = "" + for i := 0; i < repeat; i++ { + plaintext += pattern + } + h = sha256.New() + h.Write([]byte(plaintext)) + actualPlaintextSha256 = hex.Enc(h.Sum(nil)) + if ok = assert.Equalf( + t, plaintextSha256, actualPlaintextSha256, + "invalid plaintext sha256 hash: %v", err, + ); !ok { + return + } + actualPayload, err = Encrypt(plaintext, convKey, WithCustomNonce(convSalt)) + if ok = assert.NoErrorf(t, err, "encryption failed: %v", err); !ok { + return + } + h.Reset() + h.Write([]byte(actualPayload)) + actualPayloadSha256 = hex.Enc(h.Sum(nil)) + if ok = assert.Equalf( + t, payloadSha256, actualPayloadSha256, + "invalid payload sha256 hash: %v", err, + ); !ok { + return + } +} + +func TestCryptPriv001(t *testing.T) { + assertCryptPriv( + t, + "0000000000000000000000000000000000000000000000000000000000000001", + "0000000000000000000000000000000000000000000000000000000000000002", + "c41c775356fd92eadc63ff5a0dc1da211b268cbea22316767095b2871ea1412d", + "0000000000000000000000000000000000000000000000000000000000000001", + "a", + "AgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABee0G5VSK0/9YypIObAtDKfYEAjD35uVkHyB0F4DwrcNaCXlCWZKaArsGrY6M9wnuTMxWfp1RTN9Xga8no+kF5Vsb", + ) +} + +func TestCryptPriv002(t *testing.T) { + assertCryptPriv( + t, + "0000000000000000000000000000000000000000000000000000000000000002", + "0000000000000000000000000000000000000000000000000000000000000001", + "c41c775356fd92eadc63ff5a0dc1da211b268cbea22316767095b2871ea1412d", + 
"f00000000000000000000000000000f00000000000000000000000000000000f", + "🍕🫃", + "AvAAAAAAAAAAAAAAAAAAAPAAAAAAAAAAAAAAAAAAAAAPSKSK6is9ngkX2+cSq85Th16oRTISAOfhStnixqZziKMDvB0QQzgFZdjLTPicCJaV8nDITO+QfaQ61+KbWQIOO2Yj", + ) +} + +func TestCryptPriv003(t *testing.T) { + assertCryptPriv( + t, + "5c0c523f52a5b6fad39ed2403092df8cebc36318b39383bca6c00808626fab3a", + "4b22aa260e4acb7021e32f38a6cdf4b673c6a277755bfce287e370c924dc936d", + "3e2b52a63be47d34fe0a80e34e73d436d6963bc8f39827f327057a9986c20a45", + "b635236c42db20f021bb8d1cdff5ca75dd1a0cc72ea742ad750f33010b24f73b", + "表ポあA鷗ŒéB逍Üߪąñ丂㐀𠀀", + "ArY1I2xC2yDwIbuNHN/1ynXdGgzHLqdCrXUPMwELJPc7s7JqlCMJBAIIjfkpHReBPXeoMCyuClwgbT419jUWU1PwaNl4FEQYKCDKVJz+97Mp3K+Q2YGa77B6gpxB/lr1QgoqpDf7wDVrDmOqGoiPjWDqy8KzLueKDcm9BVP8xeTJIxs=", + ) +} + +func TestCryptPriv004(t *testing.T) { + assertCryptPriv( + t, + "8f40e50a84a7462e2b8d24c28898ef1f23359fff50d8c509e6fb7ce06e142f9c", + "b9b0a1e9cc20100c5faa3bbe2777303d25950616c4c6a3fa2e3e046f936ec2ba", + "d5a2f879123145a4b291d767428870f5a8d9e5007193321795b40183d4ab8c2b", + "b20989adc3ddc41cd2c435952c0d59a91315d8c5218d5040573fc3749543acaf", + "ability🤝的 ȺȾ", + "ArIJia3D3cQc0sQ1lSwNWakTFdjFIY1QQFc/w3SVQ6yvbG2S0x4Yu86QGwPTy7mP3961I1XqB6SFFTzqDZZavhxoWMj7mEVGMQIsh2RLWI5EYQaQDIePSnXPlzf7CIt+voTD", + ) +} + +func TestCryptPriv005(t *testing.T) { + assertCryptPriv( + t, + "875adb475056aec0b4809bd2db9aa00cff53a649e7b59d8edcbf4e6330b0995c", + "9c05781112d5b0a2a7148a222e50e0bd891d6b60c5483f03456e982185944aae", + "3b15c977e20bfe4b8482991274635edd94f366595b1a3d2993515705ca3cedb8", + "8d4442713eb9d4791175cb040d98d6fc5be8864d6ec2f89cf0895a2b2b72d1b1", + "pepper👀їжак", + "Ao1EQnE+udR5EXXLBA2Y1vxb6IZNbsL4nPCJWisrctGxY3AduCS+jTUgAAnfvKafkmpy15+i9YMwCdccisRa8SvzW671T2JO4LFSPX31K4kYUKelSAdSPwe9NwO6LhOsnoJ+", + ) +} + +func TestCryptPriv006(t *testing.T) { + assertCryptPriv( + t, + "eba1687cab6a3101bfc68fd70f214aa4cc059e9ec1b79fdb9ad0a0a4e259829f", + "dff20d262bef9dfd94666548f556393085e6ea421c8af86e9d333fa8747e94b3", + "4f1538411098cf11c8af216836444787c462d47f97287f46cf7edb2c4915b8a5", + "2180b52ae645fcf9f5080d81b1f0b5d6f2cd77ff3c986882bb549158462f3407", + "( ͡° ͜ʖ ͡°)", + "AiGAtSrmRfz59QgNgbHwtdbyzXf/PJhogrtUkVhGLzQHv4qhKQwnFQ54OjVMgqCea/Vj0YqBSdhqNR777TJ4zIUk7R0fnizp6l1zwgzWv7+ee6u+0/89KIjY5q1wu6inyuiv", + ) +} + +func TestCryptPriv007(t *testing.T) { + assertCryptPriv( + t, + "d5633530f5bcfebceb5584cfbbf718a30df0751b729dd9a789b9f30c0587d74e", + "b74e6a341fb134127272b795a08b59250e5fa45a82a2eb4095e4ce9ed5f5e214", + "75fe686d21a035f0c7cd70da64ba307936e5ca0b20710496a6b6b5f573377bdd", + "e4cd5f7ce4eea024bc71b17ad456a986a74ac426c2c62b0a15eb5c5c8f888b68", + "مُنَاقَشَةُ سُبُلِ اِسْتِخْدَامِ اللُّغَةِ فِي النُّظُمِ الْقَائِمَةِ وَفِيم يَخُصَّ التَّطْبِيقَاتُ الْحاسُوبِيَّةُ،", + "AuTNX3zk7qAkvHGxetRWqYanSsQmwsYrChXrXFyPiItoIBsWu1CB+sStla2M4VeANASHxM78i1CfHQQH1YbBy24Tng7emYW44ol6QkFD6D8Zq7QPl+8L1c47lx8RoODEQMvNCbOk5ffUV3/AhONHBXnffrI+0025c+uRGzfqpYki4lBqm9iYU+k3Tvjczq9wU0mkVDEaM34WiQi30MfkJdRbeeYaq6kNvGPunLb3xdjjs5DL720d61Flc5ZfoZm+CBhADy9D9XiVZYLKAlkijALJur9dATYKci6OBOoc2SJS2Clai5hOVzR0yVeyHRgRfH9aLSlWW5dXcUxTo7qqRjNf8W5+J4jF4gNQp5f5d0YA4vPAzjBwSP/5bGzNDslKfcAH", + ) +} + +func TestCryptPriv008(t *testing.T) { + assertCryptPriv( + t, + "d5633530f5bcfebceb5584cfbbf718a30df0751b729dd9a789b9f30c0587d74e", + "b74e6a341fb134127272b795a08b59250e5fa45a82a2eb4095e4ce9ed5f5e214", + "75fe686d21a035f0c7cd70da64ba307936e5ca0b20710496a6b6b5f573377bdd", + "e4cd5f7ce4eea024bc71b17ad456a986a74ac426c2c62b0a15eb5c5c8f888b68", + "مُنَاقَشَةُ سُبُلِ اِسْتِخْدَامِ 
اللُّغَةِ فِي النُّظُمِ الْقَائِمَةِ وَفِيم يَخُصَّ التَّطْبِيقَاتُ الْحاسُوبِيَّةُ،", + "AuTNX3zk7qAkvHGxetRWqYanSsQmwsYrChXrXFyPiItoIBsWu1CB+sStla2M4VeANASHxM78i1CfHQQH1YbBy24Tng7emYW44ol6QkFD6D8Zq7QPl+8L1c47lx8RoODEQMvNCbOk5ffUV3/AhONHBXnffrI+0025c+uRGzfqpYki4lBqm9iYU+k3Tvjczq9wU0mkVDEaM34WiQi30MfkJdRbeeYaq6kNvGPunLb3xdjjs5DL720d61Flc5ZfoZm+CBhADy9D9XiVZYLKAlkijALJur9dATYKci6OBOoc2SJS2Clai5hOVzR0yVeyHRgRfH9aLSlWW5dXcUxTo7qqRjNf8W5+J4jF4gNQp5f5d0YA4vPAzjBwSP/5bGzNDslKfcAH", + ) +} + +func TestCryptPriv009X(t *testing.T) { + assertCryptPriv( + t, + "d5633530f5bcfebceb5584cfbbf718a30df0751b729dd9a789b9f30c0587d74e", + "b74e6a341fb134127272b795a08b59250e5fa45a82a2eb4095e4ce9ed5f5e214", + "75fe686d21a035f0c7cd70da64ba307936e5ca0b20710496a6b6b5f573377bdd", + "38d1ca0abef9e5f564e89761a86cee04574b6825d3ef2063b10ad75899e4b023", + "الكل في المجمو عة (5)", + "AjjRygq++eX1ZOiXYahs7gRXS2gl0+8gY7EK11iZ5LAjbOTrlfrxak5Lki42v2jMPpLSicy8eHjsWkkMtF0i925vOaKG/ZkMHh9ccQBdfTvgEGKzztedqDCAWb5TP1YwU1PsWaiiqG3+WgVvJiO4lUdMHXL7+zKKx8bgDtowzz4QAwI=", + ) +} + +func TestCryptPriv010(t *testing.T) { + assertCryptPriv( + t, + "d5633530f5bcfebceb5584cfbbf718a30df0751b729dd9a789b9f30c0587d74e", + "b74e6a341fb134127272b795a08b59250e5fa45a82a2eb4095e4ce9ed5f5e214", + "75fe686d21a035f0c7cd70da64ba307936e5ca0b20710496a6b6b5f573377bdd", + "4f1a31909f3483a9e69c8549a55bbc9af25fa5bbecf7bd32d9896f83ef2e12e0", + "𝖑𝖆𝖟𝖞 社會科學院語學研究所", + "Ak8aMZCfNIOp5pyFSaVbvJryX6W77Pe9MtmJb4PvLhLgh/TsxPLFSANcT67EC1t/qxjru5ZoADjKVEt2ejdx+xGvH49mcdfbc+l+L7gJtkH7GLKpE9pQNQWNHMAmj043PAXJZ++fiJObMRR2mye5VHEANzZWkZXMrXF7YjuG10S1pOU=", + ) +} + +func TestCryptPriv011(t *testing.T) { + assertCryptPriv( + t, + "d5633530f5bcfebceb5584cfbbf718a30df0751b729dd9a789b9f30c0587d74e", + "b74e6a341fb134127272b795a08b59250e5fa45a82a2eb4095e4ce9ed5f5e214", + "75fe686d21a035f0c7cd70da64ba307936e5ca0b20710496a6b6b5f573377bdd", + "a3e219242d85465e70adcd640b564b3feff57d2ef8745d5e7a0663b2dccceb54", + "🙈 🙉 🙊 0️⃣ 1️⃣ 2️⃣ 3️⃣ 4️⃣ 5️⃣ 6️⃣ 7️⃣ 8️⃣ 9️⃣ 🔟 Powerلُلُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ冗", + "AqPiGSQthUZecK3NZAtWSz/v9X0u+HRdXnoGY7LczOtUf05aMF89q1FLwJvaFJYICZoMYgRJHFLwPiOHce7fuAc40kX0wXJvipyBJ9HzCOj7CgtnC1/cmPCHR3s5AIORmroBWglm1LiFMohv1FSPEbaBD51VXxJa4JyWpYhreSOEjn1wd0lMKC9b+osV2N2tpbs+rbpQem2tRen3sWflmCqjkG5VOVwRErCuXuPb5+hYwd8BoZbfCrsiAVLd7YT44dRtKNBx6rkabWfddKSLtreHLDysOhQUVOp/XkE7OzSkWl6sky0Hva6qJJ/V726hMlomvcLHjE41iKmW2CpcZfOedg==", + ) +} + +func TestCryptLong001(t *testing.T) { + assertCryptLong( + t, + "8fc262099ce0d0bb9b89bac05bb9e04f9bc0090acc181fef6840ccee470371ed", + "326bcb2c943cd6bb717588c9e5a7e738edf6ed14ec5f5344caa6ef56f0b9cff7", + "x", + 65535, + "09ab7495d3e61a76f0deb12cb0306f0696cbb17ffc12131368c7a939f12f56d3", + "90714492225faba06310bff2f249ebdc2a5e609d65a629f1c87f2d4ffc55330a", + ) +} + +func TestCryptLong002(t *testing.T) { + assertCryptLong( + t, + "56adbe3720339363ab9c3b8526ffce9fd77600927488bfc4b59f7a68ffe5eae0", + "ad68da81833c2a8ff609c3d2c0335fd44fe5954f85bb580c6a8d467aa9fc5dd0", + "!", + 65535, + "6af297793b72ae092c422e552c3bb3cbc310da274bd1cf9e31023a7fe4a2d75e", + "8013e45a109fad3362133132b460a2d5bce235fe71c8b8f4014793fb52a49844", + ) +} + +func TestCryptLong003(t *testing.T) { + assertCryptLong( + t, + "7fc540779979e472bb8d12480b443d1e5eb1098eae546ef2390bee499bbf46be", + "34905e82105c20de9a2f6cd385a0d541e6bcc10601d12481ff3a7575dc622033", + "🦄", + 16383, + "a249558d161b77297bc0cb311dde7d77190f6571b25c7e4429cd19044634a61f", + "b3348422471da1f3c59d79acfe2fe103f3cd24488109e5b18734cdb5953afd15", + ) +} + +func TestConversationKeyFail001(t *testing.T) { + 
// sec1 higher than curve.n + assertConversationKeyFail( + t, + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + "invalid private key: x coordinate ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff is not on the secp256k1 curve", + ) +} + +func TestConversationKeyFail002(t *testing.T) { + // sec1 is 0 + assertConversationKeyFail( + t, + "0000000000000000000000000000000000000000000000000000000000000000", + "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + "invalid private key: x coordinate 0000000000000000000000000000000000000000000000000000000000000000 is not on the secp256k1 curve", + ) +} + +func TestConversationKeyFail003(t *testing.T) { + // pub2 is invalid, no sqrt, all-ff + assertConversationKeyFail( + t, + "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364139", + "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + "invalid public key: x >= field prime", + // "invalid public key: x >= field prime", + ) +} + +func TestConversationKeyFail004(t *testing.T) { + // sec1 == curve.n + assertConversationKeyFail( + t, + "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", + "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + "invalid private key: x coordinate fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 is not on the secp256k1 curve", + ) +} + +func TestConversationKeyFail005(t *testing.T) { + // pub2 is invalid, no sqrt + assertConversationKeyFail( + t, + "0000000000000000000000000000000000000000000000000000000000000002", + "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + "invalid public key: x coordinate 1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef is not on the secp256k1 curve", + // "invalid public key: x coordinate 1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef is not on the secp256k1 curve", + ) +} + +func TestConversationKeyFail006(t *testing.T) { + // pub2 is point of order 3 on twist + assertConversationKeyFail( + t, + "0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20", + "0000000000000000000000000000000000000000000000000000000000000000", + "invalid public key: x coordinate 0000000000000000000000000000000000000000000000000000000000000000 is not on the secp256k1 curve", + // "invalid public key: x coordinate 0000000000000000000000000000000000000000000000000000000000000000 is not on the secp256k1 curve", + ) +} + +func TestConversationKeyFail007(t *testing.T) { + // pub2 is point of order 13 on twist + assertConversationKeyFail( + t, + "0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20", + "eb1f7200aecaa86682376fb1c13cd12b732221e774f553b0a0857f88fa20f86d", + "invalid public key: x coordinate eb1f7200aecaa86682376fb1c13cd12b732221e774f553b0a0857f88fa20f86d is not on the secp256k1 curve", + // "invalid public key: x coordinate eb1f7200aecaa86682376fb1c13cd12b732221e774f553b0a0857f88fa20f86d is not on the secp256k1 curve", + ) +} + +func TestConversationKeyFail008(t *testing.T) { + // pub2 is point of order 3319 on twist + assertConversationKeyFail( + t, + "0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20", + "709858a4c121e4a84eb59c0ded0261093c71e8ca29efeef21a6161c447bcaf9f", + "invalid public key: x coordinate 709858a4c121e4a84eb59c0ded0261093c71e8ca29efeef21a6161c447bcaf9f is not on the secp256k1 curve", + // "invalid public key: x coordinate 
709858a4c121e4a84eb59c0ded0261093c71e8ca29efeef21a6161c447bcaf9f is not on the secp256k1 curve", + ) +} + +func TestDecryptFail001(t *testing.T) { + assertDecryptFail( + t, + "ca2527a037347b91bea0c8a30fc8d9600ffd81ec00038671e3a0f0cb0fc9f642", + // "daaea5ca345b268e5b62060ca72c870c48f713bc1e00ff3fc0ddb78e826f10db", + "n o b l e", + "#Atqupco0WyaOW2IGDKcshwxI9xO8HgD/P8Ddt46CbxDbrhdG8VmJdU0MIDf06CUvEvdnr1cp1fiMtlM/GrE92xAc1K5odTpCzUB+mjXgbaqtntBUbTToSUoT0ovrlPwzGjyp", + "unknown version", + ) +} + +func TestDecryptFail002(t *testing.T) { + assertDecryptFail( + t, + "36f04e558af246352dcf73b692fbd3646a2207bd8abd4b1cd26b234db84d9481", + // "ad408d4be8616dc84bb0bf046454a2a102edac937c35209c43cd7964c5feb781", + "⚠️", + "AK1AjUvoYW3IS7C/BGRUoqEC7ayTfDUgnEPNeWTF/reBZFaha6EAIRueE9D1B1RuoiuFScC0Q94yjIuxZD3JStQtE8JMNacWFs9rlYP+ZydtHhRucp+lxfdvFlaGV/sQlqZz", + "unknown version 0", + ) +} + +func TestDecryptFail003(t *testing.T) { + assertDecryptFail( + t, + "ca2527a037347b91bea0c8a30fc8d9600ffd81ec00038671e3a0f0cb0fc9f642", + // "daaea5ca345b268e5b62060ca72c870c48f713bc1e00ff3fc0ddb78e826f10db", + "n o s t r", + "Atфupco0WyaOW2IGDKcshwxI9xO8HgD/P8Ddt46CbxDbrhdG8VmJZE0UICD06CUvEvdnr1cp1fiMtlM/GrE92xAc1EwsVCQEgWEu2gsHUVf4JAa3TpgkmFc3TWsax0v6n/Wq", + "illegal base64 data at input byte 2", + ) +} + +func TestDecryptFail004(t *testing.T) { + assertDecryptFail( + t, + "cff7bd6a3e29a450fd27f6c125d5edeb0987c475fd1e8d97591e0d4d8a89763c", + // "09ff97750b084012e15ecb84614ce88180d7b8ec0d468508a86b6d70c0361a25", + "¯\\_(ツ)_/¯", + "Agn/l3ULCEAS4V7LhGFM6IGA17jsDUaFCKhrbXDANholyySBfeh+EN8wNB9gaLlg4j6wdBYh+3oK+mnxWu3NKRbSvQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + "invalid hmac", + ) +} + +func TestDecryptFail005(t *testing.T) { + assertDecryptFail( + t, + "cfcc9cf682dfb00b11357f65bdc45e29156b69db424d20b3596919074f5bf957", + // "65b14b0b949aaa7d52c417eb753b390e8ad6d84b23af4bec6d9bfa3e03a08af4", + "🥎", + "AmWxSwuUmqp9UsQX63U7OQ6K1thLI69L7G2b+j4DoIr0oRWQ8avl4OLqWZiTJ10vIgKrNqjoaX+fNhE9RqmR5g0f6BtUg1ijFMz71MO1D4lQLQfW7+UHva8PGYgQ1QpHlKgR", + "invalid hmac", + ) +} + +func TestDecryptFail006(t *testing.T) { + assertDecryptFail( + t, + "5254827d29177622d40a7b67cad014fe7137700c3c523903ebbe3e1b74d40214", + // "7ab65dbb8bbc2b8e35cafb5745314e1f050325a864d11d0475ef75b3660d91c1", + "elliptic-curve cryptography", + "Anq2XbuLvCuONcr7V0UxTh8FAyWoZNEdBHXvdbNmDZHB573MI7R7rrTYftpqmvUpahmBC2sngmI14/L0HjOZ7lWGJlzdh6luiOnGPc46cGxf08MRC4CIuxx3i2Lm0KqgJ7vA", + "invalid padding", + ) +} + +func TestDecryptFail007(t *testing.T) { + assertDecryptFail( + t, + "fea39aca9aa8340c3a78ae1f0902aa7e726946e4efcd7783379df8096029c496", + // "7d4283e3b54c885d6afee881f48e62f0a3f5d7a9e1cb71ccab594a7882c39330", + "noble", + "An1Cg+O1TIhdav7ogfSOYvCj9dep4ctxzKtZSniCw5MwRrrPJFyAQYZh5VpjC2QYzny5LIQ9v9lhqmZR4WBYRNJ0ognHVNMwiFV1SHpvUFT8HHZN/m/QarflbvDHAtO6pY16", + "invalid padding", + ) +} + +func TestDecryptFail008(t *testing.T) { + assertDecryptFail( + t, + "0c4cffb7a6f7e706ec94b2e879f1fc54ff8de38d8db87e11787694d5392d5b3f", + // "6f9fd72667c273acd23ca6653711a708434474dd9eb15c3edb01ce9a95743e9b", + "censorship-resistant and global social network", + "Am+f1yZnwnOs0jymZTcRpwhDRHTdnrFcPtsBzpqVdD6b2NZDaNm/TPkZGr75kbB6tCSoq7YRcbPiNfJXNch3Tf+o9+zZTMxwjgX/nm3yDKR2kHQMBhVleCB9uPuljl40AJ8kXRD0gjw+aYRJFUMK9gCETZAjjmrsCM+nGRZ1FfNsHr6Z", + "invalid padding", + ) +} + +func TestDecryptFail009(t *testing.T) { + assertDecryptFail( + t, + "5cd2d13b9e355aeb2452afbd3786870dbeecb9d355b12cb0a3b6e9da5744cd35", + // 
"b60036976a1ada277b948fd4caa065304b96964742b89d26f26a25263a5060bd", + "0", + "", + "invalid payload length: 0", + ) +} + +func TestDecryptFail010(t *testing.T) { + assertDecryptFail( + t, + "d61d3f09c7dfe1c0be91af7109b60a7d9d498920c90cbba1e137320fdd938853", + // "1a29d02c8b4527745a2ccb38bfa45655deb37bc338ab9289d756354cea1fd07c", + "1", + "Ag==", + "invalid payload length: 4", + ) +} + +func TestDecryptFail011(t *testing.T) { + assertDecryptFail( + t, + "873bb0fc665eb950a8e7d5971965539f6ebd645c83c08cd6a85aafbad0f0bc47", + // "c826d3c38e765ab8cc42060116cd1464b2a6ce01d33deba5dedfb48615306d4a", + "2", + "AqxgToSh3H7iLYRJjoWAM+vSv/Y1mgNlm6OWWjOYUClrFF8=", + "invalid payload length: 48", + ) +} + +func TestDecryptFail012(t *testing.T) { + assertDecryptFail( + t, + "9f2fef8f5401ac33f74641b568a7a30bb19409c76ffdc5eae2db6b39d2617fbe", + // "9ff6484642545221624eaac7b9ea27133a4cc2356682a6033aceeef043549861", + "3", + "Ap/2SEZCVFIhYk6qx7nqJxM6TMI1ZoKmAzrO7vBDVJhhuZXWiM20i/tIsbjT0KxkJs2MZjh1oXNYMO9ggfk7i47WQA==", + "invalid payload length: 92", + ) +} + +func TestConversationKey001(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "315e59ff51cb9209768cf7da80791ddcaae56ac9775eb25b6dee1234bc5d2268", + "c2f9d9948dc8c7c38321e4b85c8558872eafa0641cd269db76848a6073e69133", + "3dfef0ce2a4d80a25e7a328accf73448ef67096f65f79588e358d9a0eb9013f1", + ) +} + +func TestConversationKey002(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "a1e37752c9fdc1273be53f68c5f74be7c8905728e8de75800b94262f9497c86e", + "03bb7947065dde12ba991ea045132581d0954f042c84e06d8c00066e23c1a800", + "4d14f36e81b8452128da64fe6f1eae873baae2f444b02c950b90e43553f2178b", + ) +} + +func TestConversationKey003(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "98a5902fd67518a0c900f0fb62158f278f94a21d6f9d33d30cd3091195500311", + "aae65c15f98e5e677b5050de82e3aba47a6fe49b3dab7863cf35d9478ba9f7d1", + "9c00b769d5f54d02bf175b7284a1cbd28b6911b06cda6666b2243561ac96bad7", + ) +} + +func TestConversationKey004(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "86ae5ac8034eb2542ce23ec2f84375655dab7f836836bbd3c54cefe9fdc9c19f", + "59f90272378089d73f1339710c02e2be6db584e9cdbe86eed3578f0c67c23585", + "19f934aafd3324e8415299b64df42049afaa051c71c98d0aa10e1081f2e3e2ba", + ) +} + +func TestConversationKey005(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "2528c287fe822421bc0dc4c3615878eb98e8a8c31657616d08b29c00ce209e34", + "f66ea16104c01a1c532e03f166c5370a22a5505753005a566366097150c6df60", + "c833bbb292956c43366145326d53b955ffb5da4e4998a2d853611841903f5442", + ) +} + +func TestConversationKey006(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "49808637b2d21129478041813aceb6f2c9d4929cd1303cdaf4fbdbd690905ff2", + "74d2aab13e97827ea21baf253ad7e39b974bb2498cc747cdb168582a11847b65", + "4bf304d3c8c4608864c0fe03890b90279328cd24a018ffa9eb8f8ccec06b505d", + ) +} + +func TestConversationKey007(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "af67c382106242c5baabf856efdc0629cc1c5b4061f85b8ceaba52aa7e4b4082", + "bdaf0001d63e7ec994fad736eab178ee3c2d7cfc925ae29f37d19224486db57b", + "a3a575dd66d45e9379904047ebfb9a7873c471687d0535db00ef2daa24b391db", + ) +} + +func TestConversationKey008(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "0e44e2d1db3c1717b05ffa0f08d102a09c554a1cbbf678ab158b259a44e682f1", + "1ffa76c5cc7a836af6914b840483726207cb750889753d7499fb8b76aa8fe0de", + "a39970a667b7f861f100e3827f4adbf6f464e2697686fe1a81aeda817d6b8bdf", + ) +} + +func TestConversationKey009(t *testing.T) 
{ + assertConversationKeyGenerationPub( + t, + "5fc0070dbd0666dbddc21d788db04050b86ed8b456b080794c2a0c8e33287bb6", + "31990752f296dd22e146c9e6f152a269d84b241cc95bb3ff8ec341628a54caf0", + "72c21075f4b2349ce01a3e604e02a9ab9f07e35dd07eff746de348b4f3c6365e", + ) +} + +func TestConversationKey010(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "1b7de0d64d9b12ddbb52ef217a3a7c47c4362ce7ea837d760dad58ab313cba64", + "24383541dd8083b93d144b431679d70ef4eec10c98fceef1eff08b1d81d4b065", + "dd152a76b44e63d1afd4dfff0785fa07b3e494a9e8401aba31ff925caeb8f5b1", + ) +} + +func TestConversationKey011(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "df2f560e213ca5fb33b9ecde771c7c0cbd30f1cf43c2c24de54480069d9ab0af", + "eeea26e552fc8b5e377acaa03e47daa2d7b0c787fac1e0774c9504d9094c430e", + "770519e803b80f411c34aef59c3ca018608842ebf53909c48d35250bd9323af6", + ) +} + +func TestConversationKey012(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "cffff919fcc07b8003fdc63bc8a00c0f5dc81022c1c927c62c597352190d95b9", + "eb5c3cca1a968e26684e5b0eb733aecfc844f95a09ac4e126a9e58a4e4902f92", + "46a14ee7e80e439ec75c66f04ad824b53a632b8409a29bbb7c192e43c00bb795", + ) +} + +func TestConversationKey013(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "64ba5a685e443e881e9094647ddd32db14444bb21aa7986beeba3d1c4673ba0a", + "50e6a4339fac1f3bf86f2401dd797af43ad45bbf58e0801a7877a3984c77c3c4", + "968b9dbbfcede1664a4ca35a5d3379c064736e87aafbf0b5d114dff710b8a946", + ) +} + +func TestConversationKey014(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "dd0c31ccce4ec8083f9b75dbf23cc2878e6d1b6baa17713841a2428f69dee91a", + "b483e84c1339812bed25be55cff959778dfc6edde97ccd9e3649f442472c091b", + "09024503c7bde07eb7865505891c1ea672bf2d9e25e18dd7a7cea6c69bf44b5d", + ) +} + +func TestConversationKey015(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "af71313b0d95c41e968a172b33ba5ebd19d06cdf8a7a98df80ecf7af4f6f0358", + "2a5c25266695b461ee2af927a6c44a3c598b8095b0557e9bd7f787067435bc7c", + "fe5155b27c1c4b4e92a933edae23726a04802a7cc354a77ac273c85aa3c97a92", + ) +} + +func TestConversationKey016(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "6636e8a389f75fe068a03b3edb3ea4a785e2768e3f73f48ffb1fc5e7cb7289dc", + "514eb2064224b6a5829ea21b6e8f7d3ea15ff8e70e8555010f649eb6e09aec70", + "ff7afacd4d1a6856d37ca5b546890e46e922b508639214991cf8048ddbe9745c", + ) +} + +func TestConversationKey017(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "94b212f02a3cfb8ad147d52941d3f1dbe1753804458e6645af92c7b2ea791caa", + "f0cac333231367a04b652a77ab4f8d658b94e86b5a8a0c472c5c7b0d4c6a40cc", + "e292eaf873addfed0a457c6bd16c8effde33d6664265697f69f420ab16f6669b", + ) +} + +func TestConversationKey018(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "aa61f9734e69ae88e5d4ced5aae881c96f0d7f16cca603d3bed9eec391136da6", + "4303e5360a884c360221de8606b72dd316da49a37fe51e17ada4f35f671620a6", + "8e7d44fd4767456df1fb61f134092a52fcd6836ebab3b00766e16732683ed848", + ) +} + +func TestConversationKey019(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "5e914bdac54f3f8e2cba94ee898b33240019297b69e96e70c8a495943a72fc98", + "5bd097924f606695c59f18ff8fd53c174adbafaaa71b3c0b4144a3e0a474b198", + "f5a0aecf2984bf923c8cd5e7bb8be262d1a8353cb93959434b943a07cf5644bc", + ) +} + +func TestConversationKey020(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "8b275067add6312ddee064bcdbeb9d17e88aa1df36f430b2cea5cc0413d8278a", + 
"65bbbfca819c90c7579f7a82b750a18c858db1afbec8f35b3c1e0e7b5588e9b8", + "2c565e7027eb46038c2263563d7af681697107e975e9914b799d425effd248d6", + ) +} + +func TestConversationKey021(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "1ac848de312285f85e0f7ec208aac20142a1f453402af9b34ec2ec7a1f9c96fc", + "45f7318fe96034d23ee3ddc25b77f275cc1dd329664dd51b89f89c4963868e41", + "b56e970e5057a8fd929f8aad9248176b9af87819a708d9ddd56e41d1aec74088", + ) +} + +func TestConversationKey022(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "295a1cf621de401783d29d0e89036aa1c62d13d9ad307161b4ceb535ba1b40e6", + "840115ddc7f1034d3b21d8e2103f6cb5ab0b63cf613f4ea6e61ae3d016715cdd", + "b4ee9c0b9b9fef88975773394f0a6f981ca016076143a1bb575b9ff46e804753", + ) +} + +func TestConversationKey023(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "a28eed0fe977893856ab9667e06ace39f03abbcdb845c329a1981be438ba565d", + "b0f38b950a5013eba5ab4237f9ed29204a59f3625c71b7e210fec565edfa288c", + "9d3a802b45bc5aeeb3b303e8e18a92ddd353375710a31600d7f5fff8f3a7285b", + ) +} + +func TestConversationKey024(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "7ab65af72a478c05f5c651bdc4876c74b63d20d04cdbf71741e46978797cd5a4", + "f1112159161b568a9cb8c9dd6430b526c4204bcc8ce07464b0845b04c041beda", + "943884cddaca5a3fef355e9e7f08a3019b0b66aa63ec90278b0f9fdb64821e79", + ) +} + +func TestConversationKey025(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "95c79a7b75ba40f2229e85756884c138916f9d103fc8f18acc0877a7cceac9fe", + "cad76bcbd31ca7bbda184d20cc42f725ed0bb105b13580c41330e03023f0ffb3", + "81c0832a669eea13b4247c40be51ccfd15bb63fcd1bba5b4530ce0e2632f301b", + ) +} + +func TestConversationKey026(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "baf55cc2febd4d980b4b393972dfc1acf49541e336b56d33d429bce44fa12ec9", + "0c31cf87fe565766089b64b39460ebbfdedd4a2bc8379be73ad3c0718c912e18", + "37e2344da9ecdf60ae2205d81e89d34b280b0a3f111171af7e4391ded93b8ea6", + ) +} + +func TestConversationKey027(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "6eeec45acd2ed31693c5256026abf9f072f01c4abb61f51cf64e6956b6dc8907", + "e501b34ed11f13d816748c0369b0c728e540df3755bab59ed3327339e16ff828", + "afaa141b522ddb27bb880d768903a7f618bb8b6357728cae7fb03af639b946e6", + ) +} + +func TestConversationKey028(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "261a076a9702af1647fb343c55b3f9a4f1096273002287df0015ba81ce5294df", + "b2777c863878893ae100fb740c8fab4bebd2bf7be78c761a75593670380a6112", + "76f8d2853de0734e51189ced523c09427c3e46338b9522cd6f74ef5e5b475c74", + ) +} + +func TestConversationKey029(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "ed3ec71ca406552ea41faec53e19f44b8f90575eda4b7e96380f9cc73c26d6f3", + "86425951e61f94b62e20cae24184b42e8e17afcf55bafa58645efd0172624fae", + "f7ffc520a3a0e9e9b3c0967325c9bf12707f8e7a03f28b6cd69ae92cf33f7036", + ) +} + +func TestConversationKey030(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "5a788fc43378d1303ac78639c59a58cb88b08b3859df33193e63a5a3801c722e", + "a8cba2f87657d229db69bee07850fd6f7a2ed070171a06d006ec3a8ac562cf70", + "7d705a27feeedf78b5c07283362f8e361760d3e9f78adab83e3ae5ce7aeb6409", + ) +} + +func TestConversationKey031(t *testing.T) { + assertConversationKeyGenerationPub( + t, + "63bffa986e382b0ac8ccc1aa93d18a7aa445116478be6f2453bad1f2d3af2344", + "b895c70a83e782c1cf84af558d1038e6b211c6f84ede60408f519a293201031d", + "3a3b8f00d4987fc6711d9be64d9c59cf9a709c6c6481c2cde404bcc7a28f174e", + ) +} + +func TestConversationKey032(t 
*testing.T) { + assertConversationKeyGenerationPub( + t, + "e4a8bcacbf445fd3721792b939ff58e691cdcba6a8ba67ac3467b45567a03e5c", + "b54053189e8c9252c6950059c783edb10675d06d20c7b342f73ec9fa6ed39c9d", + "7b3933b4ef8189d347169c7955589fc1cfc01da5239591a08a183ff6694c44ad", + ) +} + +func TestConversationKey033(t *testing.T) { + // sec1 = n-2, pub2: random, 0x02 + assertConversationKeyGenerationPub( + t, + "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364139", + "0000000000000000000000000000000000000000000000000000000000000002", + "8b6392dbf2ec6a2b2d5b1477fc2be84d63ef254b667cadd31bd3f444c44ae6ba", + ) +} + +func TestConversationKey034(t *testing.T) { + // sec1 = 2, pub2: rand + assertConversationKeyGenerationPub( + t, + "0000000000000000000000000000000000000000000000000000000000000002", + "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdeb", + "be234f46f60a250bef52a5ee34c758800c4ca8e5030bf4cc1a31d37ba2104d43", + ) +} + +func TestConversationKey035(t *testing.T) { + // sec1 == pub2 + assertConversationKeyGenerationPub( + t, + "0000000000000000000000000000000000000000000000000000000000000001", + "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798", + "3b4610cb7189beb9cc29eb3716ecc6102f1247e8f3101a03a1787d8908aeb54e", + ) +} + +func TestMessageKeyGeneration001(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "e1e6f880560d6d149ed83dcc7e5861ee62a5ee051f7fde9975fe5d25d2a02d72", + "f145f3bed47cb70dbeaac07f3a3fe683e822b3715edb7c4fe310829014ce7d76", + "c4ad129bb01180c0933a160c", + "027c1db445f05e2eee864a0975b0ddef5b7110583c8c192de3732571ca5838c4", + ) +} + +func TestMessageKeyGeneration002(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "e1d6d28c46de60168b43d79dacc519698512ec35e8ccb12640fc8e9f26121101", + "e35b88f8d4a8f1606c5082f7a64b100e5d85fcdb2e62aeafbec03fb9e860ad92", + "22925e920cee4a50a478be90", + "46a7c55d4283cb0df1d5e29540be67abfe709e3b2e14b7bf9976e6df994ded30", + ) +} + +func TestMessageKeyGeneration003(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "cfc13bef512ac9c15951ab00030dfaf2626fdca638dedb35f2993a9eeb85d650", + "020783eb35fdf5b80ef8c75377f4e937efb26bcbad0e61b4190e39939860c4bf", + "d3594987af769a52904656ac", + "237ec0ccb6ebd53d179fa8fd319e092acff599ef174c1fdafd499ef2b8dee745", + ) +} + +func TestMessageKeyGeneration004(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "ea6eb84cac23c5c1607c334e8bdf66f7977a7e374052327ec28c6906cbe25967", + "ff68db24b34fa62c78ac5ffeeaf19533afaedf651fb6a08384e46787f6ce94be", + "50bb859aa2dde938cc49ec7a", + "06ff32e1f7b29753a727d7927b25c2dd175aca47751462d37a2039023ec6b5a6", + ) +} + +func TestMessageKeyGeneration005(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "8c2e1dd3792802f1f9f7842e0323e5d52ad7472daf360f26e15f97290173605d", + "2f9daeda8683fdeede81adac247c63cc7671fa817a1fd47352e95d9487989d8b", + "400224ba67fc2f1b76736916", + "465c05302aeeb514e41c13ed6405297e261048cfb75a6f851ffa5b445b746e4b", + ) +} + +func TestMessageKeyGeneration006(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "05c28bf3d834fa4af8143bf5201a856fa5fac1a3aee58f4c93a764fc2f722367", + 
"1e3d45777025a035be566d80fd580def73ed6f7c043faec2c8c1c690ad31c110", + "021905b1ea3afc17cb9bf96f", + "74a6e481a89dcd130aaeb21060d7ec97ad30f0007d2cae7b1b11256cc70dfb81", + ) +} + +func TestMessageKeyGeneration007(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "5e043fb153227866e75a06d60185851bc90273bfb93342f6632a728e18a07a17", + "1ea72c9293841e7737c71567d8120145a58991aaa1c436ef77bf7adb83f882f1", + "72f69a5a5f795465cee59da8", + "e9daa1a1e9a266ecaa14e970a84bce3fbbf329079bbccda626582b4e66a0d4c9", + ) +} + +func TestMessageKeyGeneration009(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "7be7338eaf06a87e274244847fe7a97f5c6a91f44adc18fcc3e411ad6f786dbf", + "881e7968a1f0c2c80742ee03cd49ea587e13f22699730f1075ade01931582bf6", + "6e69be92d61c04a276021565", + "901afe79e74b19967c8829af23617d7d0ffbf1b57190c096855c6a03523a971b", + ) +} + +func TestMessageKeyGeneration010(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "94571c8d590905bad7becd892832b472f2aa5212894b6ce96e5ba719c178d976", + "f80873dd48466cb12d46364a97b8705c01b9b4230cb3ec3415a6b9551dc42eef", + "3dda53569cfcb7fac1805c35", + "e9fc264345e2839a181affebc27d2f528756e66a5f87b04bf6c5f1997047051e", + ) +} + +func TestMessageKeyGeneration011(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "13a6ee974b1fd759135a2c2010e3cdda47081c78e771125e4f0c382f0284a8cb", + "bc5fb403b0bed0d84cf1db872b6522072aece00363178c98ad52178d805fca85", + "65064239186e50304cc0f156", + "e872d320dde4ed3487958a8e43b48aabd3ced92bc24bb8ff1ccb57b590d9701a", + ) +} + +func TestMessageKeyGeneration012(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "082fecdb85f358367b049b08be0e82627ae1d8edb0f27327ccb593aa2613b814", + "1fbdb1cf6f6ea816349baf697932b36107803de98fcd805ebe9849b8ad0e6a45", + "2e605e1d825a3eaeb613db9c", + "fae910f591cf3c7eb538c598583abad33bc0a03085a96ca4ea3a08baf17c0eec", + ) +} + +func TestMessageKeyGeneration013(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "4c19020c74932c30ec6b2d8cd0d5bb80bd0fc87da3d8b4859d2fb003810afd03", + "1ab9905a0189e01cda82f843d226a82a03c4f5b6dbea9b22eb9bc953ba1370d4", + "cbb2530ea653766e5a37a83a", + "267f68acac01ac7b34b675e36c2cef5e7b7a6b697214add62a491bedd6efc178", + ) +} + +func TestMessageKeyGeneration014(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "67723a3381497b149ce24814eddd10c4c41a1e37e75af161930e6b9601afd0ff", + "9ecbd25e7e2e6c97b8c27d376dcc8c5679da96578557e4e21dba3a7ef4e4ac07", + "ef649fcf335583e8d45e3c2e", + "04dbbd812fa8226fdb45924c521a62e3d40a9e2b5806c1501efdeba75b006bf1", + ) +} + +func TestMessageKeyGeneration015(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "42063fe80b093e8619b1610972b4c3ab9e76c14fd908e642cd4997cafb30f36c", + "211c66531bbcc0efcdd0130f9f1ebc12a769105eb39608994bcb188fa6a73a4a", + "67803605a7e5010d0f63f8c8", + "e840e4e8921b57647369d121c5a19310648105dbdd008200ebf0d3b668704ff8", + ) +} + +func TestMessageKeyGeneration016(t *testing.T) { + assertMessageKeyGeneration( + t, + 
"a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "b5ac382a4be7ac03b554fe5f3043577b47ea2cd7cfc7e9ca010b1ffbb5cf1a58", + "b3b5f14f10074244ee42a3837a54309f33981c7232a8b16921e815e1f7d1bb77", + "4e62a0073087ed808be62469", + "c8efa10230b5ea11633816c1230ca05fa602ace80a7598916d83bae3d3d2ccd7", + ) +} + +func TestMessageKeyGeneration017(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "e9d1eba47dd7e6c1532dc782ff63125db83042bb32841db7eeafd528f3ea7af9", + "54241f68dc2e50e1db79e892c7c7a471856beeb8d51b7f4d16f16ab0645d2f1a", + "a963ed7dc29b7b1046820a1d", + "aba215c8634530dc21c70ddb3b3ee4291e0fa5fa79be0f85863747bde281c8b2", + ) +} + +func TestMessageKeyGeneration018(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "a94ecf8efeee9d7068de730fad8daf96694acb70901d762de39fa8a5039c3c49", + "c0565e9e201d2381a2368d7ffe60f555223874610d3d91fbbdf3076f7b1374dd", + "329bb3024461e84b2e1c489b", + "ac42445491f092481ce4fa33b1f2274700032db64e3a15014fbe8c28550f2fec", + ) +} + +func TestMessageKeyGeneration019(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "533605ea214e70c25e9a22f792f4b78b9f83a18ab2103687c8a0075919eaaa53", + "ab35a5e1e54d693ff023db8500d8d4e79ad8878c744e0eaec691e96e141d2325", + "653d759042b85194d4d8c0a7", + "b43628e37ba3c31ce80576f0a1f26d3a7c9361d29bb227433b66f49d44f167ba", + ) +} + +func TestMessageKeyGeneration020(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "7f38df30ceea1577cb60b355b4f5567ff4130c49e84fed34d779b764a9cc184c", + "a37d7f211b84a551a127ff40908974eb78415395d4f6f40324428e850e8c42a3", + "b822e2c959df32b3cb772a7c", + "1ba31764f01f69b5c89ded2d7c95828e8052c55f5d36f1cd535510d61ba77420", + ) +} + +func TestMessageKeyGeneration021(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "11b37f9dbc4d0185d1c26d5f4ed98637d7c9701fffa65a65839fa4126573a4e5", + "964f38d3a31158a5bfd28481247b18dd6e44d69f30ba2a40f6120c6d21d8a6ba", + "5f72c5b87c590bcd0f93b305", + "2fc4553e7cedc47f29690439890f9f19c1077ef3e9eaeef473d0711e04448918", + ) +} + +func TestMessageKeyGeneration022(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "8be790aa483d4cdd843189f71f135b3ec7e31f381312c8fe9f177aab2a48eafa", + "95c8c74d633721a131316309cf6daf0804d59eaa90ea998fc35bac3d2fbb7a94", + "409a7654c0e4bf8c2c6489be", + "21bb0b06eb2b460f8ab075f497efa9a01c9cf9146f1e3986c3bf9da5689b6dc4", + ) +} + +func TestMessageKeyGeneration023(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "19fd2a718ea084827d6bd73f509229ddf856732108b59fc01819f611419fd140", + "cc6714b9f5616c66143424e1413d520dae03b1a4bd202b82b0a89b0727f5cdc8", + "1b7fd2534f015a8f795d8f32", + "2bef39c4ce5c3c59b817e86351373d1554c98bc131c7e461ed19d96cfd6399a0", + ) +} + +func TestMessageKeyGeneration024(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "3c2acd893952b2f6d07d8aea76f545ca45961a93fe5757f6a5a80811d5e0255d", + "c8de6c878cb469278d0af894bc181deb6194053f73da5014c2b5d2c8db6f2056", + "6ffe4f1971b904a1b1a81b99", + "df1cd69dd3646fca15594284744d4211d70e7d8472e545d276421fbb79559fd4", + ) +} + +func 
TestMessageKeyGeneration025(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "7dbea4cead9ac91d4137f1c0a6eebb6ba0d1fb2cc46d829fbc75f8d86aca6301", + "c8e030f6aa680c3d0b597da9c92bb77c21c4285dd620c5889f9beba7446446b0", + "a9b5a67d081d3b42e737d16f", + "355a85f551bc3cce9a14461aa60994742c9bbb1c81a59ca102dc64e61726ab8e", + ) +} + +func TestMessageKeyGeneration026(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "45422e676cdae5f1071d3647d7a5f1f5adafb832668a578228aa1155a491f2f3", + "758437245f03a88e2c6a32807edfabff51a91c81ca2f389b0b46f2c97119ea90", + "263830a065af33d9c6c5aa1f", + "7c581cf3489e2de203a95106bfc0de3d4032e9d5b92b2b61fb444acd99037e17", + ) +} + +func TestMessageKeyGeneration027(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "babc0c03fad24107ad60678751f5db2678041ff0d28671ede8d65bdf7aa407e9", + "bd68a28bd48d9ffa3602db72c75662ac2848a0047a313d2ae2d6bc1ac153d7e9", + "d0f9d2a1ace6c758f594ffdd", + "eb435e3a642adfc9d59813051606fc21f81641afd58ea6641e2f5a9f123bb50a", + ) +} + +func TestMessageKeyGeneration028(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "7a1b8aac37d0d20b160291fad124ab697cfca53f82e326d78fef89b4b0ea8f83", + "9e97875b651a1d30d17d086d1e846778b7faad6fcbc12e08b3365d700f62e4fe", + "ccdaad5b3b7645be430992eb", + "6f2f55cf35174d75752f63c06cc7cbc8441759b142999ed2d5a6d09d263e1fc4", + ) +} + +func TestMessageKeyGeneration029(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "8370e4e32d7e680a83862cab0da6136ef607014d043e64cdf5ecc0c4e20b3d9a", + "1472bed5d19db9c546106de946e0649cd83cc9d4a66b087a65906e348dcf92e2", + "ed02dece5fc3a186f123420b", + "7b3f7739f49d30c6205a46b174f984bb6a9fc38e5ccfacef2dac04fcbd3b184e", + ) +} + +func TestMessageKeyGeneration030(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "9f1c5e8a29cd5677513c2e3a816551d6833ee54991eb3f00d5b68096fc8f0183", + "5e1a7544e4d4dafe55941fcbdf326f19b0ca37fc49c4d47e9eec7fb68cde4975", + "7d9acb0fdc174e3c220f40de", + "e265ab116fbbb86b2aefc089a0986a0f5b77eda50c7410404ad3b4f3f385c7a7", + ) +} + +func TestMessageKeyGeneration031(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "c385aa1c37c2bfd5cc35fcdbdf601034d39195e1cabff664ceb2b787c15d0225", + "06bf4e60677a13e54c4a38ab824d2ef79da22b690da2b82d0aa3e39a14ca7bdd", + "26b450612ca5e905b937e147", + "22208152be2b1f5f75e6bfcc1f87763d48bb7a74da1be3d102096f257207f8b3", + ) +} + +func TestMessageKeyGeneration032(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "3ff73528f88a50f9d35c0ddba4560bacee5b0462d0f4cb6e91caf41847040ce4", + "850c8a17a23aa761d279d9901015b2bbdfdff00adbf6bc5cf22bd44d24ecabc9", + "4a296a1fb0048e5020d3b129", + "b1bf49a533c4da9b1d629b7ff30882e12d37d49c19abd7b01b7807d75ee13806", + ) +} + +func TestMessageKeyGeneration033(t *testing.T) { + assertMessageKeyGeneration( + t, + "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54", + "2dcf39b9d4c52f1cb9db2d516c43a7c6c3b8c401f6a4ac8f131a9e1059957036", + "17f8057e6156ba7cc5310d01eda8c40f9aa388f9fd1712deb9511f13ecc37d27", + "a8188daff807a1182200b39d", + 
"47b89da97f68d389867b5d8a2d7ba55715a30e3d88a3cc11f3646bc2af5580ef", + ) +} + +func TestMaxLength(t *testing.T) { + sk1 := keys.GeneratePrivateKey() + sk2 := keys.GeneratePrivateKey() + pub2, _ := keys.GetPublicKeyHex(string(sk2)) + salt := make([]byte, 32) + rand.Read(salt) + conversationKey, _ := GenerateConversationKey(pub2, string(sk1)) + plaintext := strings.Repeat("a", MaxPlaintextSize) + encrypted, err := Encrypt(plaintext, conversationKey, WithCustomNonce(salt)) + if chk.E(err) { + t.Error(err) + } + + assertCryptPub( + t, + string(sk1), + pub2, + fmt.Sprintf("%x", conversationKey), + fmt.Sprintf("%x", salt), + plaintext, + encrypted, + ) +} + +func assertCryptPub( + t *testing.T, + sk1, pub2, conversationKey, salt, plaintext, expected string, +) { + var ( + k1, s []byte + actual, decrypted string + ok bool + err error + ) + k1, err = hex.Dec(conversationKey) + if ok = assert.NoErrorf( + t, err, "hex decode failed for conversation key: %v", err, + ); !ok { + return + } + if ok = assertConversationKeyGenerationPub( + t, sk1, pub2, conversationKey, + ); !ok { + return + } + s, err = hex.Dec(salt) + if ok = assert.NoErrorf( + t, err, "hex decode failed for salt: %v", err, + ); !ok { + return + } + actual, err = Encrypt(plaintext, k1, WithCustomNonce(s)) + if ok = assert.NoError(t, err, "encryption failed: %v", err); !ok { + return + } + if ok = assert.Equalf(t, expected, actual, "wrong encryption"); !ok { + return + } + decrypted, err = Decrypt(expected, k1) + if ok = assert.NoErrorf(t, err, "decryption failed: %v", err); !ok { + return + } + assert.Equal(t, decrypted, plaintext, "wrong decryption") +} diff --git a/env/config.go b/env/config.go index f075121..fa05de3 100644 --- a/env/config.go +++ b/env/config.go @@ -1,6 +1,6 @@ -// Package config is an implementation of the env.Source interface from +// Package env is an implementation of the env.Source interface from // go-simpler.org -package config +package env import ( "orly.dev/chk" diff --git a/envelopes/authenvelope/authenvelope.go b/envelopes/authenvelope/authenvelope.go index 2e1719c..f5182c5 100644 --- a/envelopes/authenvelope/authenvelope.go +++ b/envelopes/authenvelope/authenvelope.go @@ -4,13 +4,13 @@ package authenvelope import ( "io" - "orly.dev/chk" - envs "orly.dev/envelopes" "orly.dev/errorf" - "orly.dev/event" - "orly.dev/interfaces/codec" "orly.dev/log" + + "orly.dev/codec" + envs "orly.dev/envelopes" + "orly.dev/event" "orly.dev/text" ) @@ -105,8 +105,6 @@ func NewResponse() *Response { return &Response{} } // NewResponseWith creates a new Response with a provided event.E. func NewResponseWith(event *event.E) *Response { return &Response{Event: event} } -func (en *Response) Id() []byte { return en.Event.Id } - // Label returns the label of a auth Response envelope. 
func (en *Response) Label() string { return L } diff --git a/envelopes/authenvelope/authenvelope_test.go b/envelopes/authenvelope/authenvelope_test.go index a60ae35..8ca0e48 100644 --- a/envelopes/authenvelope/authenvelope_test.go +++ b/envelopes/authenvelope/authenvelope_test.go @@ -2,10 +2,10 @@ package authenvelope import ( "bytes" + "orly.dev/chk" "testing" "orly.dev/auth" - "orly.dev/chk" "orly.dev/envelopes" "orly.dev/p256k" ) @@ -27,7 +27,7 @@ func TestAuth(t *testing.T) { copy(oChal, b1) var rem []byte var l string - if l, b1 = envelopes.Identify(b1); chk.E(err) { + if l, b1, err = envelopes.Identify(b1); chk.E(err) { t.Fatal(err) } if l != L { @@ -62,7 +62,7 @@ func TestAuth(t *testing.T) { b3 = resp.Marshal(b3) oResp := make([]byte, len(b3)) copy(oResp, b3) - if l, b3 = envelopes.Identify(b3); chk.E(err) { + if l, b3, err = envelopes.Identify(b3); chk.E(err) { t.Fatal(err) } if l != L { diff --git a/envelopes/closedenvelope/closedenvelope.go b/envelopes/closedenvelope/closedenvelope.go index 9695dd8..8baa5d9 100644 --- a/envelopes/closedenvelope/closedenvelope.go +++ b/envelopes/closedenvelope/closedenvelope.go @@ -5,10 +5,10 @@ package closedenvelope import ( "io" - "orly.dev/chk" + + "orly.dev/codec" "orly.dev/envelopes" - "orly.dev/interfaces/codec" "orly.dev/subscription" "orly.dev/text" ) diff --git a/envelopes/closedenvelope/closedenvelope_test.go b/envelopes/closedenvelope/closedenvelope_test.go index acfe1ad..5f684d1 100644 --- a/envelopes/closedenvelope/closedenvelope_test.go +++ b/envelopes/closedenvelope/closedenvelope_test.go @@ -2,11 +2,11 @@ package closedenvelope import ( "bytes" + "orly.dev/chk" "testing" "lukechampine.com/frand" - "orly.dev/chk" "orly.dev/envelopes" "orly.dev/subscription" ) @@ -43,7 +43,7 @@ func TestMarshalUnmarshal(t *testing.T) { copy(rb1, rb) var rem []byte var l string - if l, rb = envelopes.Identify(rb); chk.E(err) { + if l, rb, err = envelopes.Identify(rb); chk.E(err) { t.Fatal(err) } if l != L { diff --git a/envelopes/closeenvelope/closeenvelope.go b/envelopes/closeenvelope/closeenvelope.go index ff43164..1431c37 100644 --- a/envelopes/closeenvelope/closeenvelope.go +++ b/envelopes/closeenvelope/closeenvelope.go @@ -4,10 +4,10 @@ package closeenvelope import ( "io" - "orly.dev/chk" + + "orly.dev/codec" "orly.dev/envelopes" - "orly.dev/interfaces/codec" "orly.dev/subscription" ) diff --git a/envelopes/closeenvelope/closeenvelope_test.go b/envelopes/closeenvelope/closeenvelope_test.go index 55cd6d7..e02d4b9 100644 --- a/envelopes/closeenvelope/closeenvelope_test.go +++ b/envelopes/closeenvelope/closeenvelope_test.go @@ -2,9 +2,9 @@ package closeenvelope import ( "bytes" + "orly.dev/chk" "testing" - "orly.dev/chk" "orly.dev/envelopes" "orly.dev/subscription" ) @@ -25,7 +25,7 @@ func TestMarshalUnmarshal(t *testing.T) { copy(rb1, rb) var rem []byte var l string - if l, rb = envelopes.Identify(rb); chk.E(err) { + if l, rb, err = envelopes.Identify(rb); chk.E(err) { t.Fatal(err) } if l != L { diff --git a/envelopes/countenvelope/countenvelope.go b/envelopes/countenvelope/countenvelope.go index 5572855..d98ed05 100644 --- a/envelopes/countenvelope/countenvelope.go +++ b/envelopes/countenvelope/countenvelope.go @@ -5,12 +5,12 @@ package countenvelope import ( "bytes" "io" - "orly.dev/chk" - "orly.dev/envelopes" "orly.dev/errorf" + + "orly.dev/codec" + "orly.dev/envelopes" "orly.dev/filters" - "orly.dev/interfaces/codec" "orly.dev/ints" "orly.dev/subscription" "orly.dev/text" diff --git a/envelopes/countenvelope/countenvelope_test.go 
b/envelopes/countenvelope/countenvelope_test.go index f359d63..6f458c6 100644 --- a/envelopes/countenvelope/countenvelope_test.go +++ b/envelopes/countenvelope/countenvelope_test.go @@ -2,9 +2,9 @@ package countenvelope import ( "bytes" + "orly.dev/chk" "testing" - "orly.dev/chk" "orly.dev/envelopes" "orly.dev/filters" "orly.dev/subscription" @@ -30,7 +30,7 @@ func TestRequest(t *testing.T) { copy(rb1, rb) var rem []byte var l string - if l, rb = envelopes.Identify(rb); chk.E(err) { + if l, rb, err = envelopes.Identify(rb); chk.E(err) { t.Fatal(err) } if l != L { diff --git a/envelopes/eid/eid.go b/envelopes/eid/eid.go deleted file mode 100644 index 4ef65ac..0000000 --- a/envelopes/eid/eid.go +++ /dev/null @@ -1,5 +0,0 @@ -package eid - -type Ider interface { - Id() []byte -} diff --git a/envelopes/eoseenvelope/eoseenvelope.go b/envelopes/eoseenvelope/eoseenvelope.go index cd820b9..954fac1 100644 --- a/envelopes/eoseenvelope/eoseenvelope.go +++ b/envelopes/eoseenvelope/eoseenvelope.go @@ -6,11 +6,10 @@ package eoseenvelope import ( "io" - "orly.dev/log" - "orly.dev/chk" + + "orly.dev/codec" "orly.dev/envelopes" - "orly.dev/interfaces/codec" "orly.dev/subscription" ) @@ -40,7 +39,6 @@ func (en *T) Label() string { return L } // Write the eoseenvelope.T to a provided io.Writer. func (en *T) Write(w io.Writer) (err error) { - log.I.F("writing EOSE to %s", en.Subscription.String()) _, err = w.Write(en.Marshal(nil)) return } diff --git a/envelopes/eoseenvelope/eoseenvelope_test.go b/envelopes/eoseenvelope/eoseenvelope_test.go index c1d7c00..8854dec 100644 --- a/envelopes/eoseenvelope/eoseenvelope_test.go +++ b/envelopes/eoseenvelope/eoseenvelope_test.go @@ -2,9 +2,9 @@ package eoseenvelope import ( "bytes" + "orly.dev/chk" "testing" - "orly.dev/chk" "orly.dev/envelopes" "orly.dev/subscription" ) @@ -26,7 +26,7 @@ func TestMarshalUnmarshal(t *testing.T) { copy(rb1, rb) var rem []byte var l string - if l, rb = envelopes.Identify(rb); chk.E(err) { + if l, rb, err = envelopes.Identify(rb); chk.E(err) { t.Fatal(err) } if l != L { diff --git a/envelopes/eventenvelope/eventenvelope.go b/envelopes/eventenvelope/eventenvelope.go index 817bfa3..8e59e5f 100644 --- a/envelopes/eventenvelope/eventenvelope.go +++ b/envelopes/eventenvelope/eventenvelope.go @@ -4,12 +4,12 @@ package eventenvelope import ( "io" - "orly.dev/chk" - "orly.dev/envelopes" "orly.dev/errorf" + + "orly.dev/codec" + "orly.dev/envelopes" "orly.dev/event" - "orly.dev/interfaces/codec" "orly.dev/subscription" ) @@ -29,8 +29,6 @@ func NewSubmission() *Submission { return &Submission{E: &event.E{}} } // NewSubmissionWith creates a new eventenvelope.Submission with a provided event.E. func NewSubmissionWith(ev *event.E) *Submission { return &Submission{E: ev} } -func (en *Submission) Id() []byte { return en.E.Id } - // Label returns the label of a event eventenvelope.Submission envelope. func (en *Submission) Label() string { return L } @@ -105,8 +103,6 @@ func NewResultWith[V string | []byte](s V, ev *event.E) ( return &Result{subscription.MustNew(s), ev}, nil } -func (en *Result) Id() []byte { return en.Event.Id } - // Label returns the label of a event eventenvelope.Result envelope. 
func (en *Result) Label() string { return L } diff --git a/envelopes/eventenvelope/eventenvelope_test.go b/envelopes/eventenvelope/eventenvelope_test.go index ac7a0cf..b1f420d 100644 --- a/envelopes/eventenvelope/eventenvelope_test.go +++ b/envelopes/eventenvelope/eventenvelope_test.go @@ -3,9 +3,9 @@ package eventenvelope import ( "bufio" "bytes" + "orly.dev/chk" "testing" - "orly.dev/chk" "orly.dev/envelopes" "orly.dev/event" "orly.dev/event/examples" @@ -14,7 +14,6 @@ import ( func TestSubmission(t *testing.T) { scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache)) - scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000) var c, rem, out []byte var err error for scanner.Scan() { @@ -34,7 +33,7 @@ func TestSubmission(t *testing.T) { rem = ea.Marshal(rem) c = append(c, rem...) var l string - if l, rem = envelopes.Identify(rem); chk.E(err) { + if l, rem, err = envelopes.Identify(rem); chk.E(err) { t.Fatal(err) } if l != L { @@ -59,7 +58,6 @@ func TestSubmission(t *testing.T) { func TestResult(t *testing.T) { scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache)) - scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000) var c, rem, out []byte var err error for scanner.Scan() { @@ -83,7 +81,7 @@ func TestResult(t *testing.T) { rem = ea.Marshal(rem) c = append(c, rem...) var l string - if l, rem = envelopes.Identify(rem); chk.E(err) { + if l, rem, err = envelopes.Identify(rem); chk.E(err) { t.Fatal(err) } if l != L { diff --git a/envelopes/identify.go b/envelopes/identify.go index 4f54b89..435aa81 100644 --- a/envelopes/identify.go +++ b/envelopes/identify.go @@ -1,12 +1,11 @@ package envelopes // Identify handles determining what kind of codec.Envelope is, by the Label, -// the first step in identifying the structure of the message. -// -// This first step is not enough because the same labels are used on several -// codec.Envelope types in the nostr specification. The rest of the context is -// in whether this is a client or a relay receiving it. -func Identify(b []byte) (t string, rem []byte) { +// the first step in identifying the structure of the message. This first step +// is not sufficient because the same labels are used on several codec.Envelope +// types in the nostr specification. The rest of the context is in whether this +// is a client or a relay receiving it. 
+func Identify(b []byte) (t string, rem []byte, err error) { var openBrackets, openQuotes, afterQuotes bool var label []byte rem = b diff --git a/envelopes/noticeenvelope/noticeenvelope.go b/envelopes/noticeenvelope/noticeenvelope.go index 665bd19..0d4632f 100644 --- a/envelopes/noticeenvelope/noticeenvelope.go +++ b/envelopes/noticeenvelope/noticeenvelope.go @@ -5,10 +5,10 @@ package noticeenvelope import ( "io" - "orly.dev/chk" + + "orly.dev/codec" "orly.dev/envelopes" - "orly.dev/interfaces/codec" "orly.dev/text" ) diff --git a/envelopes/noticeenvelope/noticeenvelope_test.go b/envelopes/noticeenvelope/noticeenvelope_test.go index 06c0c0c..fbe01d1 100644 --- a/envelopes/noticeenvelope/noticeenvelope_test.go +++ b/envelopes/noticeenvelope/noticeenvelope_test.go @@ -2,9 +2,9 @@ package noticeenvelope import ( "bytes" + "orly.dev/chk" "testing" - "orly.dev/chk" "orly.dev/envelopes" "orly.dev/envelopes/messages" ) @@ -21,7 +21,7 @@ func TestMarshalUnmarshal(t *testing.T) { copy(rb1, rb) var rem []byte var l string - if l, rb = envelopes.Identify(rb); chk.E(err) { + if l, rb, err = envelopes.Identify(rb); chk.E(err) { t.Fatal(err) } if l != L { diff --git a/envelopes/okenvelope/okenvelope.go b/envelopes/okenvelope/okenvelope.go index d3b8bc5..e9d16de 100644 --- a/envelopes/okenvelope/okenvelope.go +++ b/envelopes/okenvelope/okenvelope.go @@ -1,19 +1,18 @@ // Package okenvelope is a codec for the OK message, which is an acknowledgement // for an EVENT eventenvelope.Submission, containing true/false and if false a -// message with a machine-readable error type as found in the 'messages' -// package. +// message with a machine readable error type as found in the messages package. package okenvelope import ( "io" - - "github.com/minio/sha256-simd" "orly.dev/chk" - "orly.dev/envelopes" "orly.dev/errorf" - "orly.dev/eventid" - "orly.dev/interfaces/codec" "orly.dev/log" + + "orly.dev/codec" + "orly.dev/envelopes" + "orly.dev/eventid" + "orly.dev/sha256" "orly.dev/text" ) @@ -57,9 +56,7 @@ func (en *T) ReasonString() string { return string(en.Reason) } // Write the okenvelope.T to a provided io.Writer. func (en *T) Write(w io.Writer) (err error) { - msg := en.Marshal(nil) - log.T.F("%s", msg) - _, err = w.Write(msg) + _, err = w.Write(en.Marshal(nil)) return } diff --git a/envelopes/okenvelope/okenvelope_test.go b/envelopes/okenvelope/okenvelope_test.go index f82777c..60f2348 100644 --- a/envelopes/okenvelope/okenvelope_test.go +++ b/envelopes/okenvelope/okenvelope_test.go @@ -2,9 +2,9 @@ package okenvelope import ( "bytes" + "orly.dev/chk" "testing" - "orly.dev/chk" "orly.dev/envelopes" "orly.dev/envelopes/messages" "orly.dev/eventid" @@ -23,7 +23,7 @@ func TestMarshalUnmarshal(t *testing.T) { copy(rb1, rb) var rem []byte var l string - if l, rb = envelopes.Identify(rb); chk.E(err) { + if l, rb, err = envelopes.Identify(rb); chk.E(err) { t.Fatal(err) } if l != L { diff --git a/envelopes/reqenvelope/reqenvelope.go b/envelopes/reqenvelope/reqenvelope.go index 705fa12..a4a06ab 100644 --- a/envelopes/reqenvelope/reqenvelope.go +++ b/envelopes/reqenvelope/reqenvelope.go @@ -4,11 +4,11 @@ package reqenvelope import ( "io" - "orly.dev/chk" + + "orly.dev/codec" "orly.dev/envelopes" "orly.dev/filters" - "orly.dev/interfaces/codec" "orly.dev/subscription" "orly.dev/text" ) @@ -102,7 +102,7 @@ func (en *T) Unmarshal(b []byte) (r []byte, err error) { // Parse reads a REQ envelope from minified JSON into a newly allocated // reqenvelope.T. 
-func Parse(b []byte) (t *T, rem []byte, err error) { +func (en *T) Parse(b []byte) (t *T, rem []byte, err error) { t = New() if rem, err = t.Unmarshal(b); chk.E(err) { return } diff --git a/envelopes/reqenvelope/reqenvelope_test.go b/envelopes/reqenvelope/reqenvelope_test.go index 04ef118..a00325e 100644 --- a/envelopes/reqenvelope/reqenvelope_test.go +++ b/envelopes/reqenvelope/reqenvelope_test.go @@ -2,9 +2,9 @@ package reqenvelope import ( "bytes" + "orly.dev/chk" "testing" - "orly.dev/chk" "orly.dev/envelopes" "orly.dev/filters" "orly.dev/subscription" @@ -30,7 +30,7 @@ func TestMarshalUnmarshal(t *testing.T) { copy(rb1, rb) var rem []byte var l string - if l, rb = envelopes.Identify(rb); chk.E(err) { + if l, rb, err = envelopes.Identify(rb); chk.E(err) { t.Fatal(err) } if l != L { diff --git a/event/binary.go b/event/binary.go index 2d64090..c5a834f 100644 --- a/event/binary.go +++ b/event/binary.go @@ -59,7 +59,7 @@ func (ev *E) UnmarshalBinary(r io.Reader) (err error) { if ca, err = varint.Decode(r); chk.E(err) { return } - ev.CreatedAt = timestamp.New(ca) + ev.CreatedAt = timestamp.New(int64(ca)) var k uint64 if k, err = varint.Decode(r); chk.E(err) { return } @@ -69,13 +69,13 @@ func (ev *E) UnmarshalBinary(r io.Reader) (err error) { if nTags, err = varint.Decode(r); chk.E(err) { return } - ev.Tags = tags.NewWithCap(nTags) + ev.Tags = tags.NewWithCap(int(nTags)) for range nTags { var nField uint64 if nField, err = varint.Decode(r); chk.E(err) { return } - t := tag.NewWithCap(nField) + t := tag.NewWithCap(int(nField)) for range nField { var lenField uint64 if lenField, err = varint.Decode(r); chk.E(err) { diff --git a/event/json.go b/event/json.go index 5801248..da54414 100644 --- a/event/json.go +++ b/event/json.go @@ -259,7 +259,7 @@ InVal: if !bytes.Equal(jCreatedAt, key) { goto invalid } - ev.CreatedAt = timestamp.New(uint(0)) + ev.CreatedAt = timestamp.New(int64(0)) if r, err = ev.CreatedAt.Unmarshal(r); chk.T(err) { return } diff --git a/eventid/eventid.go b/eventid/eventid.go index 1190f85..4402974 100644 --- a/eventid/eventid.go +++ b/eventid/eventid.go @@ -4,12 +4,12 @@ package eventid import ( "lukechampine.com/frand" - - "github.com/minio/sha256-simd" "orly.dev/chk" "orly.dev/errorf" - "orly.dev/hex" "orly.dev/log" + + "orly.dev/hex" + "orly.dev/sha256" ) // T is the SHA256 hash in hexadecimal of the canonical form of an event as diff --git a/filter/filter.go b/filter/filter.go index 3397062..2e0e5be 100644 --- a/filter/filter.go +++ b/filter/filter.go @@ -1,33 +1,27 @@ -// Package filter is a codec for the nostr filter (queries) and includes: -// -// - tools for matching them to events -// -// - a canonical format scheme to enable compactly -// -// - identifying subscription filters -// -// - a simplified filter that leaves out the IDs and Search fields for -// use in the HTTP API. +// Package filter is a codec for nostr filters (queries) and includes tools for +// matching them to events, a canonical format scheme to enable compactly +// identifying subscription filters, and a simplified filter that leaves out the +// IDs and Search fields for use in the HTTP API. 
package filter import ( "bytes" "encoding/binary" + "orly.dev/chk" + "orly.dev/errorf" "sort" "lukechampine.com/frand" - "github.com/minio/sha256-simd" - "orly.dev/chk" "orly.dev/ec/schnorr" "orly.dev/ec/secp256k1" - "orly.dev/errorf" "orly.dev/event" "orly.dev/hex" "orly.dev/ints" "orly.dev/kind" "orly.dev/kinds" - "orly.dev/pointers" + "orly.dev/realy/pointers" + "orly.dev/sha256" "orly.dev/tag" "orly.dev/tags" "orly.dev/text" @@ -36,41 +30,36 @@ import ( // F is the primary query form for requesting events from a nostr relay. // -// The ordering of the fields of filters is not specified as in the protocol -// there is no requirement to generate a hash for fast recognition of identical -// filters. -// -// However, for internal use in a relay, by applying a consistent sort order, -// this library will produce an identical JSON from the same *set* of fields no -// matter what order they were provided. +// The ordering of fields of filters is not specified as in the protocol there +// is no requirement to generate a hash for fast recognition of identical +// filters. However, for internal use in a relay, by applying a consistent sort +// order, this library will produce an identical JSON from the same *set* of +// fields no matter what order they were provided. // // This is to facilitate the deduplication of filters so an effective identical // match is not performed on an identical filter. type F struct { - Ids *tag.T `json:"ids,omitempty"` - Kinds *kinds.T `json:"kinds,omitempty"` - Authors *tag.T `json:"authors,omitempty"` - // Tags are internally stored with the key being prefixed with # and a-zA-Z - // as the second character in the first field of a tag.T, but when marshaled - // render as an object key that if not present is not rendered. - Tags *tags.T `json:"-,omitempty"` - Since *timestamp.T `json:"since,omitempty"` - Until *timestamp.T `json:"until,omitempty"` - Search []byte `json:"search,omitempty"` - Limit *uint `json:"limit,omitempty"` + Ids *tag.T `json:"ids,omitempty"` + Kinds *kinds.T `json:"kinds,omitempty"` + Authors *tag.T `json:"authors,omitempty"` + Tags *tags.T `json:"-,omitempty"` + Since *timestamp.T `json:"since,omitempty"` + Until *timestamp.T `json:"until,omitempty"` + Search []byte `json:"search,omitempty"` + Limit *uint `json:"limit,omitempty"` } -// New creates a new, reasonably initialized filter that will be ready for most -// uses without further allocations. +// New creates a new, reasonably initialized filter that will be ready for most uses without +// further allocations. func New() (f *F) { return &F{ Ids: tag.NewWithCap(10), Kinds: kinds.NewWithCap(10), Authors: tag.NewWithCap(10), Tags: tags.New(), - Since: new(timestamp.T), - Until: new(timestamp.T), - Search: nil, + // Since: timestamp.New(), + // Until: timestamp.New(), + Search: nil, } } @@ -119,8 +108,8 @@ var ( Search = []byte("search") ) -// Marshal a filter into raw JSON bytes, minified. The field ordering and sort -// of fields is canonicalized so that a hash can identify the same filter. +// Marshal a filter into raw JSON bytes, minified. The field ordering and sort of fields is +// canonicalized so that a hash can identify the same filter. func (f *F) Marshal(dst []byte) (b []byte) { var err error _ = err @@ -159,9 +148,8 @@ func (f *F) Marshal(dst []byte) (b []byte) { // } else { // first = true // } - // - // tags are stored as tags with the initial element the "#a" and the - // rest the list in each element of the tags list. 
eg: + // tags are stored as tags with the initial element the "#a" and the rest the list in + // each element of the tags list. eg: // // [["#p",""," 'z' || tKey[1] < 'A' && tKey[1] > 'Z') { - // the first "key" field must begin with '#' and the second be alpha + // first "key" field must begin with '#' and second be alpha continue } values := tg.ToSliceOfBytes()[1:] @@ -267,7 +254,7 @@ const ( // Unmarshal a filter from raw (minified) JSON bytes into the runtime format. // -// todo: this does not tolerate whitespace, but it's bleeding fast. +// todo: this may tolerate whitespace, not certain currently. func (f *F) Unmarshal(b []byte) (r []byte, err error) { r = b[:] var key []byte @@ -316,7 +303,7 @@ func (f *F) Unmarshal(b []byte) (r []byte, err error) { copy(k, key) switch key[1] { case 'e', 'p': - // the tags must all be 64-character hexadecimal + // the tags must all be 64 character hexadecimal var ff [][]byte if ff, r, err = text.UnmarshalHexArray( r, @@ -326,7 +313,7 @@ func (f *F) Unmarshal(b []byte) (r []byte, err error) { } ff = append([][]byte{k}, ff...) f.Tags = f.Tags.AppendTags(tag.New(ff...)) - // f.Tags.E = append(f.Tags.E, tag.New(ff...)) + // f.Tags.F = append(f.Tags.F, tag.New(ff...)) default: // other types of tags can be anything var ff [][]byte @@ -335,7 +322,7 @@ func (f *F) Unmarshal(b []byte) (r []byte, err error) { } ff = append([][]byte{k}, ff...) f.Tags = f.Tags.AppendTags(tag.New(ff...)) - // f.Tags.E = append(f.Tags.E, tag.New(ff...)) + // f.Tags.F = append(f.Tags.F, tag.New(ff...)) } state = betweenKV case IDs[0]: @@ -431,10 +418,14 @@ func (f *F) Unmarshal(b []byte) (r []byte, err error) { } if r[0] == '}' { state = afterClose + // log.I.Ln("afterClose") + // rem = rem[1:] } else if r[0] == ',' { state = openParen + // log.I.Ln("openParen") } else if r[0] == '"' { state = inKey + // log.I.Ln("inKey") } } if len(r) == 0 { @@ -453,35 +444,50 @@ invalid: // Matches checks a filter against an event and determines if the event matches the filter. func (f *F) Matches(ev *event.E) bool { if ev == nil { + // log.F.ToSliceOfBytes("nil event") return false } if f.Ids.Len() > 0 && !f.Ids.Contains(ev.Id) { + // log.F.ToSliceOfBytes("no ids in filter match event\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String()) return false } if f.Kinds.Len() > 0 && !f.Kinds.Contains(ev.Kind) { + // log.F.ToSliceOfBytes("no matching kinds in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String()) return false } if f.Authors.Len() > 0 && !f.Authors.Contains(ev.Pubkey) { + // log.F.ToSliceOfBytes("no matching authors in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String()) return false } if f.Tags.Len() > 0 && !ev.Tags.Intersects(f.Tags) { return false } + // if f.Tags.Len() > 0 { + // for _, v := range f.Tags.ToSliceOfTags() { + // tvs := v.ToSliceOfBytes() + // if !ev.Tags.ContainsAny(v.FilterKey(), tag.New(tvs...)) { + // return false + // } + // } + // return false + // } if f.Since.Int() != 0 && ev.CreatedAt.I64() < f.Since.I64() { + // log.F.ToSliceOfBytes("event is older than since\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String()) return false } if f.Until.Int() != 0 && ev.CreatedAt.I64() > f.Until.I64() { + // log.F.ToSliceOfBytes("event is newer than until\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String()) return false } return true } -// Fingerprint returns an 8 byte truncated sha256 hash of the filter in the -// canonical form created by Marshal. 
+// Fingerprint returns an 8 byte truncated sha256 hash of the filter in the canonical form +// created by Marshal. // -// This hash is generated via the JSON encoded form of the filter, with the -// Limit field removed. This value should be set to zero after all results from -// a query of stored events, as per NIP-01. +// This hash is generated via the JSON encoded form of the filter, with the Limit field removed. +// This value should be set to zero after all results from a query of stored events, as per +// NIP-01. func (f *F) Fingerprint() (fp uint64, err error) { lim := f.Limit f.Limit = nil @@ -494,8 +500,8 @@ func (f *F) Fingerprint() (fp uint64, err error) { return } -// Sort the fields of a filter so a fingerprint on a filter that has the same -// set of content produces the same fingerprint. +// Sort the fields of a filter so a fingerprint on a filter that has the same set of content +// produces the same fingerprint. func (f *F) Sort() { if f.Ids != nil { sort.Sort(f.Ids) @@ -521,8 +527,7 @@ func arePointerValuesEqual[V comparable](a *V, b *V) bool { return false } -// Equal checks a filter against another filter to see if they are the same -// filter. +// Equal checks a filter against another filter to see if they are the same filter. func (f *F) Equal(b *F) bool { // sort the fields so they come out the same f.Sort() @@ -545,8 +550,9 @@ func GenFilter() (f *F, err error) { n := frand.Intn(16) for _ = range n { id := make([]byte, sha256.Size) - _, _ = frand.Read(id) + frand.Read(id) f.Ids = f.Ids.Append(id) + // f.Ids.Field = append(f.Ids.Field, id) } n = frand.Intn(16) for _ = range n { @@ -560,6 +566,7 @@ func GenFilter() (f *F, err error) { } pk := sk.PubKey() f.Authors = f.Authors.Append(schnorr.SerializePubKey(pk)) + // f.Authors.Field = append(f.Authors.Field, schnorr.SerializePubKey(pk)) } a := frand.Intn(16) if a < n { @@ -575,22 +582,24 @@ func GenFilter() (f *F, err error) { var idb [][]byte for range l { id := make([]byte, sha256.Size) - _, _ = frand.Read(id) + frand.Read(id) idb = append(idb, id) } idb = append([][]byte{{'#', byte(b)}}, idb...) f.Tags = f.Tags.AppendTags(tag.FromBytesSlice(idb...)) + // f.Tags.F = append(f.Tags.F, tag.FromBytesSlice(idb...)) } else { var idb [][]byte for range l { bb := make([]byte, frand.Intn(31)+1) - _, _ = frand.Read(bb) + frand.Read(bb) id := make([]byte, 0, len(bb)*2) id = hex.EncAppend(id, bb) idb = append(idb, id) } idb = append([][]byte{{'#', byte(b)}}, idb...) f.Tags = f.Tags.AppendTags(tag.FromBytesSlice(idb...)) + // f.Tags.F = append(f.Tags.F, tag.FromBytesSlice(idb...)) } } tn := int(timestamp.Now().I64()) diff --git a/filter/filter_test.go b/filter/filter_test.go index 16e755e..60a5d01 100644 --- a/filter/filter_test.go +++ b/filter/filter_test.go @@ -2,9 +2,8 @@ package filter import ( "bytes" - "testing" - "orly.dev/chk" + "testing" ) func TestT_MarshalUnmarshal(t *testing.T) { diff --git a/filter/simple.go b/filter/simple.go index 297a5e7..5f26489 100644 --- a/filter/simple.go +++ b/filter/simple.go @@ -2,17 +2,17 @@ package filter import ( "encoding/binary" + "orly.dev/chk" + "orly.dev/errorf" "sort" - "github.com/minio/sha256-simd" - "orly.dev/chk" "orly.dev/ec/schnorr" - "orly.dev/errorf" "orly.dev/event" "orly.dev/hex" "orly.dev/ints" "orly.dev/kinds" - "orly.dev/pointers" + "orly.dev/realy/pointers" + "orly.dev/sha256" "orly.dev/tag" "orly.dev/tags" "orly.dev/text" @@ -253,7 +253,7 @@ func (f *S) Unmarshal(b []byte) (r []byte, err error) { } ff = append([][]byte{k}, ff...) 
f.Tags = f.Tags.AppendTags(tag.New(ff...)) - // s.Tags.E = append(s.Tags.E, tag.New(ff...)) + // s.Tags.F = append(s.Tags.F, tag.New(ff...)) default: // other types of tags can be anything var ff [][]byte @@ -262,7 +262,7 @@ func (f *S) Unmarshal(b []byte) (r []byte, err error) { } ff = append([][]byte{k}, ff...) f.Tags = f.Tags.AppendTags(tag.New(ff...)) - // s.Tags.E = append(s.Tags.E, tag.New(ff...)) + // s.Tags.F = append(s.Tags.F, tag.New(ff...)) } state = betweenKV case Kinds[0]: @@ -322,15 +322,15 @@ invalid: // Matches checks if a filter.S matches an event. func (f *S) Matches(ev *event.E) bool { if ev == nil { - // log.E.ToSliceOfBytes("nil event") + // log.F.ToSliceOfBytes("nil event") return false } if f.Kinds.Len() > 0 && !f.Kinds.Contains(ev.Kind) { - // log.E.ToSliceOfBytes("no matching kinds in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), s.ToObject().String()) + // log.F.ToSliceOfBytes("no matching kinds in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), s.ToObject().String()) return false } if f.Authors.Len() > 0 && !f.Authors.Contains(ev.Pubkey) { - // log.E.ToSliceOfBytes("no matching authors in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), s.ToObject().String()) + // log.F.ToSliceOfBytes("no matching authors in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), s.ToObject().String()) return false } if f.Tags.Len() > 0 && !ev.Tags.Intersects(f.Tags) { diff --git a/filters/filters.go b/filters/filters.go index b373c1a..70a1fab 100644 --- a/filters/filters.go +++ b/filters/filters.go @@ -34,7 +34,7 @@ func (f *T) Len() int { return len(f.F) } // New creates a new filters.T out of a variadic list of filter.F. func New(ff ...*filter.F) (f *T) { return &T{F: ff} } -// Match checks if a set of filters.T matches on an event.E. +// Match checks if a set of filters.T matches on an event.F. 
func (f *T) Match(event *event.E) bool { for _, f := range f.F { if f.Matches(event) { diff --git a/filters/filters_test.go b/filters/filters_test.go index 569f6ba..b9666e8 100644 --- a/filters/filters_test.go +++ b/filters/filters_test.go @@ -2,9 +2,8 @@ package filters import ( "bytes" - "testing" - "orly.dev/chk" + "testing" ) func TestT_MarshalUnmarshal(t *testing.T) { diff --git a/go.mod b/go.mod index 653f88e..5c913f2 100644 --- a/go.mod +++ b/go.mod @@ -1,54 +1,66 @@ module orly.dev -go 1.24.4 +go 1.24.2 require ( github.com/adrg/xdg v0.5.3 + github.com/alexflint/go-arg v1.6.0 github.com/danielgtaylor/huma/v2 v2.34.1 - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc + github.com/davecgh/go-spew v1.1.1 github.com/dgraph-io/badger/v4 v4.7.0 github.com/fasthttp/websocket v1.5.12 github.com/fatih/color v1.18.0 github.com/gobwas/httphead v0.1.0 - github.com/gobwas/ws v1.2.1 + github.com/gobwas/ws v1.4.0 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 + github.com/klauspost/cpuid/v2 v2.2.11 github.com/minio/sha256-simd v1.0.1 github.com/pkg/profile v1.7.0 + github.com/puzpuzpuz/xsync/v3 v3.5.1 github.com/rs/cors v1.11.1 github.com/stretchr/testify v1.10.0 github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b go-simpler.org/env v0.12.0 go.uber.org/atomic v1.11.0 - golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b + golang.org/x/crypto v0.40.0 + golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc + golang.org/x/lint v0.0.0-20241112194109-818c5a804067 + golang.org/x/net v0.42.0 + golang.org/x/sync v0.16.0 + honnef.co/go/tools v0.6.1 lukechampine.com/frand v1.5.1 ) require ( - github.com/andybalholm/brotli v1.1.1 // indirect + github.com/BurntSushi/toml v1.5.0 // indirect + github.com/alexflint/go-scalar v1.2.0 // indirect + github.com/andybalholm/brotli v1.2.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.5 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gobwas/pool v0.2.1 // indirect github.com/google/flatbuffers v25.2.10+incompatible // indirect github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 // indirect github.com/klauspost/compress v1.18.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.11 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/savsgio/gotils v0.0.0-20250408102913-196191ec6287 // indirect github.com/templexxx/cpu v0.1.1 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect - github.com/valyala/fasthttp v1.62.0 // indirect + github.com/valyala/fasthttp v1.63.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect - golang.org/x/net v0.40.0 // indirect - golang.org/x/sys v0.33.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect + golang.org/x/exp/typeparams v0.0.0-20250711185948-6ae5c78190dc // indirect + golang.org/x/mod 
v0.26.0 // indirect + golang.org/x/sys v0.34.0 // indirect + golang.org/x/text v0.27.0 // indirect + golang.org/x/tools v0.35.0 // indirect google.golang.org/protobuf v1.36.6 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index b51e204..743cffa 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,13 @@ +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78= github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ= -github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= -github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= +github.com/alexflint/go-arg v1.6.0 h1:wPP9TwTPO54fUVQl4nZoxbFfKCcy5E6HBCumj1XVRSo= +github.com/alexflint/go-arg v1.6.0/go.mod h1:A7vTJzvjoaSTypg4biM5uYNTkJ27SkNTArtYXnlqVO8= +github.com/alexflint/go-scalar v1.2.0 h1:WR7JPKkeNpnYIOfHRa7ivM21aWAdHD0gEWHCx+WQBRw= +github.com/alexflint/go-scalar v1.2.0/go.mod h1:LoFvNMqS1CPrMVltza4LvnGKhaSpc3oyLEBUZVhhS2o= +github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= +github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= @@ -16,9 +22,8 @@ github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38 github.com/danielgtaylor/huma/v2 v2.34.1 h1:EmOJAbzEGfy0wAq/QMQ1YKfEMBEfE94xdBRLPBP0gwQ= github.com/danielgtaylor/huma/v2 v2.34.1/go.mod h1:ynwJgLk8iGVgoaipi5tgwIQ5yoFNmiu+QdhU7CEEmhk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgraph-io/badger/v4 v4.7.0 h1:Q+J8HApYAY7UMpL8d9owqiB+odzEc0zn/aqOD9jhc6Y= github.com/dgraph-io/badger/v4 v4.7.0/go.mod h1:He7TzG3YBy3j4f5baj5B7Zl2XyfNe5bl4Udl0aPemVA= github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM= @@ -35,16 +40,17 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod 
h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk= github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= +github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs= +github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc= github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= @@ -79,17 +85,19 @@ github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5 github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= +github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 h1:D0vL7YNisV2yqE55+q0lFuGse6U8lxlg7fYTctlT5Gc= -github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38/go.mod h1:sM7Mt7uEoCeFSCBM+qBrqvEo+/9vdmj19wzp3yzUhmg= +github.com/savsgio/gotils v0.0.0-20250408102913-196191ec6287 h1:qIQ0tWF9vxGtkJa24bR+2i53WBCz1nW/Pc47oVYauC4= +github.com/savsgio/gotils v0.0.0-20250408102913-196191ec6287/go.mod h1:sM7Mt7uEoCeFSCBM+qBrqvEo+/9vdmj19wzp3yzUhmg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= @@ -101,31 +109,58 @@ github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3W github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ= github.com/valyala/bytebufferpool v1.0.0 
h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.62.0 h1:8dKRBX/y2rCzyc6903Zu1+3qN0H/d2MsxPPmVNamiH0= -github.com/valyala/fasthttp v1.62.0/go.mod h1:FCINgr4GKdKqV8Q0xv8b+UxPV+H/O5nNFo3D+r54Htg= +github.com/valyala/fasthttp v1.63.0 h1:DisIL8OjB7ul2d7cBaMRcKTQDYnrGy56R4FCiuDP0Ns= +github.com/valyala/fasthttp v1.63.0/go.mod h1:REc4IeW+cAEyLrRPa5A81MIjvz0QE1laoTX2EaPHKJM= github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= go-simpler.org/env v0.12.0 h1:kt/lBts0J1kjWJAnB740goNdvwNxt5emhYngL0Fzufs= go-simpler.org/env v0.12.0/go.mod h1:cc/5Md9JCUM7LVLtN0HYjPTDcI3Q8TDaPlNTAlDU+WI= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= +golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc h1:TS73t7x3KarrNd5qAipmspBDS1rkMcgVG/fS1aRb4Rc= +golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc= +golang.org/x/exp/typeparams v0.0.0-20250711185948-6ae5c78190dc h1:mPO8OXAJgNBiEFwAG1Lh4pe7uxJgEWPk+io1+SzvMfk= +golang.org/x/exp/typeparams v0.0.0-20250711185948-6ae5c78190dc/go.mod h1:LKZHyeOpPuZcMgxeHjJp4p5yvxrCX1xDvH10zYHhjjQ= +golang.org/x/lint v0.0.0-20241112194109-818c5a804067 h1:adDmSQyFTCiv19j015EGKJBoaa7ElV0Q1Wovb/4G7NA= +golang.org/x/lint 
v0.0.0-20241112194109-818c5a804067/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= +golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= +golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= +golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= +golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -134,5 +169,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= +honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w= lukechampine.com/frand v1.5.1/go.mod 
h1:4VstaWc2plN4Mjr10chUD46RAVGWhpkZ5Nja8+Azp0Q= diff --git a/helpers/helpers.go b/helpers/helpers.go deleted file mode 100644 index a25980f..0000000 --- a/helpers/helpers.go +++ /dev/null @@ -1,49 +0,0 @@ -package helpers - -import ( - "net/http" - "strings" -) - -func GenerateDescription(text string, scopes []string) string { - if len(scopes) == 0 { - return text - } - result := make([]string, 0) - for _, value := range scopes { - result = append(result, "`"+value+"`") - } - return text + "

**Scopes**
" + strings.Join(result, ", ") -} - -func GetRemoteFromReq(r *http.Request) (rr string) { - // reverse proxy should populate this field so we see the remote not the - // proxy - remoteAddress := r.Header.Get("X-Forwarded-For") - if remoteAddress == "" { - remoteAddress = r.Header.Get("Forwarded") - if remoteAddress == "" { - rr = r.RemoteAddr - return - } else { - splitted := strings.Split(remoteAddress, ", ") - if len(splitted) >= 1 { - forwarded := strings.Split(splitted[0], "=") - if len(forwarded) == 2 { - // by the standard this should be the address of the client. - rr = splitted[1] - } - return - } - } - } - splitted := strings.Split(remoteAddress, " ") - if len(splitted) == 1 { - rr = splitted[0] - } - if len(splitted) == 2 { - sp := strings.Split(splitted[0], ",") - rr = sp[0] - } - return -} diff --git a/hex/aliases.go b/hex/aliases.go index 43dbb39..5b87794 100644 --- a/hex/aliases.go +++ b/hex/aliases.go @@ -4,11 +4,10 @@ package hex import ( "encoding/hex" - - "github.com/templexxx/xhex" - "orly.dev/chk" "orly.dev/errorf" + + "github.com/templexxx/xhex" ) var Enc = hex.EncodeToString @@ -23,7 +22,7 @@ var DecLen = hex.DecodedLen type InvalidByteError = hex.InvalidByteError -// EncAppend uses xhex to encode a slice of bytes and appends it to a provided destination slice. +// EncAppend uses xhex to encode a sice of bytes and appends it to a provided destination slice. func EncAppend(dst, src []byte) (b []byte) { l := len(dst) dst = append(dst, make([]byte, len(src)*2)...) diff --git a/httpauth/nip98auth.go b/httpauth/nip98auth.go index 2438d7b..3b2bf46 100644 --- a/httpauth/nip98auth.go +++ b/httpauth/nip98auth.go @@ -4,12 +4,12 @@ import ( "encoding/base64" "net/http" "net/url" + "orly.dev/chk" + "orly.dev/log" "strings" - "orly.dev/chk" "orly.dev/event" "orly.dev/kind" - "orly.dev/log" "orly.dev/signer" "orly.dev/tag" "orly.dev/tags" diff --git a/httpauth/validate.go b/httpauth/validate.go index 9492160..0457249 100644 --- a/httpauth/validate.go +++ b/httpauth/validate.go @@ -4,17 +4,16 @@ import ( "encoding/base64" "fmt" "net/http" + "orly.dev/chk" + "orly.dev/errorf" + "orly.dev/log" "strings" "time" - "orly.dev/chk" - "orly.dev/errorf" "orly.dev/event" "orly.dev/ints" "orly.dev/kind" "orly.dev/tag" - - "orly.dev/log" ) var ErrMissingKey = fmt.Errorf( @@ -29,7 +28,6 @@ func CheckAuth(r *http.Request, tolerance ...time.Duration) ( pubkey []byte, err error, ) { val := r.Header.Get(HeaderKey) - log.I.F(val) if val == "" { err = ErrMissingKey valid = true @@ -46,7 +44,6 @@ func CheckAuth(r *http.Request, tolerance ...time.Duration) ( log.I.F("validating auth '%s'", val) switch { case strings.HasPrefix(val, NIP98Prefix): - log.T.F(val) split := strings.Split(val, " ") if len(split) == 1 { err = errorf.E( @@ -56,8 +53,7 @@ func CheckAuth(r *http.Request, tolerance ...time.Duration) ( } if len(split) > 2 { err = errorf.E( - "extraneous content after second field space separated: %s", - val, + "extraneous content after second field space separated: %s", val, ) return } @@ -79,8 +75,7 @@ func CheckAuth(r *http.Request, tolerance ...time.Duration) ( if !ev.Kind.Equal(kind.HTTPAuth) { err = errorf.E( "invalid kind %d %s in nip-98 http auth event, require %d %s", - ev.Kind.K, ev.Kind.Name(), kind.HTTPAuth.K, - kind.HTTPAuth.Name(), + ev.Kind.K, ev.Kind.Name(), kind.HTTPAuth.K, kind.HTTPAuth.Name(), ) return } @@ -180,15 +175,10 @@ func CheckAuth(r *http.Request, tolerance ...time.Duration) ( return } } - log.T.F("%d %s", time.Now().Unix(), ev.Serialize()) if valid, err = ev.Verify(); 
chk.E(err) { return } - if valid { - log.I.F("event verified %0x", ev.Pubkey) - } if !valid { - log.T.F("event not verified") return } pubkey = ev.Pubkey diff --git a/interfaces/store/store_interface.go b/interfaces/store/store_interface.go index e2bc053..ba78cfd 100644 --- a/interfaces/store/store_interface.go +++ b/interfaces/store/store_interface.go @@ -33,6 +33,11 @@ type I interface { Syncer LogLeveler EventIdSerialer + Initer +} + +type Initer interface { + Init(path string) (err error) } type Pather interface { diff --git a/interrupt/main.go b/interrupt/main.go index ed02eeb..aa25e82 100644 --- a/interrupt/main.go +++ b/interrupt/main.go @@ -5,12 +5,13 @@ package interrupt import ( "fmt" + "orly.dev/log" "os" "os/signal" "runtime" - "go.uber.org/atomic" - "orly.dev/log" + "orly.dev/atomic" + "orly.dev/qu" ) // HandlerWithSource is an interrupt handling closure and the source location that it was sent @@ -32,7 +33,7 @@ var ( signals = []os.Signal{os.Interrupt} // ShutdownRequestChan is a channel that can receive shutdown requests - ShutdownRequestChan = make(chan struct{}) + ShutdownRequestChan = qu.T() // addHandlerChan is used to add an interrupt handler to the list of handlers to be invoked // on SIGINT (Ctrl+C) signals. @@ -40,7 +41,7 @@ var ( // HandlersDone is closed after all interrupt handlers run the first time an interrupt is // signaled. - HandlersDone = make(chan struct{}) + HandlersDone = make(qu.C) interruptCallbacks []func() interruptCallbackSources []string @@ -60,7 +61,7 @@ func Listener() { interruptCallbacks[idx]() } log.D.Ln("interrupt handlers finished") - close(HandlersDone) + HandlersDone.Q() if RestartRequested { Restart() } @@ -74,7 +75,7 @@ out: invokeCallbacks() break out - case <-ShutdownRequestChan: + case <-ShutdownRequestChan.Wait(): log.W.Ln("received shutdown request - shutting down...") requested.Store(true) invokeCallbacks() @@ -87,7 +88,7 @@ out: handler.Source, ) - case <-HandlersDone: + case <-HandlersDone.Wait(): break out } } @@ -118,7 +119,7 @@ func Request() { return } requested.Store(true) - close(ShutdownRequestChan) + ShutdownRequestChan.Q() var ok bool select { case _, ok = <-ShutdownRequestChan: diff --git a/interrupt/restart.go b/interrupt/restart.go index f02c9c6..6c4dde4 100644 --- a/interrupt/restart.go +++ b/interrupt/restart.go @@ -3,12 +3,11 @@ package interrupt import ( + "orly.dev/log" "os" "syscall" "github.com/kardianos/osext" - - "orly.dev/log" ) // Restart uses syscall.Exec to restart the process. MacOS and Windows are not implemented, diff --git a/ints/gen/pregen.go b/ints/gen/pregen.go index 2411ff8..62f9dc9 100644 --- a/ints/gen/pregen.go +++ b/ints/gen/pregen.go @@ -4,9 +4,8 @@ package main import ( "fmt" - "os" - "orly.dev/chk" + "os" ) func main() { diff --git a/ints/ints.go b/ints/ints.go index 72d61ab..63578bf 100644 --- a/ints/ints.go +++ b/ints/ints.go @@ -7,10 +7,9 @@ package ints import ( _ "embed" "io" + "orly.dev/errorf" "golang.org/x/exp/constraints" - - "orly.dev/errorf" ) // run this to regenerate (pointlessly) the base 10 array of 4 places per entry @@ -19,6 +18,8 @@ import ( //go:embed base10k.txt var base10k []byte +const base = 10000 + // T is an integer with a fast codec to decimal ASCII. 
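//
// The codec works in base-10000 chunks, each mapped through the embedded
// base10k table of four-character groups ("4 places per entry" as noted
// above), e.g.:
//
//	12345678 -> chunks 1234, 5678 -> "12345678"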
type T struct { N uint64 diff --git a/ints/ints_test.go b/ints/ints_test.go index 1e02b3f..de0a457 100644 --- a/ints/ints_test.go +++ b/ints/ints_test.go @@ -2,12 +2,11 @@ package ints import ( "math" + "orly.dev/chk" "strconv" "testing" "lukechampine.com/frand" - - "orly.dev/chk" ) func TestMarshalUnmarshal(t *testing.T) { diff --git a/json/base64.go b/json/base64.go index 37e5339..f50371f 100644 --- a/json/base64.go +++ b/json/base64.go @@ -3,9 +3,9 @@ package json import ( "bytes" "encoding/base64" - "orly.dev/chk" "orly.dev/errorf" + "orly.dev/text" ) diff --git a/json/bech32.go b/json/bech32.go index 985e81d..f642c5d 100644 --- a/json/bech32.go +++ b/json/bech32.go @@ -2,11 +2,11 @@ package json import ( "bytes" + "orly.dev/chk" + "orly.dev/errorf" "orly.dev/bech32encoding" - "orly.dev/chk" "orly.dev/ec/bech32" - "orly.dev/errorf" "orly.dev/text" ) diff --git a/json/examples_test.go b/json/examples_test.go index 9560edb..de8a863 100644 --- a/json/examples_test.go +++ b/json/examples_test.go @@ -3,8 +3,8 @@ package json import ( "bytes" "fmt" - "orly.dev/chk" + "orly.dev/hex" ) diff --git a/json/keyvalue.go b/json/keyvalue.go index 9a50af0..bb24908 100644 --- a/json/keyvalue.go +++ b/json/keyvalue.go @@ -2,9 +2,9 @@ package json import ( "io" - "orly.dev/chk" - "orly.dev/interfaces/codec" + + "orly.dev/codec" ) // An Object is an (not necessarily) ordered list of KeyValue. diff --git a/json/signed.go b/json/signed.go index dfad759..3762970 100644 --- a/json/signed.go +++ b/json/signed.go @@ -2,8 +2,8 @@ package json import ( "golang.org/x/exp/constraints" - "orly.dev/chk" + "orly.dev/ints" ) diff --git a/json/unsigned.go b/json/unsigned.go index c424593..e784006 100644 --- a/json/unsigned.go +++ b/json/unsigned.go @@ -2,8 +2,8 @@ package json import ( "golang.org/x/exp/constraints" - "orly.dev/chk" + "orly.dev/ints" ) diff --git a/keys/keys.go b/keys/keys.go new file mode 100644 index 0000000..719ffc0 --- /dev/null +++ b/keys/keys.go @@ -0,0 +1,82 @@ +// Package keys is a set of helpers for generating and converting public/secret +// keys to hex and back to binary. +package keys + +import ( + "bytes" + "orly.dev/chk" + + "orly.dev/ec/schnorr" + "orly.dev/hex" + "orly.dev/p256k" +) + +// GeneratePrivateKey - deprecated, use GenerateSecretKeyHex +var GeneratePrivateKey = func() string { return GenerateSecretKeyHex() } + +// GenerateSecretKey creates a new secret key and returns the bytes of the secret. +func GenerateSecretKey() (skb []byte, err error) { + signer := &p256k.Signer{} + if err = signer.Generate(); chk.E(err) { + return + } + skb = signer.Sec() + return +} + +// GenerateSecretKeyHex generates a secret key and encodes the bytes as hex. +func GenerateSecretKeyHex() (sks string) { + skb, err := GenerateSecretKey() + if chk.E(err) { + return + } + return hex.Enc(skb) +} + +// GetPublicKeyHex generates a public key from a hex encoded secret key. +func GetPublicKeyHex(sk string) (pk string, err error) { + var b []byte + if b, err = hex.Dec(sk); chk.E(err) { + return + } + signer := &p256k.Signer{} + if err = signer.InitSec(b); chk.E(err) { + return + } + + return hex.Enc(signer.Pub()), nil +} + +// SecretBytesToPubKeyHex generates a public key from secret key bytes. +func SecretBytesToPubKeyHex(skb []byte) (pk string, err error) { + signer := &p256k.Signer{} + if err = signer.InitSec(skb); chk.E(err) { + return + } + return hex.Enc(signer.Pub()), nil +} + +// IsValid32ByteHex checks that a hex string is a valid 32 bytes lower case hex encoded value as +// per nostr NIP-01 spec. 
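//
// A conforming value is 64 ASCII hex characters, for example an event id or an
// x-only public key rendered with hex.Enc.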
+func IsValid32ByteHex[V []byte | string](pk V) bool { + if bytes.Equal(bytes.ToLower([]byte(pk)), []byte(pk)) { + return false + } + var err error + dec := make([]byte, 32) + if _, err = hex.DecBytes(dec, []byte(pk)); chk.E(err) { + } + return len(dec) == 32 +} + +// IsValidPublicKey checks that a hex encoded public key is a valid BIP-340 public key. +func IsValidPublicKey[V []byte | string](pk V) bool { + v, _ := hex.Dec(string(pk)) + _, err := schnorr.ParsePubKey(v) + return err == nil +} + +// HexPubkeyToBytes decodes a pubkey from hex encoded string/bytes. +func HexPubkeyToBytes[V []byte | string](hpk V) (pkb []byte, err error) { + return hex.DecAppend(nil, []byte(hpk)) +} diff --git a/kind/kind.go b/kind/kind.go index e6a0ba6..84a346f 100644 --- a/kind/kind.go +++ b/kind/kind.go @@ -4,11 +4,11 @@ package kind import ( + "orly.dev/chk" "sync" "golang.org/x/exp/constraints" - "orly.dev/chk" "orly.dev/ints" ) diff --git a/kind/kind_test.go b/kind/kind_test.go index 22730a5..72d38e2 100644 --- a/kind/kind_test.go +++ b/kind/kind_test.go @@ -1,11 +1,10 @@ package kind import ( + "orly.dev/chk" "testing" "lukechampine.com/frand" - - "orly.dev/chk" ) func TestMarshalUnmarshal(t *testing.T) { diff --git a/kinds/kinds_test.go b/kinds/kinds_test.go index fec719d..21a43af 100644 --- a/kinds/kinds_test.go +++ b/kinds/kinds_test.go @@ -1,11 +1,11 @@ package kinds import ( + "orly.dev/chk" "testing" "lukechampine.com/frand" - "orly.dev/chk" "orly.dev/kind" ) diff --git a/layer2/badgerbadger/badgerbadger.go b/layer2/badgerbadger/badgerbadger.go new file mode 100644 index 0000000..3abefec --- /dev/null +++ b/layer2/badgerbadger/badgerbadger.go @@ -0,0 +1,67 @@ +// Package badgerbadger is a test of the layer 2 that uses two instances of the +// ratel event store, meant for testing the layer 2 protocol with two tiers of +// the database a size limited cache and a large non-purging store. +package badgerbadger + +import ( + "sync" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/eventid" + "orly.dev/filter" + "orly.dev/layer2" + "orly.dev/ratel" + "orly.dev/store" +) + +// Backend is a hybrid badger/badger eventstore where L1 will have GC enabled +// and L2 will not. This is mainly for testing, as both are local. +type Backend struct { + *layer2.Backend +} + +var _ store.I = (*Backend)(nil) + +// GetBackend returns a l2.Backend that combines two differently configured +// backends... the settings need to be configured in the ratel.T data structure +// before calling this. +func GetBackend(c context.T, wg *sync.WaitGroup, L1, L2 *ratel.T) (es store.I) { + // log.I.S(L1, L2) + es = &layer2.Backend{Ctx: c, WG: wg, L1: L1, L2: L2} + return +} + +// Init sets up the badger event store and connects to the configured IC +// canister. +// +// required params are address, canister Id and the badger event store size +// limit (which can be 0) +func (b *Backend) Init(path string) (err error) { return b.Backend.Init(path) } + +// Close the connection to the database. +// IC is a request/response API authing at each request. +func (b *Backend) Close() (err error) { return b.Backend.Close() } + +// // CountEvents returns the number of events found matching the filter. +// func (b *Backend) CountEvents(c context.F, f *filter.F) (count int, approx bool, err error) { +// return b.Backend.CountEvents(c, f) +// } + +// DeleteEvent removes an event from the event store. 
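//
// Like the other pass-through methods in this file it simply forwards to the
// embedded layer2.Backend, which is what lets this hybrid store satisfy
// store.I.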
+func (b *Backend) DeleteEvent(c context.T, eid *eventid.T) (err error) { + return b.Backend.DeleteEvent(c, eid) +} + +// QueryEvents searches for events that match a filter and returns them +// asynchronously over a provided channel. +func (b *Backend) QueryEvents(c context.T, f *filter.F) ( + ch event.Ts, err error, +) { + return b.Backend.QueryEvents(c, f) +} + +// SaveEvent writes an event to the event store. +func (b *Backend) SaveEvent(c context.T, ev *event.E) (err error) { + return b.Backend.SaveEvent(c, ev) +} diff --git a/layer2/badgerbadger/tester/badgerbadger.go b/layer2/badgerbadger/tester/badgerbadger.go new file mode 100644 index 0000000..774f6c0 --- /dev/null +++ b/layer2/badgerbadger/tester/badgerbadger.go @@ -0,0 +1,216 @@ +// Package main is a tester for a layer2 database scheme with one ratel DB with +// cache and the second not, testing the maintenance of the cache utilization +// and second level being accessed to fetch events that have been pruned out of +// the cache. +package main + +import ( + "orly.dev/chk" + "orly.dev/log" + "os" + "sync" + "time" + + "lukechampine.com/frand" + + "orly.dev/bech32encoding" + "orly.dev/context" + "orly.dev/event" + "orly.dev/filter" + "orly.dev/interrupt" + "orly.dev/keys" + "orly.dev/layer2" + "orly.dev/lol" + "orly.dev/qu" + "orly.dev/ratel" + "orly.dev/tag" + "orly.dev/tests" + "orly.dev/units" +) + +type Counter struct { + id []byte + size int + requested int +} + +func main() { + lol.NoTimeStamp.Store(true) + lol.SetLogLevel(lol.LevelNames[lol.Debug]) + var ( + err error + sec []byte + mx sync.Mutex + counter []Counter + total int + MaxContentSize = units.Mb / 2 + TotalSize = 1 + MaxDelay = time.Second / 40 + HW = 50 + LW = 25 + // fill rate capped to size of difference between high and low water mark + diff = TotalSize * units.Gb * (HW - LW) / 100 + ) + if sec, err = keys.GenerateSecretKey(); chk.E(err) { + panic(err) + } + var nsec []byte + if nsec, err = bech32encoding.HexToNsec(sec); chk.E(err) { + panic(err) + } + log.T.Ln("signing with", nsec) + c, cancel := context.Cancel(context.Bg()) + var wg sync.WaitGroup + // defer cancel() + // create L1 with cache management settings enabled; we do it in the current dir + // because os.TempDir can point to a ramdisk which is very impractical for this + // test. + path := "./badgerbadgertest" + os.RemoveAll(path) + b1 := ratel.GetBackend( + c, &wg, true, true, units.Gb, lol.Error, 4*units.Mb, "none", + TotalSize, LW, HW, 2, + ) + // create L2 with no cache management + b2 := ratel.GetBackend( + c, &wg, false, true, units.Gb, lol.Trace, 4*units.Mb, "none", + ) + // Respond to interrupt signal and clean up after interrupt or end of test. + // defer chk.E(os.RemoveAll(path)) + interrupt.AddHandler( + func() { + cancel() + chk.E(os.RemoveAll(path)) + }, + ) + // now join them together in a 2 level eventstore + twoLevel := layer2.Backend{ + Ctx: c, + WG: &wg, + L1: b1, + L2: b2, + } + if err = twoLevel.Init(path); chk.E(err) { + os.Exit(1) + } + // start GC + // go b1.GarbageCollector() +end: + for { + select { + case <-c.Done(): + log.I.Ln("context canceled") + return + default: + } + mx.Lock() + if total > TotalSize*10*units.Gb { + log.I.Ln(total, TotalSize*10*units.Gb) + mx.Unlock() + cancel() + return + } + mx.Unlock() + newEvent := qu.T() + go func() { + ticker := time.NewTicker(time.Second) + var fetchIDs [][]byte + // start fetching loop + for { + select { + case <-newEvent: + // make new request, not necessarily from existing... 
bias rng + // factor by request count + mx.Lock() + var sum int + for i := range counter { + rn := frand.Intn(256) + if sum > diff { + // don't overfill + break + } + // multiply this number by the number of accesses the event + // has and request every event that gets over 50% so that we + // create a bias towards already requested. + if counter[i].requested+rn > 216 { + log.T.Ln( + "counter", counter[i].requested, "+", rn, + "=", + counter[i].requested+rn, + ) + // log.F.Ln("adding to fetchIDs") + counter[i].requested++ + fetchIDs = append(fetchIDs, counter[i].id) + sum += counter[i].size + } + } + // if len(fetchIDs) > 0 { + // log.F.Ln("fetchIDs", len(fetchIDs), fetchIDs) + // } + mx.Unlock() + case <-ticker.C: + // copy out current list of events to request + mx.Lock() + log.T.Ln("ticker", len(fetchIDs)) + ids := tag.NewWithCap(len(fetchIDs)) + for i := range fetchIDs { + ids.Append(fetchIDs[i]) + } + fetchIDs = fetchIDs[:0] + mx.Unlock() + if ids.Len() > 0 { + _, err = twoLevel.QueryEvents(c, &filter.F{Ids: ids}) + } + case <-c.Done(): + log.I.Ln("context canceled") + return + } + } + }() + var ev *event.E + var bs int + out: + for { + select { + case <-c.Done(): + log.I.Ln("context canceled") + return + default: + } + if ev, bs, err = tests.GenerateEvent(MaxContentSize); chk.E(err) { + return + } + mx.Lock() + counter = append( + counter, Counter{id: ev.Id, size: bs, requested: 1}, + ) + total += bs + if total > TotalSize*10*units.Gb { + log.I.Ln(total, TotalSize*units.Gb) + mx.Unlock() + cancel() + break out + } + mx.Unlock() + newEvent.Signal() + sc, _ := context.Timeout(c, 2*time.Second) + if err = twoLevel.SaveEvent(sc, ev); chk.E(err) { + continue end + } + delay := frand.Intn(int(MaxDelay)) + log.T.Ln("waiting between", delay, "ns") + if delay == 0 { + continue + } + select { + case <-c.Done(): + log.I.Ln("context canceled") + return + case <-time.After(time.Duration(delay)): + } + } + select { + case <-c.Done(): + } + } +} diff --git a/layer2/layer2.go b/layer2/layer2.go new file mode 100644 index 0000000..9cadf30 --- /dev/null +++ b/layer2/layer2.go @@ -0,0 +1,279 @@ +// Package layer2 is a library for building nostr event stores with two separate +// data storage systems, primarily for creating size limited caches with larger +// stores backing them, to enable scaling providing access to an event store to +// more users more quickly via a caching strategy. +package layer2 + +import ( + "errors" + "io" + "orly.dev/chk" + "orly.dev/log" + "path/filepath" + "sync" + "time" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/eventid" + "orly.dev/filter" + "orly.dev/store" + "orly.dev/tag" + "orly.dev/timestamp" +) + +// Backend is a two level nostr event store. The first level is assumed to have a subset of all +// events that the second level has. This is a mechanism for sharding nostr event data across +// multiple relays which can then be failovers for each other or shards by geography or subject +// matter. +type Backend struct { + Ctx context.T + WG *sync.WaitGroup + path string + // L1 will store its state/configuration in path/layer1 + L1 store.I + // L2 will store its state/configuration in path/layer2 + L2 store.I + // PollFrequency is how often the L2 is queried for recent events. This is only + // relevant for shared layer2 stores, and will not apply for layer2 + // implementations that are just two separate data store systems on the same + // server. 
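	//
	// For example, with a PollFrequency of 5s and a PollOverlap of 4, each
	// poll winds its since-cursor back 20s behind the previous poll's until,
	// so writes that reach a shared L2 a little late are still picked up on a
	// later pass.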
+ PollFrequency time.Duration + // PollOverlap is the multiple of the PollFrequency within which polling the L2 + // is done to ensure any slow synchrony on the L2 is covered (2-4 usually). + PollOverlap int + // EventSignal triggers when the L1 saves a new event from the L2 + // + // caller is responsible for populating this so that a signal can pass to all + // peers sharing the same L2 and enable cross-cluster subscription delivery. + EventSignal event.C +} + +// Init a layer2.Backend setting up their configurations and polling frequencies and other +// similar things. +func (b *Backend) Init(path string) (err error) { + b.path = path + // each backend will have configuration files living in a subfolder of the same + // root, path/layer1 and path/layer2 - this may only be state/configuration, or + // it can be the site of the storage of data. + path1 := filepath.Join(path, "layer1") + path2 := filepath.Join(path, "layer2") + if err = b.L1.Init(path1); chk.E(err) { + return + } + if err = b.L2.Init(path2); chk.E(err) { + return + } + // if poll syncing is disabled don't start the ticker + if b.PollFrequency == 0 { + return + } + // Polling overlap should be 4x polling frequency, if less than 2x + if b.PollOverlap < 2 { + b.PollOverlap = 4 + } + log.I.Ln( + "L2 polling frequency", b.PollFrequency, "overlap", + b.PollFrequency*time.Duration(b.PollOverlap), + ) + go func() { + ticker := time.NewTicker(5 * time.Second) + last := timestamp.Now().I64() + out: + for { + select { + case <-b.Ctx.Done(): + chk.E(b.Close()) + return + case <-ticker.C: + until := timestamp.Now() + var evs []*event.E + if evs, err = b.L2.QueryEvents( + b.Ctx, + &filter.F{Since: timestamp.FromUnix(last), Until: until}, + ); chk.E(err) { + continue out + } + // todo now wat + _ = evs + last = until.I64() - int64(time.Duration(b.PollOverlap)*b.PollFrequency/time.Second) + } + } + }() + return +} + +// Path returns the filesystem path root of the layer2.Backend. +func (b *Backend) Path() (s string) { return b.path } + +// Close the two layers of a layer2.Backend. +func (b *Backend) Close() (err error) { + var e1, e2 error + if e1 = b.L1.Close(); chk.E(e1) { + err = e1 + } + if e2 = b.L2.Close(); chk.E(e2) { + if err != nil { + err = errors.Join(err, e2) + } else { + err = e2 + } + } + return +} + +// Nuke wipes the both of the event stores in parallel and returns when both are complete. +func (b *Backend) Nuke() (err error) { + var wg sync.WaitGroup + var err1, err2 error + go func() { + if err1 = b.L1.Nuke(); chk.E(err) { + } + wg.Done() + }() + go func() { + wg.Add(1) + if err2 = b.L2.Nuke(); chk.E(err) { + } + wg.Done() + }() + wg.Wait() + err = errors.Join(err1, err2) + return +} + +// QueryEvents processes a filter.F search on the event store. The events found in the second +// level will be saved into the first level so they become available from the first layer next +// time they match. +func (b *Backend) QueryEvents(c context.T, f *filter.F) ( + evs event.Ts, err error, +) { + if evs, err = b.L1.QueryEvents(c, f); chk.E(err) { + return + } + // if there is pruned events (have only Id, no pubkey), they will also be in the + // L2 result, save these to the L1. 
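	// An event that comes back with only its Id set is a stub left behind by
	// pruning in the L1; collect these Ids so the goroutine below can fetch
	// the full events from the L2 and re-save them into the L1.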
+ var revives [][]byte + var founds event.Ts + for _, ev := range evs { + if len(ev.Pubkey) == 0 { + // note the event Id to fetch + revives = append(revives, ev.Id) + } else { + founds = append(founds, ev) + } + } + evs = founds + go func(revives [][]byte) { + var err error + // construct the filter to fetch the missing events in the background that we + // know about, these will come in later on the subscription while it remains + // open. + l2filter := &filter.F{Ids: tag.New(revives...)} + var evs2 event.Ts + if evs2, err = b.L2.QueryEvents(c, l2filter); chk.E(err) { + return + } + for _, ev := range evs2 { + // saving the events here will trigger a match on the subscription + if err = b.L1.SaveEvent(c, ev); err != nil { + continue + } + } + // after fetching what we know exists of non pruned indexes that found stubs we + // want to run the query to the L2 anyway, and any matches that are found that + // were not locally available will now be available. + // + // if the subscription is still open the matches will be delivered later, the + // late events will be in descending (reverse chronological) order but the stream + // as a whole will not be. whatever. + var evs event.Ts + if evs, err = b.L2.QueryEvents(c, f); chk.E(err) { + return + } + for _, ev := range evs { + if err = b.L1.SaveEvent(c, ev); err != nil { + continue + } + } + }(revives) + return +} + +// // CountEvents counts how many events match on a filter, providing an approximate flag if either +// // of the layers return this, and the result is the maximum of the two layers results. +// func (b *Backend) CountEvents(c context.F, f *filter.F) (count int, approx bool, err error) { +// var wg sync.WaitGroup +// var count1, count2 int +// var approx1, approx2 bool +// var err1, err2 error +// go func() { +// count1, approx1, err1 = b.L1.CountEvents(c, f) +// wg.Done() +// }() +// // because this is a low-data query we will wait until the L2 also gets a count, +// // which should be under a few hundred ms in most cases +// go func() { +// wg.Add(1) +// count2, approx2, err2 = b.L2.CountEvents(c, f) +// }() +// wg.Wait() +// // we return the maximum, it is assumed the L2 is authoritative, but it could be +// // the L1 has more for whatever reason, so return the maximum of the two. +// count = count1 +// approx = approx1 +// if count2 > count { +// count = count2 +// // the approximate flag probably will be false if the L2 got more, and it is a +// // very large, non GC store. +// approx = approx2 +// } +// err = errors.Join(err1, err2) +// return +// } + +// DeleteEvent deletes an event on both the layer1 and layer2. +func (b *Backend) DeleteEvent(c context.T, ev *eventid.T) (err error) { + // delete the events from both stores. + err = errors.Join( + b.L1.DeleteEvent(c, ev), + b.L2.DeleteEvent(c, ev), + ) + return +} + +// SaveEvent stores an event on both layer1 and layer2. +func (b *Backend) SaveEvent(c context.T, ev *event.E) (err error) { + // save to both event stores + err = errors.Join( + b.L1.SaveEvent(c, ev), // this will also send out to subscriptions + b.L2.SaveEvent(c, ev), + ) + return +} + +// Import events to the layer2, if the events come up in searches they will be propagated down +// to the layer1. +func (b *Backend) Import(r io.Reader) { + // we import up to the L2 directly, demanded data will be fetched from it by + // later queries. + b.L2.Import(r) +} + +// Export from the layer2, which is assumed to be the most authoritative (and large) store of +// events available to the relay. 
+func (b *Backend) Export(c context.T, w io.Writer, pubkeys ...[]byte) { + // export only from the L2 as it is considered to be the authoritative event + // store of the two, and this is generally an administrative or infrequent action + // and latency will not matter as it usually will be a big bulky download. + b.L2.Export(c, w, pubkeys...) +} + +// Sync triggers both layer1 and layer2 to flush their buffers and store any events in caches. +func (b *Backend) Sync() (err error) { + err1 := b.L1.Sync() + // more than likely L2 sync is a noop. + err2 := b.L2.Sync() + err = errors.Join(err1, err2) + return +} diff --git a/list/list.go b/list/list.go deleted file mode 100644 index 6133589..0000000 --- a/list/list.go +++ /dev/null @@ -1,3 +0,0 @@ -package list - -type L map[string]struct{} diff --git a/lol/README.md b/lol/README.md index 896eaad..d9f49f8 100644 --- a/lol/README.md +++ b/lol/README.md @@ -7,41 +7,12 @@ main feature is printing source code locations to make debugging easier. ## terminals -Due to how so few terminals actually support source location hyperlinks, pretty much tilix and intellij terminal are -the only two that really provide adequate functionality; this logging library defaults to output format that works -best with intellij. As such, the terminal is aware of the CWD and the code locations printed are relative, as -required to get the hyperlinkization from this terminal. - -Handling support for Tilix requires more complications and -due to advances with IntelliJ's handling it is not practical to support any other for this purpose. Users of this +Due to how so few terminals actually support source location hyperlinks, pretty much tilix and intellij terminal are +the only two that really provide adequate functionality, this logging library defaults to output format that works +best with intellij. As such, the terminal is aware of the CWD and the code locations printed are relative, as +required to get the hyperlinkization from this terminal. Handling support for Tilix requires more complications and +due to advances with IntelliJ's handling it is not practical to support any other for this purpose. Users of this library can always fall back to manually interpreting and accessing the relative file path to find the source of a log. -## using with tilix - -this enables us to remove the base of the path for a more compact code location string, -this can be used with tilix custom hyperlinks feature - -create a script called `setcurrent` in your PATH ( eg ~/.local/bin/setcurrent ) - - #!/usr/bin/bash - echo $(pwd) > ~/.current - -make it executable - - chmod +x ~/.local/bin/setcurrent - -set the following environment variable in your ~/.bashrc - - export PROMPT_COMMAND='setcurrent' - -using the following regular expressions, replacing the path as necessary, and setting -perhaps a different program than ide (this is for goland, i use an alias to the binary) - - ^((([a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+)) ide --line $5 $(cat /home/mleku/.current)/$2 - [ ]((([a-zA-Z@0-9-_./]+)+([a-zA-Z@0-9-_.]+)):([0-9]+)) ide --line $5 $(cat /home/mleku/.current)/$2 - ([/](([a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+)) ide --line $5 /$2 - -and so long as you use this with an app containing /lol/log.go as this one is, this finds -that path and trims it off from the log line locations and in tilix you can click on the -file locations that are relative to the CWD where you are running the relay from. 
if this -is a remote machine, just go to the location where your source code is to make it work \ No newline at end of file +In addition, due to this terminal's slow rendering of long lines, long log strings are automatically broken into 80 +character lines, and if there is comma separators in the line, the line is broken at the comma instead of at column80. This works perfectly for this purpose. \ No newline at end of file diff --git a/lol/log.go b/lol/log.go index 2dfc143..d953186 100644 --- a/lol/log.go +++ b/lol/log.go @@ -1,20 +1,20 @@ -// Package lol (log of location) is a simple logging library the source location -// of a log print to make tracing errors simpler. -// -// Includes a set of logging levels and the ability to filter out higher log -// levels for a more quiet output. +// Package lol (log of location) is a simple logging library that prints a high precision unix +// timestamp and the source location of a log print to make tracing errors simpler. Includes a +// set of logging levels and the ability to filter out higher log levels for a more quiet +// output. package lol import ( "fmt" - "github.com/fatih/color" "io" "os" "runtime" + "strings" "sync/atomic" "time" "github.com/davecgh/go-spew/spew" + "github.com/fatih/color" ) const ( @@ -38,8 +38,8 @@ var LevelNames = []string{ } type ( - // LevelPrinter defines a set of terminal printing primitives that output with - // extra data, time, log logLevelList, and code location + // LevelPrinter defines a set of terminal printing primitives that output with extra data, + // time, log logLevelList, and code location // Ln prints lists of interfaces with spaces in between Ln func(a ...interface{}) @@ -47,10 +47,10 @@ type ( F func(format string, a ...interface{}) // S prints a spew.Sdump for an enveloper slice S func(a ...interface{}) - // C accepts a function so that the extra computation can be avoided if it is - // not being viewed + // C accepts a function so that the extra computation can be avoided if it is not being + // viewed C func(closure func() string) - // Chk is a shortcut for printing if there is an error or returning true + // Chk is a shortcut for printing if there is an error, or returning true Chk func(e error) bool // Err is a pass-through function that uses fmt.Errorf to construct an error and returns the // error after printing it to the log @@ -73,7 +73,7 @@ type ( Colorizer func(a ...any) string } - // Entry is a log entry to be printed as JSON to the log file + // Entry is a log entry to be printed as json to the log file Entry struct { Time time.Time Level string @@ -84,9 +84,8 @@ type ( ) var ( - // Writer can be swapped out for any io.*Writer* that you want to use instead - // of stdout. - Writer io.Writer = os.Stdout + // Writer can be swapped out for any io.*Writer* that you want to use instead of stdout. + Writer io.Writer = os.Stderr // LevelSpecs specifies the id, string name and color-printing function LevelSpecs = []LevelSpec{ @@ -98,6 +97,8 @@ var ( {Debug, "DBG", color.New(color.FgHiBlue).Sprint}, {Trace, "TRC", color.New(color.FgHiMagenta).Sprint}, } + NoTimeStamp atomic.Bool + ShortLoc atomic.Bool ) // NoSprint is a noop for sprint (it returns nothing no matter what is given to it). @@ -129,14 +130,11 @@ type Logger struct { // Level is the level that the logger is printing at. var Level atomic.Int32 -func GetLevel() string { - return LevelNames[Level.Load()] -} - // Main is the main logger. 
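//
// A minimal usage sketch (assuming the I printer on Log is wired the same way
// as the T printer used in SetLoggers below):
//
//	SetLogLevel("debug")
//	Main.Log.I.Ln("starting up")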
var Main = &Logger{} func init() { + // Main = &Logger{} Main.Log, Main.Check, Main.Errorf = New(os.Stderr, 2) ll := os.Getenv("LOG_LEVEL") if ll == "" { @@ -154,6 +152,7 @@ func init() { // SetLoggers configures a log level. func SetLoggers(level int) { + Main.Log.T.F("log level %s", LevelSpecs[level].Colorizer(LevelNames[level])) Level.Store(int32(level)) if Level.Load() < Trace { Tracer = noopTracer @@ -183,8 +182,7 @@ func SetLogLevel(level string) { SetLoggers(Trace) } -// JoinStrings joins together anything into a set of strings with space -// separating the items. +// JoinStrings joins together anything into a set of strings with space separating the items. func JoinStrings(a ...any) (s string) { for i := range a { s += fmt.Sprint(a[i]) @@ -205,9 +203,8 @@ func getTracer() (fn func(funcName string, variables ...any)) { for _, v := range variables { vars += spew.Sdump(v) } - fmt.Fprintf( - Writer, "%s %s %s\n%s", - // TimeStamper(), + fmt.Fprintf(Writer, "%s %s %s\n%s", + //TimeStamper(), LevelSpecs[Trace].Colorizer(LevelSpecs[Trace].Name), funcName, loc, @@ -231,10 +228,9 @@ func GetPrinter(l int32, writer io.Writer, skip int) LevelPrinter { if Level.Load() < l { return } - fmt.Fprintf( - writer, + fmt.Fprintf(writer, "%s%s %s %s\n", - TimeStamper(), + msgCol(TimeStamper()), LevelSpecs[l].Colorizer(LevelSpecs[l].Name), JoinStrings(a...), msgCol(GetLoc(skip)), @@ -244,10 +240,9 @@ func GetPrinter(l int32, writer io.Writer, skip int) LevelPrinter { if Level.Load() < l { return } - fmt.Fprintf( - writer, + fmt.Fprintf(writer, "%s%s %s %s\n", - TimeStamper(), + msgCol(TimeStamper()), LevelSpecs[l].Colorizer(LevelSpecs[l].Name), fmt.Sprintf(format, a...), msgCol(GetLoc(skip)), @@ -257,10 +252,9 @@ func GetPrinter(l int32, writer io.Writer, skip int) LevelPrinter { if Level.Load() < l { return } - fmt.Fprintf( - writer, + fmt.Fprintf(writer, "%s%s %s %s\n", - TimeStamper(), + msgCol(TimeStamper()), LevelSpecs[l].Colorizer(LevelSpecs[l].Name), spew.Sdump(a...), msgCol(GetLoc(skip)), @@ -270,10 +264,9 @@ func GetPrinter(l int32, writer io.Writer, skip int) LevelPrinter { if Level.Load() < l { return } - fmt.Fprintf( - writer, + fmt.Fprintf(writer, "%s%s %s %s\n", - TimeStamper(), + msgCol(TimeStamper()), LevelSpecs[l].Colorizer(LevelSpecs[l].Name), closure(), msgCol(GetLoc(skip)), @@ -284,10 +277,9 @@ func GetPrinter(l int32, writer io.Writer, skip int) LevelPrinter { return e != nil } if e != nil { - fmt.Fprintf( - writer, + fmt.Fprintf(writer, "%s%s %s %s\n", - TimeStamper(), + msgCol(TimeStamper()), LevelSpecs[l].Colorizer(LevelSpecs[l].Name), e.Error(), msgCol(GetLoc(skip)), @@ -298,10 +290,9 @@ func GetPrinter(l int32, writer io.Writer, skip int) LevelPrinter { }, Err: func(format string, a ...interface{}) error { if Level.Load() >= l { - fmt.Fprintf( - writer, + fmt.Fprintf(writer, "%s%s %s %s\n", - TimeStamper(), + msgCol(TimeStamper()), LevelSpecs[l].Colorizer(LevelSpecs[l].Name), fmt.Sprintf(format, a...), msgCol(GetLoc(skip)), @@ -320,11 +311,7 @@ func GetNullPrinter() LevelPrinter { S: func(a ...interface{}) {}, C: func(closure func() string) {}, Chk: func(e error) bool { return e != nil }, - Err: func(format string, a ...interface{}) error { - return fmt.Errorf( - format, a..., - ) - }, + Err: func(format string, a ...interface{}) error { return fmt.Errorf(format, a...) }, } } @@ -362,18 +349,10 @@ func New(writer io.Writer, skip int) (l *Log, c *Check, errorf *Errorf) { // TimeStamper generates the timestamp for logs. 
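+// It returns a date-time string with millisecond precision and a trailing space, or an empty
+// string when NoTimeStamp has been set, which disables timestamps in the output entirely.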
func TimeStamper() (s string) { - ts := time.Now().Format("150405.000000") - ds := time.Now().Format("2006-01-02") - s += color.New(color.FgBlue).Sprint(ds[0:4]) - s += color.New(color.FgHiBlue).Sprint(ds[5:7]) - s += color.New(color.FgBlue).Sprint(ds[8:]) - s += color.New(color.FgHiBlue).Sprint(ts[0:2]) - s += color.New(color.FgBlue).Sprint(ts[2:4]) - s += color.New(color.FgHiBlue).Sprint(ts[4:6]) - s += color.New(color.FgBlue).Sprint(ts[7:]) - // s = color.New(color.Faint).Sprint(s) - s += " " - return + if NoTimeStamp.Load() { + return + } + return time.Now().Format("2006-01-02T15:04:05Z07:00.000 ") } // var wd, _ = os.Getwd() @@ -420,14 +399,14 @@ func init() { // GetLoc returns the code location of the caller. func GetLoc(skip int) (output string) { _, file, line, _ := runtime.Caller(skip) - // if strings.Contains(file, "pkg/mod/") { - // } else { - // var split []string - // split = strings.Split(file, prefix) - // if len(split) > 1 { - // file = split[1] - // } - // } + if strings.Contains(file, "pkg/mod/") || !ShortLoc.Load() { + } else { + var split []string + split = strings.Split(file, prefix) + if len(split) > 1 { + file = split[1] + } + } output = fmt.Sprintf("%s:%d", file, line) return } diff --git a/lol/log_test.go b/lol/log_test.go index aa5d180..ece2846 100644 --- a/lol/log_test.go +++ b/lol/log_test.go @@ -6,6 +6,7 @@ import ( "fmt" "strings" "testing" + "time" ) func TestLogLevels(t *testing.T) { @@ -15,9 +16,7 @@ func TestLogLevels(t *testing.T) { } // Test that LevelNames matches the constants - expectedLevelNames := []string{ - "off", "fatal", "error", "warn", "info", "debug", "trace", - } + expectedLevelNames := []string{"off", "fatal", "error", "warn", "info", "debug", "trace"} for i, name := range expectedLevelNames { if LevelNames[i] != name { t.Errorf("LevelNames[%d] = %s, want %s", i, LevelNames[i], name) @@ -41,17 +40,12 @@ func TestGetLogLevel(t *testing.T) { } for _, test := range tests { - t.Run( - test.level, func(t *testing.T) { - result := GetLogLevel(test.level) - if result != test.expected { - t.Errorf( - "GetLogLevel(%q) = %d, want %d", test.level, result, - test.expected, - ) - } - }, - ) + t.Run(test.level, func(t *testing.T) { + result := GetLogLevel(test.level) + if result != test.expected { + t.Errorf("GetLogLevel(%q) = %d, want %d", test.level, result, test.expected) + } + }) } } @@ -75,18 +69,13 @@ func TestSetLogLevel(t *testing.T) { } for _, test := range tests { - t.Run( - test.level, func(t *testing.T) { - SetLogLevel(test.level) - result := Level.Load() - if result != test.expected { - t.Errorf( - "After SetLogLevel(%q), Level = %d, want %d", - test.level, result, test.expected, - ) - } - }, - ) + t.Run(test.level, func(t *testing.T) { + SetLogLevel(test.level) + result := Level.Load() + if result != test.expected { + t.Errorf("After SetLogLevel(%q), Level = %d, want %d", test.level, result, test.expected) + } + }) } } @@ -103,20 +92,69 @@ func TestJoinStrings(t *testing.T) { } for i, test := range tests { - t.Run( - fmt.Sprintf("case_%d", i), func(t *testing.T) { - result := JoinStrings(test.args...) - if result != test.expected { - t.Errorf( - "JoinStrings(%v) = %q, want %q", test.args, result, - test.expected, - ) - } - }, - ) + t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) { + result := JoinStrings(test.args...) 
+ if result != test.expected { + t.Errorf("JoinStrings(%v) = %q, want %q", test.args, result, test.expected) + } + }) } } +func TestTimeStamper(t *testing.T) { + // Test with NoTimeStamp = false + NoTimeStamp.Store(false) + timestamp := TimeStamper() + if timestamp == "" { + t.Error("TimeStamper() returned empty string when NoTimeStamp = false") + } + + // Check format (should be like "2006-01-02T15:04:05Z07:00.000 ") + _, err := time.Parse("2006-01-02T15:04:05Z07:00.000 ", timestamp) + if err != nil { + t.Errorf("TimeStamper() returned timestamp in unexpected format: %q, error: %v", timestamp, err) + } + + // Test with NoTimeStamp = true + NoTimeStamp.Store(true) + timestamp = TimeStamper() + if timestamp != "" { + t.Errorf("TimeStamper() returned %q when NoTimeStamp = true, expected empty string", timestamp) + } + + // Reset for other tests + NoTimeStamp.Store(false) +} + +func TestGetLoc(t *testing.T) { + // Test with ShortLoc = false + ShortLoc.Store(false) + loc := GetLoc(1) + if !strings.Contains(loc, "log_test.go") { + t.Errorf("GetLoc(1) = %q, expected to contain 'log_test.go'", loc) + } + + // Test with ShortLoc = true + ShortLoc.Store(true) + loc = GetLoc(1) + if !strings.Contains(loc, "log_test.go") { + t.Errorf("GetLoc(1) = %q, expected to contain 'log_test.go'", loc) + } + + // Test edge case where file path doesn't contain prefix + originalPrefix := prefix + defer func() { prefix = originalPrefix }() // Restore original prefix after test + + prefix = "non-existent-path" + loc = GetLoc(1) + if !strings.Contains(loc, "log_test.go") { + t.Errorf("GetLoc(1) with non-existent prefix = %q, expected to contain 'log_test.go'", loc) + } + + // Reset for other tests + ShortLoc.Store(false) +} + func TestGetPrinter(t *testing.T) { // Create a buffer to capture output var buf bytes.Buffer @@ -133,9 +171,7 @@ func TestGetPrinter(t *testing.T) { buf.Reset() printer.Ln("test message") if buf.String() != "" { - t.Errorf( - "printer.Ln() printed when level is too high: %q", buf.String(), - ) + t.Errorf("printer.Ln() printed when level is too high: %q", buf.String()) } // Set log level to Debug @@ -149,9 +185,7 @@ func TestGetPrinter(t *testing.T) { t.Error("printer.Ln() did not print when it should have") } if !strings.Contains(output, "test message") { - t.Errorf( - "printer.Ln() output %q does not contain 'test message'", output, - ) + t.Errorf("printer.Ln() output %q does not contain 'test message'", output) } // Test F method @@ -159,10 +193,7 @@ func TestGetPrinter(t *testing.T) { printer.F("formatted %s", "message") output = buf.String() if !strings.Contains(output, "formatted message") { - t.Errorf( - "printer.F() output %q does not contain 'formatted message'", - output, - ) + t.Errorf("printer.F() output %q does not contain 'formatted message'", output) } // Test S method @@ -170,9 +201,7 @@ func TestGetPrinter(t *testing.T) { printer.S("spew message") output = buf.String() if !strings.Contains(output, "spew message") { - t.Errorf( - "printer.S() output %q does not contain 'spew message'", output, - ) + t.Errorf("printer.S() output %q does not contain 'spew message'", output) } // Test C method @@ -180,9 +209,7 @@ func TestGetPrinter(t *testing.T) { printer.C(func() string { return "closure message" }) output = buf.String() if !strings.Contains(output, "closure message") { - t.Errorf( - "printer.C() output %q does not contain 'closure message'", output, - ) + t.Errorf("printer.C() output %q does not contain 'closure message'", output) } // Test Chk method with nil error @@ -203,10 
+230,7 @@ func TestGetPrinter(t *testing.T) { t.Error("printer.Chk(error) returned false, expected true") } if !strings.Contains(buf.String(), "test error") { - t.Errorf( - "printer.Chk(error) output %q does not contain 'test error'", - buf.String(), - ) + t.Errorf("printer.Chk(error) output %q does not contain 'test error'", buf.String()) } // Test Err method @@ -216,17 +240,11 @@ func TestGetPrinter(t *testing.T) { t.Error("printer.Err() returned nil error") } if err.Error() != "error message" { - t.Errorf( - "printer.Err() returned error with message %q, expected 'error message'", - err.Error(), - ) + t.Errorf("printer.Err() returned error with message %q, expected 'error message'", err.Error()) } // Check if the message was logged if !strings.Contains(buf.String(), "error message") { - t.Errorf( - "printer.Err() output %q does not contain 'error message'", - buf.String(), - ) + t.Errorf("printer.Err() output %q does not contain 'error message'", buf.String()) } } @@ -253,10 +271,7 @@ func TestGetNullPrinter(t *testing.T) { t.Error("GetNullPrinter().Err() returned nil error") } if err.Error() != "test error" { - t.Errorf( - "GetNullPrinter().Err() returned error with message %q, expected 'test error'", - err.Error(), - ) + t.Errorf("GetNullPrinter().Err() returned error with message %q, expected 'test error'", err.Error()) } } @@ -283,9 +298,6 @@ func TestNew(t *testing.T) { buf.Reset() log.D.Ln("test message") if !strings.Contains(buf.String(), "test message") { - t.Errorf( - "log.D.Ln() output %q does not contain 'test message'", - buf.String(), - ) + t.Errorf("log.D.Ln() output %q does not contain 'test message'", buf.String()) } } diff --git a/main.go b/main.go index 513db3a..0f12818 100644 --- a/main.go +++ b/main.go @@ -1,27 +1,32 @@ +// Package main is a nostr relay with a simple follow/mute list authentication +// scheme and the new HTTP REST based protocol. Configuration is via environment +// variables or an optional .env file. 
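+//
+// When profiling is enabled in the configuration, a memory profile is captured and the
+// standard net/http/pprof endpoint is served on 127.0.0.1:6060.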
package main import ( "fmt" "github.com/pkg/profile" - "net" "net/http" + _ "net/http/pprof" "orly.dev/chk" - "orly.dev/config" - "orly.dev/context" - "orly.dev/database" - "orly.dev/interrupt" "orly.dev/log" - "orly.dev/lol" - "orly.dev/servemux" - "orly.dev/server" - "orly.dev/socketapi" - "orly.dev/version" + realy_lol "orly.dev/version" "os" - "strconv" "sync" + + "orly.dev/app" + "orly.dev/context" + "orly.dev/interrupt" + "orly.dev/lol" + "orly.dev/ratel" + "orly.dev/realy" + "orly.dev/realy/config" + "orly.dev/realy/options" + "orly.dev/units" ) func main() { + log.I.F("starting realy %s", realy_lol.V) var err error var cfg *config.C if cfg, err = config.New(); chk.T(err) { @@ -39,39 +44,44 @@ func main() { config.PrintHelp(cfg, os.Stderr) os.Exit(0) } + log.I.Ln("log level", cfg.LogLevel) + lol.SetLogLevel(cfg.LogLevel) if cfg.Pprof { defer profile.Start(profile.MemProfile).Stop() go func() { chk.E(http.ListenAndServe("127.0.0.1:6060", nil)) }() } - log.I.F( - "starting %s %s; log level: %s", version.Name, version.V, - lol.GetLevel(), - ) - wg := &sync.WaitGroup{} + var wg sync.WaitGroup c, cancel := context.Cancel(context.Bg()) - interrupt.AddHandler(func() { cancel() }) - var sto *database.D - if sto, err = database.New( - c, cancel, cfg.DataDir, cfg.LogLevel, - ); chk.E(err) { - return + storage := ratel.New( + ratel.BackendParams{ + Ctx: c, + WG: &wg, + BlockCacheSize: units.Gb, + LogLevel: lol.GetLogLevel(cfg.DbLogLevel), + MaxLimit: ratel.DefaultMaxLimit, + }, + ) + r := &app.Relay{C: cfg, Store: storage} + go app.MonitorResources(c) + var server *realy.Server + serverParams := &realy.ServerParams{ + Ctx: c, + Cancel: cancel, + Rl: r, + DbPath: cfg.DataDir, + MaxLimit: ratel.DefaultMaxLimit, } - serveMux := servemux.New() - s := &server.S{ - Ctx: c, - Cancel: cancel, - WG: wg, - Addr: net.JoinHostPort(cfg.Listen, strconv.Itoa(cfg.Port)), - Mux: serveMux, - Cfg: cfg, - Store: sto, - } - wg.Add(1) - interrupt.AddHandler(func() { s.Shutdown() }) - socketapi.New(s, "/{$}", serveMux, socketapi.DefaultSocketParams()) - if err = s.Start(); chk.E(err) { + var opts []options.O + if server, err = realy.NewServer(serverParams, opts...); chk.E(err) { os.Exit(1) } + if err != nil { + log.F.F("failed to create server: %v", err) + } + interrupt.AddHandler(func() { server.Shutdown() }) + if err = server.Start(cfg.Listen, cfg.Port); chk.E(err) { + log.F.F("server terminated: %v", err) + } } diff --git a/normalize/normalize.go b/normalize/normalize.go index af1b8e3..0fdc9dd 100644 --- a/normalize/normalize.go +++ b/normalize/normalize.go @@ -4,11 +4,12 @@ package normalize import ( "bytes" + "fmt" "net/url" - "orly.dev/chk" - "orly.dev/ints" "orly.dev/log" + + "orly.dev/ints" ) var ( @@ -92,3 +93,48 @@ func URL[V string | []byte](v V) (b []byte) { p.Path = string(bytes.TrimRight([]byte(p.Path), "/")) return []byte(p.String()) } + +// Msg constructs a properly formatted message with a machine-readable prefix for OK and CLOSED +// envelopes. +func Msg(prefix Reason, format string, params ...any) []byte { + if len(prefix) < 1 { + prefix = Error + } + return []byte(fmt.Sprintf(prefix.S()+": "+format, params...)) +} + +// Reason is the machine-readable prefix before the colon in an OK or CLOSED envelope message. +// Below are the most common kinds that are mentioned in NIP-01. 
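+// For example, Msg(Blocked, "event from %s rejected", addr) renders as
+// "blocked: event from <addr> rejected", so clients can match on the prefix before the colon.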
+type Reason []byte + +var ( + AuthRequired = Reason("auth-required") + PoW = Reason("pow") + Duplicate = Reason("duplicate") + Blocked = Reason("blocked") + RateLimited = Reason("rate-limited") + Invalid = Reason("invalid") + Error = Reason("error") + Unsupported = Reason("unsupported") + Restricted = Reason("restricted") +) + +// S returns the Reason as a string +func (r Reason) S() string { return string(r) } + +// B returns the Reason as a byte slice. +func (r Reason) B() []byte { return r } + +// IsPrefix returns whether a text contains the same Reason prefix. +func (r Reason) IsPrefix(reason []byte) bool { + return bytes.HasPrefix( + reason, r.B(), + ) +} + +// F allows creation of a full Reason text with a printf style format. +func (r Reason) F(format string, params ...any) []byte { + return Msg( + r, format, params..., + ) +} diff --git a/nwc/doc.go b/nwc/doc.go new file mode 100644 index 0000000..66f90cc --- /dev/null +++ b/nwc/doc.go @@ -0,0 +1,4 @@ +// Package nwc is an implementation of the NWC Nostr Wallet Connect protocol for +// communicating with lightning (and potentially other kinds of wallets) using +// nostr ephemeral event messages. +package nwc diff --git a/nwc/error.go b/nwc/error.go new file mode 100644 index 0000000..ff3df5a --- /dev/null +++ b/nwc/error.go @@ -0,0 +1,6 @@ +package nwc + +type Error struct { + Code []byte + Message []byte +} diff --git a/nwc/get_balance.go b/nwc/get_balance.go new file mode 100644 index 0000000..80c9813 --- /dev/null +++ b/nwc/get_balance.go @@ -0,0 +1,19 @@ +package nwc + +type GetBalanceRequest struct { + Request + // nothing to see here, move along +} + +func NewGetBalanceRequest() *GetBalanceRequest { + return &GetBalanceRequest{Request{Methods.GetBalance}} +} + +type GetBalanceResponse struct { + Response + Balance Msat +} + +func NewGetBalanceResponse(balance Msat) *GetBalanceResponse { + return &GetBalanceResponse{Response{Type: Methods.GetBalance}, balance} +} diff --git a/nwc/get_info.go b/nwc/get_info.go new file mode 100644 index 0000000..6d42a13 --- /dev/null +++ b/nwc/get_info.go @@ -0,0 +1,29 @@ +package nwc + +type GetInfoRequest struct { + Request + // nothing to see here, move along +} + +func NewGetInfoRequest() GetInfoRequest { + return GetInfoRequest{Request{Methods.GetInfo}} +} + +type GetInfo struct { + Alias []byte + Color []byte // Hex string + Pubkey []byte + Network []byte // mainnet/testnet/signet/regtest + BlockHeight uint64 + BlockHash []byte + Methods []byte // pay_invoice, get_balance, make_invoice, lookup_invoice, list_transactions, get_info (list of methods) +} + +type GetInfoResponse struct { + Response + GetInfo +} + +func NewGetInfoResponse(gi GetInfo) GetInfoResponse { + return GetInfoResponse{Response{Type: Methods.GetInfo}, gi} +} diff --git a/nwc/lightning.go b/nwc/lightning.go new file mode 100644 index 0000000..ef0968a --- /dev/null +++ b/nwc/lightning.go @@ -0,0 +1,18 @@ +package nwc + +import ( + "orly.dev/kind" +) + +var Kinds = []*kind.T{ + kind.WalletInfo, + kind.WalletRequest, + kind.WalletResponse, + kind.WalletNotification, +} + +type Server struct { +} + +type Client struct { +} diff --git a/nwc/list_transactions.go b/nwc/list_transactions.go new file mode 100644 index 0000000..57cbab5 --- /dev/null +++ b/nwc/list_transactions.go @@ -0,0 +1,21 @@ +package nwc + +type ListTransactionsRequest struct { + Request + ListTransactions +} + +func NewListTransactionsRequest(req ListTransactions) *ListTransactionsRequest { + return &ListTransactionsRequest{ + 
		Request{Methods.ListTransactions}, req,
+	}
+}
+
+type ListTransactionsResponse struct {
+	Response
+	Transactions []LookupInvoice
+}
+
+func NewListTransactionsResponse(txs []LookupInvoice) ListTransactionsResponse {
+	return ListTransactionsResponse{Response{Type: Methods.ListTransactions}, txs}
+}
diff --git a/nwc/lookup_invoice.go b/nwc/lookup_invoice.go
new file mode 100644
index 0000000..7385a95
--- /dev/null
+++ b/nwc/lookup_invoice.go
@@ -0,0 +1,26 @@
+package nwc
+
+type LookupInvoiceRequest struct {
+	Request
+	PaymentHash, Invoice []byte
+}
+
+func NewLookupInvoiceRequest(paymentHash, invoice []byte) *LookupInvoiceRequest {
+	return &LookupInvoiceRequest{
+		Request{Methods.LookupInvoice}, paymentHash, invoice,
+	}
+}
+
+type LookupInvoice struct {
+	Response
+	InvoiceResponse
+	SettledAt int64 // optional if unpaid
+}
+type LookupInvoiceResponse struct {
+	Response
+	LookupInvoice
+}
+
+func NewLookupInvoiceResponse(resp LookupInvoice) LookupInvoiceResponse {
+	return LookupInvoiceResponse{Response{Type: Methods.LookupInvoice}, resp}
+}
diff --git a/nwc/make_invoice_response.go b/nwc/make_invoice_response.go
new file mode 100644
index 0000000..e53fa7b
--- /dev/null
+++ b/nwc/make_invoice_response.go
@@ -0,0 +1,29 @@
+package nwc
+
+type MakeInvoiceRequest struct {
+	Request
+	Amount          Msat
+	Description     []byte // optional
+	DescriptionHash []byte // optional
+	Expiry          int    // optional
+}
+
+func NewMakeInvoiceRequest(amount Msat, description, descriptionHash []byte,
+	expiry int) MakeInvoiceRequest {
+	return MakeInvoiceRequest{
+		Request{Methods.MakeInvoice},
+		amount,
+		description,
+		descriptionHash,
+		expiry,
+	}
+}
+
+type MakeInvoiceResponse struct {
+	Response
+	InvoiceResponse
+}
+
+func NewMakeInvoiceResponse(resp InvoiceResponse) MakeInvoiceResponse {
+	return MakeInvoiceResponse{Response{Type: Methods.MakeInvoice}, resp}
+}
diff --git a/nwc/multi_pay_invoice.go b/nwc/multi_pay_invoice.go
new file mode 100644
index 0000000..73314a9
--- /dev/null
+++ b/nwc/multi_pay_invoice.go
@@ -0,0 +1,19 @@
+package nwc
+
+type MultiPayInvoiceRequest struct {
+	Request
+	Invoices []Invoice
+}
+
+func NewMultiPayInvoiceRequest(invoices []Invoice) MultiPayInvoiceRequest {
+	return MultiPayInvoiceRequest{
+		Request:  Request{Methods.MultiPayInvoice},
+		Invoices: invoices,
+	}
+}
+
+type MultiPayInvoiceResponse = PayInvoiceResponse
+
+func NewMultiPayInvoiceResponse(preimage []byte, feesPaid Msat) MultiPayInvoiceResponse {
+	return MultiPayInvoiceResponse{Response{Type: Methods.MultiPayInvoice}, preimage, feesPaid}
+}
diff --git a/nwc/multi_pay_keysend.go b/nwc/multi_pay_keysend.go
new file mode 100644
index 0000000..5fe9fdd
--- /dev/null
+++ b/nwc/multi_pay_keysend.go
@@ -0,0 +1,18 @@
+package nwc
+
+type MultiPayKeysendRequest struct {
+	Request
+	Keysends []PayKeysendRequest
+}
+
+func NewMultiPayKeysendRequest(keysends []PayKeysendRequest) MultiPayKeysendRequest {
+	return MultiPayKeysendRequest{Request{Methods.MultiPayKeysend}, keysends}
+}
+
+type MultiPayKeysendResponse = PayKeysendResponse
+
+func NewMultiPayKeysendResponse(preimage []byte, feesPaid Msat) MultiPayKeysendResponse {
+	return MultiPayKeysendResponse{
+		Response{Type: Methods.MultiPayKeysend}, preimage, feesPaid,
+	}
+}
diff --git a/nwc/names.go b/nwc/names.go
new file mode 100644
index 0000000..ce7fb8f
--- /dev/null
+++ b/nwc/names.go
@@ -0,0 +1,130 @@
+package nwc
+
+// Methods are the text values used in the Request.Method and
+// Response.ResultType fields, in a form that allows more convenient reference
than using +// a map or package scoped variable. These appear in the API Request and Response +// types. +var Methods = struct { + PayInvoice, + MultiPayInvoice, + PayKeysend, + MultiPayKeysend, + MakeInvoice, + LookupInvoice, + ListTransactions, + GetBalance, + GetInfo []byte +}{ + []byte("pay_invoice"), + []byte("multi_pay_invoice"), + []byte("pay_keysend"), + []byte("multi_pay_keysend"), + []byte("make_invoice"), + []byte("lookup_invoice"), + []byte("list_transactions"), + []byte("get_balance"), + []byte("get_info"), +} + +// Keys are the proper JSON bytes for the JSON object keys of the structs of the +// same-named type used lower in the following. Anonymous struct syntax is used +// to make neater addressing of these fields as symbols. +var Keys = struct { + Method, + Params, + ResultType, + Error, + Result, + Invoice, + Amount, + Preimage, + FeesPaid, + Id, + TLVRecords, + Type, + Value, + Pubkey, + Description, + DescriptionHash, + Expiry, + CreatedAt, + ExpiresAt, + Metadata, + SettledAt, + From, + Until, + Offset, + Unpaid, + Balance, + Notifications, + NotificationType, + Notification, + PaymentHash []byte +}{ + []byte("method"), + []byte("params"), + []byte("result_type"), + []byte("error"), + []byte("result"), + []byte("invoice"), + []byte("amount"), + []byte("preimage"), + []byte("fees_paid"), + []byte("id"), + []byte("tlv_records"), + []byte("type"), + []byte("value"), + []byte("pubkey"), + []byte("description"), + []byte("description_hash"), + []byte("expiry"), + []byte("created_at"), + []byte("expires_at"), + []byte("metadata"), + []byte("settled_at"), + []byte("from"), + []byte("until"), + []byte("offset"), + []byte("unpaid"), + []byte("balance"), + []byte("notifications"), + []byte("notification_type"), + []byte("notification"), + []byte("payment_hash"), +} + +// Notifications are the proper strings for the Notification.NotificationType +var Notifications = struct { + PaymentReceived, PaymentSent []byte +}{ + []byte("payment_received"), + []byte("payment_sent"), +} + +var Errors = struct { + // RateLimited - The client is sending commands too fast.It should retry in a few seconds. + RateLimited, + // NotImplemented - The command is not known or is intentionally not implemented. + NotImplemented, + // InsufficientBalance - The wallet does not have enough funds to cover a fee reserve or the payment amount. + InsufficientBalance, + // QuotaExceeded - The wallet has exceeded its spending quota. + QuotaExceeded, + // Restricted - This public key is not allowed to do this operation. + Restricted, + // Unauthorized - This public key has no wallet connected. + Unauthorized, + // Internal - An internal error. + Internal, + // Other - Other error. 
+ Other []byte +}{ + []byte("RATE_LIMITED"), + []byte("NOT_IMPLEMENTED"), + []byte("INSUFFICIENT_BALANCE"), + []byte("QUOTA_EXCEEDED"), + []byte("RESTRICTED"), + []byte("UNAUTHORIZED"), + []byte("INTERNAL"), + []byte("OTHER"), +} diff --git a/nwc/payKeysend.go b/nwc/payKeysend.go new file mode 100644 index 0000000..212bc2d --- /dev/null +++ b/nwc/payKeysend.go @@ -0,0 +1 @@ +package nwc diff --git a/nwc/pay_invoice.go b/nwc/pay_invoice.go new file mode 100644 index 0000000..251f9c5 --- /dev/null +++ b/nwc/pay_invoice.go @@ -0,0 +1,91 @@ +package nwc + +import ( + "orly.dev/text" +) + +type PayInvoiceRequest struct { + Request + Invoice +} + +func NewPayInvoiceRequest[V string | []byte]( + invoice V, amount Msat, +) PayInvoiceRequest { + return PayInvoiceRequest{ + Request{Methods.PayInvoice}, Invoice{nil, []byte(invoice), amount}, + } +} + +func (p PayInvoiceRequest) Marshal(dst []byte) (b []byte) { + // open parentheses + dst = append(dst, '{') + // method + dst = text.JSONKey(dst, Keys.Method) + dst = text.Quote(dst, p.RequestType()) + dst = append(dst, ',') + // Params + dst = text.JSONKey(dst, Keys.Params) + dst = append(dst, '{') + // Invoice + dst = text.JSONKey(dst, Keys.Invoice) + dst = text.AppendQuote(dst, p.Invoice.Invoice, text.Noop) + // Amount - optional (omit if zero) + if p.Amount > 0 { + dst = append(dst, ',') + dst = text.JSONKey(dst, Keys.Amount) + dst = p.Amount.Bytes(dst) + } + // close parentheses + dst = append(dst, '}') + dst = append(dst, '}') + b = dst + return +} + +func (p PayInvoiceRequest) Unmarshal(b []byte) (r []byte, err error) { + + return +} + +type PayInvoiceResponse struct { + Response + Preimage []byte + FeesPaid Msat // optional, omitted if zero +} + +func NewPayInvoiceResponse(preimage []byte, feesPaid Msat) PayInvoiceResponse { + return PayInvoiceResponse{ + Response{Type: Methods.PayInvoice}, preimage, feesPaid, + } +} + +func (p PayInvoiceResponse) Marshal(dst []byte) (b []byte) { + // open parentheses + dst = append(dst, '{') + // method + dst = text.JSONKey(dst, p.Response.Type) + dst = text.Quote(dst, p.ResultType()) + // Params + dst = text.JSONKey(dst, Keys.Params) + // open parenthesis + dst = append(dst, '{') + // Invoice + dst = text.JSONKey(dst, Keys.Preimage) + dst = text.AppendQuote(dst, p.Preimage, text.Noop) + // Amount - optional (omit if zero) + if p.FeesPaid > 0 { + dst = append(dst, ',') + dst = text.JSONKey(dst, Keys.FeesPaid) + dst = p.FeesPaid.Bytes(dst) + } + // close parentheses + dst = append(dst, '}') + dst = append(dst, '}') + return +} + +func (p PayInvoiceResponse) Unmarshal(b []byte) (r []byte, err error) { + // TODO implement me + panic("implement me") +} diff --git a/nwc/pay_invoice_test.go b/nwc/pay_invoice_test.go new file mode 100644 index 0000000..148a5e9 --- /dev/null +++ b/nwc/pay_invoice_test.go @@ -0,0 +1,25 @@ +package nwc + +import ( + "fmt" + "orly.dev/chk" +) + +func ExamplePayInvoiceRequest_Marshal() { + ir := NewPayInvoiceRequest("lnbc50n1...", 0) + var b []byte + var err error + if b = ir.Marshal(b); chk.E(err) { + return + } + fmt.Printf("%s\n", b) + b = b[:0] + ir = NewPayInvoiceRequest("lnbc50n1...", 123) + if b = ir.Marshal(b); chk.E(err) { + return + } + fmt.Printf("%s\n", b) + // Output: + // {"method":"pay_invoice","params":{"invoice":"lnbc50n1..."}} + // {"method":"pay_invoice","params":{"invoice":"lnbc50n1...","amount":123}} +} diff --git a/nwc/pay_keysend.go b/nwc/pay_keysend.go new file mode 100644 index 0000000..fce7b5f --- /dev/null +++ b/nwc/pay_keysend.go @@ -0,0 +1,33 @@ +package nwc + 
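+// TLV records carry optional extra key/value data alongside a keysend payment. An
+// illustrative construction (pubkey, the record type and userId value are placeholders
+// chosen by the caller):
+//
+//	req := NewPayKeysendRequest(21000, pubkey, nil, []TLV{{Type: 696969, Value: userId}})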
+type TLV struct {
+	Type  uint64
+	Value []byte
+}
+
+type PayKeysendRequest struct {
+	Request
+	Amount     Msat
+	Pubkey     []byte
+	Preimage   []byte // optional
+	TLVRecords []TLV  // optional
+}
+
+func NewPayKeysendRequest(amount Msat, pubkey, preimage []byte,
+	tlvRecords []TLV) PayKeysendRequest {
+	return PayKeysendRequest{
+		Request{Methods.PayKeysend},
+		amount,
+		pubkey,
+		preimage,
+		tlvRecords,
+	}
+}
+
+type PayKeysendResponse = PayInvoiceResponse
+
+func NewPayKeysendResponse(preimage []byte, feesPaid Msat) PayKeysendResponse {
+	return PayInvoiceResponse{
+		Response{Type: Methods.PayKeysend}, preimage, feesPaid,
+	}
+}
diff --git a/nwc/protocols.go b/nwc/protocols.go
new file mode 100644
index 0000000..4824ff1
--- /dev/null
+++ b/nwc/protocols.go
@@ -0,0 +1,101 @@
+package nwc
+
+import (
+	"orly.dev/ints"
+)
+
+// Interfaces
+//
+// By using these interfaces and embedding the following implementations it becomes simple to type check the specific
+// request, response or notification variable being used in a given place in the code, without using reflection.
+//
+// All requests, responses and notifications embed these implementations, and their types then become easy to check.
+
+type Requester interface {
+	RequestType() []byte
+}
+
+type Resulter interface {
+	ResultType() []byte
+}
+
+type Notifier interface {
+	NotificationType() []byte
+}
+
+// Implementations
+//
+// By embedding the following types into the message structs and writing a constructor that loads the type name,
+// code can handle these without reflection, determine type via type assertion and introspect the message type via
+// the interface accessor method.
+
+type Request struct {
+	Method []byte
+}
+
+func (r Request) RequestType() []byte { return r.Method }
+
+type Response struct {
+	Type []byte
+	Error
+}
+
+func (r Response) ResultType() []byte { return r.Type }
+
+type Notification struct {
+	Type []byte
+}
+
+func (n Notification) NotificationType() []byte { return n.Type }
+
+// Msat is milli-sat; the maximum possible value is 1000 x 21 000 000 x 100 000 000 (still under
+// 19 decimal digits, so it fits comfortably in a uint64)
+type Msat uint64
+
+func (m Msat) Bytes(dst []byte) (b []byte) { return ints.New(uint64(m)).Marshal(dst) }
+
+// Methods
+
+type Invoice struct {
+	Id      []byte // nil for request, required for responses (omitted if nil)
+	Invoice []byte
+	Amount  Msat // optional, omitted if zero
+}
+
+type InvoiceResponse struct {
+	Type            []byte // incoming or outgoing
+	Invoice         []byte // optional
+	Description     []byte // optional
+	DescriptionHash []byte // optional
+	Preimage        []byte // optional if unpaid
+	PaymentHash     []byte
+	Amount          Msat
+	FeesPaid        Msat
+	CreatedAt       int64
+	ExpiresAt       int64 // optional if not applicable
+	Metadata        []any // optional; usually tag-like, but arbitrary JSON values (including numbers and floats) are allowed
+}
+
+type ListTransactions struct {
+	From   int64  // optional
+	Until  int64  // optional
+	Limit  int    // optional
+	Offset int    // optional
+	Unpaid bool   // optional default false
+	Type   []byte // incoming/outgoing/empty for "both"
+}
+
+// Notifications
+
+var (
+	PaymentSent     = []byte("payment_sent")
+	PaymentReceived = []byte("payment_received")
+)
+
+type PaymentSentNotification struct {
+	LookupInvoiceResponse
+}
+
+type PaymentReceivedNotification struct {
+	LookupInvoiceResponse
+}
diff --git a/openapi/common.go b/openapi/common.go
new file mode 100644
index 0000000..3d5469a
--- /dev/null
+++ b/openapi/common.go
@@ -0,0 +1,12 @@
+package openapi
+
+import (
+	"orly.dev/realy/interfaces"
+)
+
+type Operations
struct{ interfaces.Server } + +// NewOperations creates a new openapi.Operations.. +func NewOperations(s interfaces.Server) (ep *Operations) { + return &Operations{Server: s} +} diff --git a/openapi/http-configuration.go b/openapi/http-configuration.go new file mode 100644 index 0000000..2eff132 --- /dev/null +++ b/openapi/http-configuration.go @@ -0,0 +1,94 @@ +package openapi + +// import ( +// "net/http" +// +// "github.com/danielgtaylor/huma/v2" +// +// "orly.dev/context" +// "orly.dev/realy/helpers" +// "orly.dev/store" +// ) +// +// // ConfigurationSetInput is the parameters for HTTP API method to set Configuration. +// type ConfigurationSetInput struct { +// Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"` +// Body *store.Configuration `doc:"the new configuration"` +// } +// +// // ConfigurationGetInput is the parameters for HTTP API method to get Configuration. +// type ConfigurationGetInput struct { +// Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"` +// Accept string `header:"Accept" default:"application/json" enum:"application/json" required:"true"` +// } +// +// // ConfigurationGetOutput is the result of getting Configuration. +// type ConfigurationGetOutput struct { +// Body store.Configuration `doc:"the current configuration"` +// } +// +// // RegisterConfigurationSet implements the HTTP API for setting Configuration. +// func (x *Operations) RegisterConfigurationSet(api huma.API) { +// name := "ConfigurationSet" +// description := "Set the configuration" +// path := "/configuration/set" +// scopes := []string{"admin", "write"} +// method := http.MethodPost +// huma.Register(api, huma.Operation{ +// OperationID: name, +// Summary: name, +// Path: path, +// Method: method, +// Tags: []string{"admin"}, +// Description: helpers.GenerateDescription(description, scopes), +// Security: []map[string][]string{{"auth": scopes}}, +// }, func(ctx context.T, input *ConfigurationSetInput) (wgh *struct{}, err error) { +// log.I.S(input) +// r := ctx.Value("http-request").(*http.Request) +// // w := ctx.Value("http-response").(http.ResponseWriter) +// // rr := GetRemoteFromReq(r) +// authed, _ := x.AdminAuth(r) +// if !authed { +// // pubkey = ev.Pubkey +// err = huma.Error401Unauthorized("authorization required") +// return +// } +// sto := x.Storage() +// if c, ok := sto.(store.Configurationer); ok { +// if err = c.SetConfiguration(input.Body); chk.E(err) { +// return +// } +// x.SetConfiguration(input.Body) +// } +// return +// }) +// } +// +// // RegisterConfigurationGet implements the HTTP API for getting the Configuration. 
+// func (x *Operations) RegisterConfigurationGet(api huma.API) { +// name := "ConfigurationGet" +// description := "Fetch the current configuration" +// path := "/configuration/get" +// scopes := []string{"admin", "read"} +// method := http.MethodGet +// huma.Register(api, huma.Operation{ +// OperationID: name, +// Summary: name, +// Path: path, +// Method: method, +// Tags: []string{"admin"}, +// Description: helpers.GenerateDescription(description, scopes), +// Security: []map[string][]string{{"auth": scopes}}, +// }, func(ctx context.T, input *ConfigurationGetInput) (output *ConfigurationGetOutput, +// err error) { +// r := ctx.Value("http-request").(*http.Request) +// authed, _ := x.AdminAuth(r) +// if !authed { +// err = huma.Error401Unauthorized("authorization required") +// return +// } +// output = &ConfigurationGetOutput{Body: x.Configuration()} +// // } +// return +// }) +// } diff --git a/openapi/http-disconnect.go b/openapi/http-disconnect.go new file mode 100644 index 0000000..2666e7b --- /dev/null +++ b/openapi/http-disconnect.go @@ -0,0 +1,51 @@ +package openapi + +import ( + "net/http" + + "github.com/danielgtaylor/huma/v2" + + "orly.dev/context" + "orly.dev/realy/helpers" +) + +// DisconnectInput is the parameters for triggering the disconnection of all open websockets. +type DisconnectInput struct { + Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"` +} + +// DisconnectOutput is the result type for the Disconnect HTTP API method. +type DisconnectOutput struct{} + +// RegisterDisconnect is the implementation of the HTTP API Disconnect method. +func (x *Operations) RegisterDisconnect(api huma.API) { + name := "Disconnect" + description := "Close all open nip-01 websockets" + path := "/disconnect" + scopes := []string{"admin"} + method := http.MethodGet + huma.Register( + api, huma.Operation{ + OperationID: name, + Summary: name, + Path: path, + Method: method, + Tags: []string{"admin"}, + Description: helpers.GenerateDescription(description, scopes), + Security: []map[string][]string{{"auth": scopes}}, + DefaultStatus: 204, + }, func(ctx context.T, input *DisconnectInput) ( + wgh *DisconnectOutput, err error, + ) { + // r := ctx.Value("http-request").(*http.Request) + // authed, _ := x.AdminAuth(r) + // if !authed { + // // pubkey = ev.Pubkey + // err = huma.Error401Unauthorized("authorization required") + // return + // } + x.Disconnect() + return + }, + ) +} diff --git a/openapi/http-event.go b/openapi/http-event.go new file mode 100644 index 0000000..800bb14 --- /dev/null +++ b/openapi/http-event.go @@ -0,0 +1,249 @@ +package openapi + +import ( + "bytes" + "errors" + "fmt" + "net/http" + "orly.dev/chk" + "orly.dev/log" + + "github.com/danielgtaylor/huma/v2" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/filter" + "orly.dev/hex" + "orly.dev/httpauth" + "orly.dev/ints" + "orly.dev/kind" + "orly.dev/realy/helpers" + "orly.dev/sha256" + "orly.dev/tag" +) + +// EventInput is the parameters for the Event HTTP API method. +type EventInput struct { + Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"false"` + RawBody []byte +} + +// EventOutput is the return parameters for the HTTP API Event method. +type EventOutput struct{ Body string } + +// RegisterEvent is the implementatino of the HTTP API Event method. 
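+// The handler checks that the event id matches the computed id and that the signature
+// verifies, applies the special handling below for kind-5 deletion requests, and otherwise
+// hands the event to AddEvent for storage.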
+func (x *Operations) RegisterEvent(api huma.API) { + name := "Event" + description := "Submit an event" + path := "/event" + scopes := []string{"user", "write"} + method := http.MethodPost + huma.Register( + api, huma.Operation{ + OperationID: name, + Summary: name, + Path: path, + Method: method, + Tags: []string{"events"}, + Description: helpers.GenerateDescription(description, scopes), + Security: []map[string][]string{{"auth": scopes}}, + }, func(ctx context.T, input *EventInput) ( + output *EventOutput, err error, + ) { + r := ctx.Value("http-request").(*http.Request) + // w := ctx.Value("http-response").(http.ResponseWriter) + rr := helpers.GetRemoteFromReq(r) + ev := &event.E{} + if _, err = ev.Unmarshal(input.RawBody); chk.E(err) { + err = huma.Error406NotAcceptable(err.Error()) + return + } + var ok bool + sto := x.Storage() + if sto == nil { + panic("no event store has been set to store event") + } + // advancedDeleter, _ := sto.(relay.AdvancedDeleter) + var valid bool + var pubkey []byte + valid, pubkey, err = httpauth.CheckAuth(r) + // missing := !errors.Is(err, httpauth.ErrMissingKey) + // if there is an error but not that the token is missing, or there is no error + // but the signature is invalid, return error that request is unauthorized. + if err != nil && !errors.Is(err, httpauth.ErrMissingKey) { + err = huma.Error400BadRequest(err.Error()) + return + } + err = nil + if !valid { + err = huma.Error401Unauthorized("Authorization header is invalid") + return + } + // if there was auth, or no auth, check the relay policy allows accepting the + // event (no auth with auth required or auth not valid for action can apply + // here). + // accept, notice, after := x.AcceptEvent(ctx, ev, r, rr, pubkey) + // if !accept { + // err = huma.Error401Unauthorized(notice) + // return + // } + if !bytes.Equal(ev.GetIDBytes(), ev.Id) { + err = huma.Error400BadRequest("event id is computed incorrectly") + return + } + if ok, err = ev.Verify(); chk.T(err) { + err = huma.Error400BadRequest("failed to verify signature") + return + } else if !ok { + err = huma.Error400BadRequest("signature is invalid") + return + } + if ev.Kind.K == kind.Deletion.K { + log.I.F("delete event\n%s", ev.Serialize()) + for _, t := range ev.Tags.ToSliceOfTags() { + var res []*event.E + if t.Len() >= 2 { + switch { + case bytes.Equal(t.Key(), []byte("e")): + evId := make([]byte, sha256.Size) + if _, err = hex.DecBytes( + evId, t.Value(), + ); chk.E(err) { + continue + } + res, err = sto.QueryEvents( + ctx, &filter.F{Ids: tag.New(evId)}, + ) + if err != nil { + err = huma.Error500InternalServerError(err.Error()) + return + } + for i := range res { + if res[i].Kind.Equal(kind.Deletion) { + err = huma.Error409Conflict("not processing or storing delete event containing delete event references") + } + if !bytes.Equal(res[i].Pubkey, ev.Pubkey) { + err = huma.Error409Conflict("cannot delete other users' events (delete by e tag)") + return + } + } + case bytes.Equal(t.Key(), []byte("a")): + split := bytes.Split(t.Value(), []byte{':'}) + if len(split) != 3 { + continue + } + var pk []byte + if pk, err = hex.DecAppend( + nil, split[1], + ); chk.E(err) { + err = huma.Error400BadRequest( + fmt.Sprintf( + "delete event a tag pubkey value invalid: %s", + t.Value(), + ), + ) + return + } + kin := ints.New(uint16(0)) + if _, err = kin.Unmarshal(split[0]); chk.E(err) { + err = huma.Error400BadRequest( + fmt.Sprintf( + "delete event a tag kind value invalid: %s", + t.Value(), + ), + ) + return + } + kk := kind.New(kin.Uint16()) + if 
kk.Equal(kind.Deletion) { + err = huma.Error403Forbidden("delete event kind may not be deleted") + return + } + if !kk.IsParameterizedReplaceable() { + err = huma.Error403Forbidden("delete tags with a tags containing non-parameterized-replaceable events cannot be processed") + return + } + if !bytes.Equal(pk, ev.Pubkey) { + log.I.S(pk, ev.Pubkey, ev) + err = huma.Error403Forbidden("cannot delete other users' events (delete by a tag)") + return + } + f := filter.New() + f.Kinds.K = []*kind.T{kk} + f.Authors.Append(pk) + f.Tags.AppendTags( + tag.New( + []byte{'#', 'd'}, split[2], + ), + ) + res, err = sto.QueryEvents(ctx, f) + if err != nil { + err = huma.Error500InternalServerError(err.Error()) + return + } + } + } + if len(res) < 1 { + continue + } + var resTmp []*event.E + for _, v := range res { + if ev.CreatedAt.U64() >= v.CreatedAt.U64() { + resTmp = append(resTmp, v) + } + } + res = resTmp + for _, target := range res { + if target.Kind.K == kind.Deletion.K { + err = huma.Error403Forbidden( + fmt.Sprintf( + "cannot delete delete event %s", ev.Id, + ), + ) + return + } + if target.CreatedAt.Int() > ev.CreatedAt.Int() { + // todo: shouldn't this be an error? + log.I.F( + "not deleting\n%d%\nbecause delete event is older\n%d", + target.CreatedAt.Int(), ev.CreatedAt.Int(), + ) + continue + } + if !bytes.Equal(target.Pubkey, ev.Pubkey) { + err = huma.Error403Forbidden("only author can delete event") + return + } + // if advancedDeleter != nil { + // advancedDeleter.BeforeDelete(ctx, t.Value(), ev.Pubkey) + // } + if err = sto.DeleteEvent( + ctx, target.EventId(), + ); chk.T(err) { + err = huma.Error500InternalServerError(err.Error()) + return + } + // if advancedDeleter != nil { + // advancedDeleter.AfterDelete(t.Value(), ev.Pubkey) + // } + } + res = nil + } + return + } + var reason []byte + ok, reason = x.AddEvent(ctx, x.Relay(), ev, r, rr, pubkey) + // return the response whether true or false and any reason if false + if ok { + } else { + err = huma.Error500InternalServerError(string(reason)) + } + // if after != nil { + // // do this in the background and let the http response close + // go after() + // } + output = &EventOutput{"event accepted"} + return + }, + ) +} diff --git a/openapi/http-events.go b/openapi/http-events.go new file mode 100644 index 0000000..5bbf69e --- /dev/null +++ b/openapi/http-events.go @@ -0,0 +1,124 @@ +package openapi + +import ( + "fmt" + "net/http" + "orly.dev/chk" + + "github.com/danielgtaylor/huma/v2" + + "orly.dev/context" + "orly.dev/hex" + "orly.dev/interfaces/store" + "orly.dev/realy/helpers" + "orly.dev/sha256" + "orly.dev/tag" +) + +// EventsInput is the parameters for an Events HTTP API method. Basically an array of eventid.T. +type EventsInput struct { + Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"false"` + Body []string `doc:"list of event Ids"` +} + +// RegisterEvents is the implementation of the HTTP API for Events. +func (x *Operations) RegisterEvents(api huma.API) { + name := "Events" + description := "Returns the full events from a list of event Ids as a line structured JSON." 
+ path := "/events" + scopes := []string{"user", "read"} + method := http.MethodPost + huma.Register( + api, huma.Operation{ + OperationID: name, + Summary: name, + Path: path, + Method: method, + Tags: []string{"events"}, + Description: helpers.GenerateDescription(description, scopes), + Security: []map[string][]string{{"auth": scopes}}, + DefaultStatus: 204, + }, func(ctx context.T, input *EventsInput) ( + output *huma.StreamResponse, err error, + ) { + // log.I.S(input) + // if len(input.Body) == 10000 { + // err = huma.Error400BadRequest( + // "cannot process more than 10000 events in a request") + // return + // } + // var authrequired bool + // if len(input.Body) > 1000 { + // authrequired = true + // } + // r := ctx.Value("http-request").(*http.Request) + // var valid bool + // var pubkey []byte + // valid, pubkey, err = httpauth.CheckAuth(r) + // // if there is an error but not that the token is missing, or there is no error + // // but the signature is invalid, return error that request is unauthorized. + // if err != nil && !errors.Is(err, httpauth.ErrMissingKey) { + // err = huma.Error400BadRequest(err.Error()) + // return + // } + // err = nil + // if authrequired && len(pubkey) != schnorr.PubKeyBytesLen { + // err = huma.Error400BadRequest( + // "cannot process more than 1000 events in a request without being authenticated") + // return + // } + // if authrequired && valid { + // if len(x.Owners()) < 1 { + // err = huma.Error400BadRequest( + // "cannot process more than 1000 events in a request without auth enabled") + // return + // } + // if rl, ok := x.Relay().(*app.Relay); ok { + // rl.Lock() + // // we only allow the first level of the allowed users this kind of access + // if _, ok = rl.OwnersFollowed[string(pubkey)]; !ok { + // err = huma.Error403Forbidden( + // fmt.Sprintf( + // "authenticated user %0x does not have permission for this request (owners can use export)", + // pubkey)) + // return + // } + // } + // } + // if !valid { + // err = huma.Error401Unauthorized("Authorization header is invalid") + // return + // } + sto := x.Storage() + var evIds [][]byte + for _, id := range input.Body { + var idb []byte + if idb, err = hex.Dec(id); chk.E(err) { + err = huma.Error422UnprocessableEntity(err.Error()) + return + } + if len(idb) != sha256.Size { + err = huma.Error422UnprocessableEntity( + fmt.Sprintf( + "event Id must be 64 hex characters: '%s'", id, + ), + ) + } + evIds = append(evIds, idb) + } + if idsWriter, ok := sto.(store.GetIdsWriter); ok { + output = &huma.StreamResponse{ + func(ctx huma.Context) { + if err = idsWriter.FetchIds( + x.Context(), tag.New(evIds...), + ctx.BodyWriter(), + ); chk.E(err) { + return + } + }, + } + } + return + }, + ) +} diff --git a/openapi/http-export.go b/openapi/http-export.go new file mode 100644 index 0000000..614baf9 --- /dev/null +++ b/openapi/http-export.go @@ -0,0 +1,68 @@ +package openapi + +import ( + "net/http" + "orly.dev/log" + + "github.com/danielgtaylor/huma/v2" + + "orly.dev/context" + "orly.dev/realy/helpers" +) + +// ExportInput is the parameters for the HTTP API Export method. +type ExportInput struct { + Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"` +} + +// ExportOutput is the return value of Export. It usually will be line structured JSON. In +// future there may be more output formats. +type ExportOutput struct{ RawBody []byte } + +// RegisterExport implements the Export HTTP API method. 
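+// The response body is written as application/nostr+jsonl and flushed once the export
+// completes, so the event data goes straight from the store to the response writer.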
+func (x *Operations) RegisterExport(api huma.API) { + name := "Export" + description := "Export all events (only works with NIP-98/JWT capable client, will not work with UI)" + path := "/export" + scopes := []string{"admin", "read"} + method := http.MethodGet + huma.Register( + api, huma.Operation{ + OperationID: name, + Summary: name, + Path: path, + Method: method, + Tags: []string{"admin"}, + Description: helpers.GenerateDescription(description, scopes), + Security: []map[string][]string{{"auth": scopes}}, + }, func(ctx context.T, input *ExportInput) ( + resp *huma.StreamResponse, err error, + ) { + // r := ctx.Value("http-request").(*http.Request) + // rr := helpers.GetRemoteFromReq(r) + // log.I.F("processing export from %s", rr) + // // w := ctx.Value("http-response").(http.ResponseWriter) + // authed, pubkey := x.AdminAuth(r) + // if !authed { + // // pubkey = ev.Pubkey + // err = huma.Error401Unauthorized("Not Authorized") + // return + // } + // log.I.F("export of event data requested on admin port from %s pubkey %0x", + // rr, pubkey) + sto := x.Storage() + resp = &huma.StreamResponse{ + func(ctx huma.Context) { + ctx.SetHeader("Content-Type", "application/nostr+jsonl") + sto.Export(x.Context(), ctx.BodyWriter()) + if f, ok := ctx.BodyWriter().(http.Flusher); ok { + f.Flush() + } else { + log.W.F("error: unable to flush") + } + }, + } + return + }, + ) +} diff --git a/openapi/http-filter.go b/openapi/http-filter.go new file mode 100644 index 0000000..6e84bd8 --- /dev/null +++ b/openapi/http-filter.go @@ -0,0 +1,235 @@ +package openapi + +import ( + "net/http" + "orly.dev/chk" + "orly.dev/log" + "sort" + + "github.com/danielgtaylor/huma/v2" + + "orly.dev/context" + "orly.dev/filter" + "orly.dev/filters" + "orly.dev/hex" + "orly.dev/interfaces/store" + "orly.dev/kind" + "orly.dev/kinds" + "orly.dev/realy/helpers" + "orly.dev/tag" + "orly.dev/tags" + "orly.dev/timestamp" +) + +// SimpleFilter is the main parts of a filter.F that relate to event store indexes. +type SimpleFilter struct { + Kinds []int `json:"kinds,omitempty" doc:"array of kind numbers to match on"` + Authors []string `json:"authors,omitempty" doc:"array of author pubkeys to match on (hex encoded)"` + Tags [][]string `json:"tags,omitempty" doc:"array of tags to match on (first key of each '#x' and terms to match from the second field of the event tag)"` +} + +// FilterInput is the parameters for a Filter HTTP API call. +type FilterInput struct { + Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"false"` + Since int64 `query:"since" doc:"timestamp of the oldest events to return (inclusive)"` + Until int64 `query:"until" doc:"timestamp of the newest events to return (inclusive)"` + Limit uint `query:"limit" doc:"maximum number of results to return"` + Sort string `query:"sort" enum:"asc,desc" default:"desc" doc:"sort order by created_at timestamp"` + Body SimpleFilter `body:"filter" doc:"filter criteria to match for events to return"` +} + +// ToFilter converts a SimpleFilter input to a regular nostr filter.F. +func (fi FilterInput) ToFilter() (f *filter.F, err error) { + f = filter.New() + var ks []*kind.T + for _, k := range fi.Body.Kinds { + ks = append(ks, kind.New(k)) + } + f.Kinds = kinds.New(ks...) + var as [][]byte + for _, a := range fi.Body.Authors { + var b []byte + if b, err = hex.Dec(a); chk.E(err) { + return + } + as = append(as, b) + } + f.Authors = tag.New(as...) 
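+	// each entry of Tags arrives as ["#x", "value", ...]; turn every entry into a tag.T and
+	// attach the resulting set to the filter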
+ var ts []*tag.T + for _, t := range fi.Body.Tags { + ts = append(ts, tag.New(t...)) + } + f.Tags = tags.New(ts...) + if fi.Limit != 0 { + f.Limit = &fi.Limit + } + if fi.Since != 0 { + f.Since = timestamp.New(fi.Since) + } + if fi.Until != 0 { + f.Until = timestamp.New(fi.Until) + } + return +} + +// FilterOutput is a list of event Ids that match the query in the sort order requested. +type FilterOutput struct { + Body []string `doc:"list of event Ids that mach the query in the sort order requested"` +} + +// RegisterFilter is the implementation of the HTTP API Filter method. +func (x *Operations) RegisterFilter(api huma.API) { + name := "Filter" + description := "Search for events and receive a sorted list of event Ids (one of authors, kinds or tags must be present)" + path := "/filter" + scopes := []string{"user", "read"} + method := http.MethodPost + huma.Register( + api, huma.Operation{ + OperationID: name, + Summary: name, + Path: path, + Method: method, + Tags: []string{"events"}, + Description: helpers.GenerateDescription(description, scopes), + Security: []map[string][]string{{"auth": scopes}}, + }, func(ctx context.T, input *FilterInput) ( + output *FilterOutput, err error, + ) { + log.I.S(input) + var f *filter.F + if f, err = input.ToFilter(); chk.E(err) { + err = huma.Error422UnprocessableEntity(err.Error()) + return + } + log.I.F("%s", f.Marshal(nil)) + // r := ctx.Value("http-request").(*http.Request) + // rr := helpers.GetRemoteFromReq(r) + // if len(input.Body.Authors) < 1 && len(input.Body.Kinds) < 1 && len(input.Body.Tags) < 1 { + // err = huma.Error400BadRequest( + // "cannot process filter with none of Authors/Kinds/Tags") + // return + // } + // var valid bool + // var pubkey []byte + // valid, pubkey, err = httpauth.CheckAuth(r) + // if there is an error but not that the token is missing, or there is no error + // but the signature is invalid, return error that request is unauthorized. 
+ // if err != nil && !errors.Is(err, httpauth.ErrMissingKey) { + // err = huma.Error400BadRequest(err.Error()) + // return + // } + // err = nil + // if !valid { + // err = huma.Error401Unauthorized("Authorization header is invalid") + // return + // } + allowed := filters.New(f) + // if accepter, ok := x.Relay().(relay.ReqAcceptor); ok { + // var accepted, modified bool + // allowed, accepted, modified = accepter.AcceptReq(x.Context(), r, nil, + // filters.New(f), pubkey) + // if !accepted { + // err = huma.Error401Unauthorized("auth to get access for this filter") + // return + // } else if modified { + // log.D.F("filter modified %s", allowed.F[0]) + // } + // } + // if len(allowed.F) == 0 { + // err = huma.Error401Unauthorized("all kinds in event restricted; auth to get access for this filter") + // return + // } + // if f.Kinds.IsPrivileged() { + // if auther, ok := x.Relay().(relay.Authenticator); ok && auther.AuthRequired() { + // log.F.F("privileged request\n%s", f.Serialize()) + // senders := f.Authors + // receivers := f.Tags.GetAll(tag.New("#p")) + // switch { + // case len(pubkey) == 0: + // err = huma.Error401Unauthorized("auth required for processing request due to presence of privileged kinds (DMs, app specific data)") + // return + // case senders.Contains(pubkey) || receivers.ContainsAny([]byte("#p"), + // tag.New(pubkey)): + // log.F.F("user %0x from %s allowed to query for privileged event", + // pubkey, rr) + // default: + // err = huma.Error403Forbidden(fmt.Sprintf( + // "authenticated user %0x does not have authorization for "+ + // "requested filters", pubkey)) + // } + // } + // } + sto := x.Storage() + var ok bool + var quer store.Querier + if quer, ok = sto.(store.Querier); !ok { + err = huma.Error501NotImplemented("simple filter request not implemented") + return + } + var evs []store.IdPkTs + if evs, err = quer.QueryForIds( + x.Context(), allowed.F[0], + ); chk.E(err) { + err = huma.Error500InternalServerError( + "error querying for events", err, + ) + return + } + if input.Limit > 0 { + evs = evs[:input.Limit] + } + switch input.Sort { + case "asc": + sort.Slice( + evs, func(i, j int) bool { + return evs[i].Ts < evs[j].Ts + }, + ) + case "desc": + sort.Slice( + evs, func(i, j int) bool { + return evs[i].Ts > evs[j].Ts + }, + ) + } + // if len(pubkey) > 0 { + // // remove events from results if we find the user's mute list, that are present + // // on this list + // var mutes event.Ts + // if mutes, err = sto.QueryEvents(x.Context(), &filter.F{Authors: tag.New(pubkey), + // Kinds: kinds.New(kind.MuteList)}); !chk.E(err) { + // var mutePubs [][]byte + // for _, ev := range mutes { + // for _, t := range ev.Tags.ToSliceOfTags() { + // if bytes.Equal(t.Key(), []byte("p")) { + // var p []byte + // if p, err = hex.Dec(string(t.Value())); chk.E(err) { + // continue + // } + // mutePubs = append(mutePubs, p) + // } + // } + // } + // var tmp []store.IdTsPk + // next: + // for _, ev := range evs { + // for _, pk := range mutePubs { + // if bytes.Equal(ev.Pub, pk) { + // continue next + // } + // } + // tmp = append(tmp, ev) + // } + // // log.I.ToSliceOfBytes("done") + // evs = tmp + // } + // } + output = &FilterOutput{} + for _, ev := range evs { + output.Body = append(output.Body, hex.Enc(ev.Id)) + } + return + }, + ) +} diff --git a/openapi/http-import.go b/openapi/http-import.go new file mode 100644 index 0000000..4fc55b1 --- /dev/null +++ b/openapi/http-import.go @@ -0,0 +1,71 @@ +package openapi + +import ( + "bytes" + "github.com/danielgtaylor/huma/v2" + 
"net/http" + + "orly.dev/context" + "orly.dev/realy/helpers" +) + +// ImportInput is the parameters of an import operation, authentication and the stream of line +// structured JSON events. +type ImportInput struct { + Auth string `header:"Authorization" doc:"nostr nip-98 token for authentication" required:"true"` + RawBody []byte +} + +// ImportOutput is nothing, basically, a 204 or 200 status is expected. +type ImportOutput struct{} + +// RegisterImport is the implementation of the Import operation. +func (x *Operations) RegisterImport(api huma.API) { + name := "Import" + description := "Import events from line structured JSON (jsonl)" + path := "/import" + scopes := []string{"admin", "write"} + method := http.MethodPost + huma.Register( + api, huma.Operation{ + OperationID: name, + Summary: name, + Path: path, + Method: method, + Tags: []string{"admin"}, + Description: helpers.GenerateDescription(description, scopes), + Security: []map[string][]string{{"auth": scopes}}, + DefaultStatus: 204, + }, + func(ctx context.T, input *ImportInput) (wgh *ImportOutput, err error) { + // r := ctx.Value("http-request").(*http.Request) + // rr := helpers.GetRemoteFromReq(r) + // authed, pubkey := x.AdminAuth(r, time.Minute*10) + // if !authed { + // // pubkey = ev.Pubkey + // err = huma.Error401Unauthorized( + // fmt.Sprintf("user %0x not authorized for action", pubkey)) + // return + // } + sto := x.Storage() + if len(input.RawBody) > 0 { + read := bytes.NewBuffer(input.RawBody) + sto.Import(read) + // if realy, ok := x.Relay().(*app.Relay); ok { + // realy.ZeroLists() + // realy.CheckOwnerLists(context.Bg()) + // } + // } else { + // log.I.F("import of event data requested on admin port from %s pubkey %0x", rr, + // pubkey) + // read := io.LimitReader(r.Body, r.ContentLength) + // sto.Import(read) + // if realy, ok := x.Relay().(*app.Relay); ok { + // realy.ZeroLists() + // realy.CheckOwnerLists(context.Bg()) + // } + } + return + }, + ) +} diff --git a/openapi/http-nuke.go b/openapi/http-nuke.go new file mode 100644 index 0000000..fb0ce4b --- /dev/null +++ b/openapi/http-nuke.go @@ -0,0 +1,71 @@ +package openapi + +import ( + "net/http" + "orly.dev/chk" + "orly.dev/log" + "strings" + + "github.com/danielgtaylor/huma/v2" + + "orly.dev/context" + "orly.dev/interfaces/store" + "orly.dev/realy/helpers" +) + +// NukeInput is the parameters for the HTTP API method nuke. Note that it has a confirmation +// header that must be provided to prevent accidental invocation of this method. +type NukeInput struct { + Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"` + Confirm string `header:"X-Confirm" doc:"must put 'Yes I Am Sure' in this field as confirmation"` +} + +// NukeOutput is basically nothing, a 200 or 204 HTTP status response is normal. +type NukeOutput struct{} + +// RegisterNuke is the implementation of the Wipe HTTP API method. 
+func (x *Operations) RegisterNuke(api huma.API) { + name := "Wipe" + description := "Wipe all events in the database" + path := "/nuke" + scopes := []string{"admin", "write"} + method := http.MethodGet + huma.Register( + api, huma.Operation{ + OperationID: name, + Summary: name, + Path: path, + Method: method, + Tags: []string{"admin"}, + Description: helpers.GenerateDescription(description, scopes), + Security: []map[string][]string{{"auth": scopes}}, + DefaultStatus: 204, + }, func(ctx context.T, input *NukeInput) (wgh *NukeOutput, err error) { + // r := ctx.Value("http-request").(*http.Request) + // // w := ctx.Value("http-response").(http.ResponseWriter) + // rr := helpers.GetRemoteFromReq(r) + // authed, pubkey := x.AdminAuth(r) + // if !authed { + // // pubkey = ev.Pubkey + // err = huma.Error401Unauthorized("user not authorized for action") + // return + // } + if input.Confirm != "Yes I Am Sure" { + err = huma.Error403Forbidden("Confirm missing or incorrect") + return + } + // log.I.F("database nuke request from %s pubkey %0x", rr, pubkey) + sto := x.Storage() + if nuke, ok := sto.(store.Wiper); ok { + log.I.F("wiping database") + if err = nuke.Wipe(); chk.E(err) { + if strings.HasPrefix(err.Error(), "Value log GC attempt") { + err = nil + } + return + } + } + return + }, + ) +} diff --git a/openapi/http-relay.go b/openapi/http-relay.go new file mode 100644 index 0000000..e5693a2 --- /dev/null +++ b/openapi/http-relay.go @@ -0,0 +1,95 @@ +package openapi + +import ( + "bytes" + "net/http" + "orly.dev/chk" + "orly.dev/log" + + "github.com/danielgtaylor/huma/v2" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/realy/helpers" +) + +// RelayInput is the parameters for the Relay HTTP API method. +type RelayInput struct { + Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"false"` + RawBody []byte +} + +// RelayOutput is the return parameters for the HTTP API Relay method. +type RelayOutput struct{ Body string } + +// RegisterRelay is the implementation of the HTTP API Relay method. +func (x *Operations) RegisterRelay(api huma.API) { + name := "relay" + description := "relay an event, don't store it" + path := "/relay" + scopes := []string{"user"} + method := http.MethodPost + huma.Register( + api, huma.Operation{ + OperationID: name, + Summary: name, + Path: path, + Method: method, + Tags: []string{"events"}, + Description: helpers.GenerateDescription(description, scopes), + Security: []map[string][]string{{"auth": scopes}}, + }, func(ctx context.T, input *RelayInput) ( + output *RelayOutput, err error, + ) { + log.I.S(input) + // r := ctx.Value("http-request").(*http.Request) + // rr := helpers.GetRemoteFromReq(r) + // var valid bool + // var pubkey []byte + // valid, pubkey, err = httpauth.CheckAuth(r) + // // if there is an error but not that the token is missing, or there is no error + // // but the signature is invalid, return error that request is unauthorized. + // if err != nil && !errors.Is(err, httpauth.ErrMissingKey) { + // err = huma.Error400BadRequest(err.Error()) + // return + // } + // err = nil + // if !valid { + // err = huma.Error401Unauthorized("Authorization header is invalid") + // return + // } + var ok bool + // if there was auth, or no auth, check the relay policy allows accepting the + // event (no auth with auth required or auth not valid for action can apply + // here).
+ ev := &event.E{} + if _, err = ev.Unmarshal(input.RawBody); chk.E(err) { + err = huma.Error406NotAcceptable(err.Error()) + return + } + // accept, notice, _ := x.AcceptEvent(ctx, ev, r, rr, pubkey) + // if !accept { + // err = huma.Error401Unauthorized(notice) + // return + // } + if !bytes.Equal(ev.GetIDBytes(), ev.Id) { + err = huma.Error400BadRequest("event id is computed incorrectly") + return + } + if ok, err = ev.Verify(); chk.T(err) { + err = huma.Error400BadRequest("failed to verify signature") + return + } else if !ok { + err = huma.Error400BadRequest("signature is invalid") + return + } + // var authRequired bool + // var ar relay.Authenticator + // if ar, ok = x.Relay().(relay.Authenticator); ok { + // authRequired = ar.AuthRequired() + // } + x.Publisher().Deliver(ev) + return + }, + ) +} diff --git a/openapi/http-rescan.go b/openapi/http-rescan.go new file mode 100644 index 0000000..7577c01 --- /dev/null +++ b/openapi/http-rescan.go @@ -0,0 +1,58 @@ +package openapi + +import ( + "net/http" + "orly.dev/chk" + "orly.dev/log" + + "github.com/danielgtaylor/huma/v2" + + "orly.dev/context" + "orly.dev/interfaces/store" + "orly.dev/realy/helpers" +) + +type RescanInput struct { + Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"` +} + +type RescanOutput struct{} + +func (x *Operations) RegisterRescan(api huma.API) { + name := "Rescan" + description := "Rescan all events and rewrite their indexes (to enable new indexes on old events)" + path := "/rescan" + scopes := []string{"admin"} + method := http.MethodGet + huma.Register( + api, huma.Operation{ + OperationID: name, + Summary: name, + Path: path, + Method: method, + Tags: []string{"admin"}, + Description: helpers.GenerateDescription(description, scopes), + Security: []map[string][]string{{"auth": scopes}}, + DefaultStatus: 204, + }, + func(ctx context.T, input *RescanInput) (wgh *RescanOutput, err error) { + // r := ctx.Value("http-request").(*http.Request) + // rr := helpers.GetRemoteFromReq(r) + // authed, pubkey := x.AdminAuth(r) + // if !authed { + // err = huma.Error401Unauthorized("not authorized") + // return + // } + // log.I.F("index rescan requested on admin port from %s pubkey %0x", + // rr, pubkey) + sto := x.Storage() + if rescanner, ok := sto.(store.Rescanner); ok { + log.I.F("rescanning") + if err = rescanner.Rescan(); chk.E(err) { + return + } + } + return + }, + ) +} diff --git a/openapi/http-shutdown.go b/openapi/http-shutdown.go new file mode 100644 index 0000000..326477a --- /dev/null +++ b/openapi/http-shutdown.go @@ -0,0 +1,51 @@ +package openapi + +import ( + "net/http" + "time" + + "github.com/danielgtaylor/huma/v2" + + "orly.dev/context" + "orly.dev/realy/helpers" +) + +type ShutdownInput struct { + Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"` +} + +type ShutdownOutput struct{} + +func (x *Operations) RegisterShutdown(api huma.API) { + name := "Shutdown" + description := "Shutdown relay" + path := "/shutdown" + scopes := []string{"admin"} + method := http.MethodGet + huma.Register( + api, huma.Operation{ + OperationID: name, + Summary: name, + Path: path, + Method: method, + Tags: []string{"admin"}, + Description: helpers.GenerateDescription(description, scopes), + Security: []map[string][]string{{"auth": scopes}}, + DefaultStatus: 204, + }, func(ctx context.T, input *ShutdownInput) ( + wgh *ShutdownOutput, err error, + ) { + // r := ctx.Value("http-request").(*http.Request) + // authed, _ := x.AdminAuth(r) + 
// if !authed { + // err = huma.Error401Unauthorized("authorization required") + // return + // } + go func() { + time.Sleep(time.Second) + x.Shutdown() + }() + return + }, + ) +} diff --git a/openapi/http-subscribe.go b/openapi/http-subscribe.go new file mode 100644 index 0000000..48bb5ce --- /dev/null +++ b/openapi/http-subscribe.go @@ -0,0 +1,158 @@ +package openapi + +import ( + "net/http" + "orly.dev/chk" + "orly.dev/log" + + "github.com/danielgtaylor/huma/v2" + "github.com/danielgtaylor/huma/v2/sse" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/filter" + "orly.dev/filters" + "orly.dev/hex" + "orly.dev/kind" + "orly.dev/kinds" + "orly.dev/realy/helpers" + "orly.dev/tag" + "orly.dev/tags" +) + +type SubscribeInput struct { + Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"false"` + Accept string `header:"Accept" default:"text/event-stream" enum:"text/event-stream" required:"true"` + // ContentType string `header:"Content-Type" default:"text/event-stream" enum:"text/event-stream" required:"true"` + Body SimpleFilter `body:"filter" doc:"filter criteria to match for events to return"` +} + +func (fi SubscribeInput) ToFilter() (f *filter.F, err error) { + f = filter.New() + var ks []*kind.T + for _, k := range fi.Body.Kinds { + ks = append(ks, kind.New(k)) + } + f.Kinds = kinds.New(ks...) + var as [][]byte + for _, a := range fi.Body.Authors { + var b []byte + if b, err = hex.Dec(a); chk.E(err) { + return + } + as = append(as, b) + } + f.Authors = tag.New(as...) + var ts []*tag.T + for _, t := range fi.Body.Tags { + ts = append(ts, tag.New(t...)) + } + f.Tags = tags.New(ts...) + return +} + +func (x *Operations) RegisterSubscribe(api huma.API) { + name := "Subscribe" + description := "Subscribe for newly published events by author, kind or tags; empty also allowed, which just sends all incoming events - uses Server Sent Events format for compatibility with standard libraries." + path := "/subscribe" + scopes := []string{"user", "read"} + method := http.MethodPost + sse.Register( + api, huma.Operation{ + OperationID: name, + Summary: name, + Path: path, + Method: method, + Tags: []string{"events"}, + Description: helpers.GenerateDescription(description, scopes), + Security: []map[string][]string{{"auth": scopes}}, + }, + map[string]any{ + "event": event.J{}, + }, + func(ctx context.T, input *SubscribeInput, send sse.Sender) { + log.I.S(input) + var err error + var f *filter.F + if f, err = input.ToFilter(); chk.E(err) { + err = huma.Error422UnprocessableEntity(err.Error()) + return + } + log.I.F("%s", f.Marshal(nil)) + r := ctx.Value("http-request").(*http.Request) + // rr := helpers.GetRemoteFromReq(r) + // var valid bool + // var pubkey []byte + // valid, pubkey, err = httpauth.CheckAuth(r) + // // if there is an error but not that the token is missing, or there is no error + // // but the signature is invalid, return error that request is unauthorized. 
+ // if err != nil && !errors.Is(err, httpauth.ErrMissingKey) { + // err = huma.Error400BadRequest(err.Error()) + // return + // } + // err = nil + // if !valid { + // err = huma.Error401Unauthorized("Authorization header is invalid") + // return + // } + allowed := filters.New(f) + // if accepter, ok := x.Relay().(relay.ReqAcceptor); ok { + // var accepted, modified bool + // allowed, accepted, modified = accepter.AcceptReq(x.Context(), r, nil, + // filters.New(f), + // pubkey) + // if !accepted { + // err = huma.Error401Unauthorized("auth to get access for this filter") + // return + // } else if modified { + // log.D.F("filter modified %s", allowed.F[0]) + // } + // } + if len(allowed.F) == 0 { + err = huma.Error401Unauthorized("all kinds in event restricted; auth to get access for this filter") + return + } + // if f.Kinds.IsPrivileged() { + // if auther, ok := x.Relay().(relay.Authenticator); ok && auther.AuthRequired() { + // log.F.F("privileged request\n%s", f.Serialize()) + // senders := f.Authors + // receivers := f.Tags.GetAll(tag.New("#p")) + // switch { + // case len(pubkey) == 0: + // err = huma.Error401Unauthorized("auth required for processing request due to presence of privileged kinds (DMs, app specific data)") + // return + // case senders.Contains(pubkey) || receivers.ContainsAny([]byte("#p"), + // tag.New(pubkey)): + // log.F.F("user %0x from %s allowed to query for privileged event", + // pubkey, rr) + // default: + // err = huma.Error403Forbidden(fmt.Sprintf( + // "authenticated user %0x does not have authorization for "+ + // "requested filters", pubkey)) + // } + // } + // } + // register the filter with the listeners + receiver := make(event.C, 32) + x.Publisher().Receive( + &H{ + Ctx: r.Context(), + Receiver: receiver, + // Pubkey: pubkey, + Filter: f, + }, + ) + out: + for { + select { + case <-r.Context().Done(): + break out + case ev := <-receiver: + if err = send.Data(ev.ToEventJ()); chk.E(err) { + } + } + } + return + }, + ) +} diff --git a/openapi/huma.go b/openapi/huma.go new file mode 100644 index 0000000..5015343 --- /dev/null +++ b/openapi/huma.go @@ -0,0 +1,49 @@ +package openapi + +import ( + "net/http" + + "github.com/danielgtaylor/huma/v2" + "github.com/danielgtaylor/huma/v2/adapters/humago" +) + +// ExposeMiddleware adds the http.Request and http.ResponseWriter to the context +// for the Operations handler. +func ExposeMiddleware(ctx huma.Context, next func(huma.Context)) { + // Unwrap the request and response objects. + r, w := humago.Unwrap(ctx) + ctx = huma.WithValue(ctx, "http-request", r) + ctx = huma.WithValue(ctx, "http-response", w) + next(ctx) +} + +// NewHuma creates a new huma.API with a Scalar docs UI, and a middleware that allows methods to +// access the http.Request and http.ResponseWriter. 
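As a rough sketch of how these pieces might be wired together using the function defined below (the Operations value and the listen address are assumptions; this patch does not show how Operations is constructed):

    mux := openapi.NewServeMux()
    api := openapi.NewHuma(mux, "realy", "v0.1.0", "nostr relay HTTP API")
    // ops is an *openapi.Operations wired to storage and a publisher elsewhere (not shown here)
    ops.RegisterFilter(api)
    ops.RegisterRelay(api)
    ops.RegisterSubscribe(api)
    chk.E(http.ListenAndServe(":3334", mux))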
+func NewHuma(router *ServeMux, name, version, description string) (api huma.API) { + config := huma.DefaultConfig(name, version) + config.Info.Description = description + config.DocsPath = "" + router.ServeMux.HandleFunc("/api", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html") + w.Write([]byte(` + + + realy HTTP API UI + + + + + + + +`)) + }) + + api = humago.New(router, config) + api.UseMiddleware(ExposeMiddleware) + return +} diff --git a/openapi/publisher-openapi.go b/openapi/publisher-openapi.go new file mode 100644 index 0000000..490b1cb --- /dev/null +++ b/openapi/publisher-openapi.go @@ -0,0 +1,96 @@ +package openapi + +import ( + "sync" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/filter" + "orly.dev/realy/publish/publisher" +) + +const Type = "openapi" + +// H is the control structure for a HTTP SSE subscription, including the filter, authed +// pubkey and a channel to send the events to. +type H struct { + // Ctx is the http.Request context of the subscriber, this enables garbage + // collecting the subscriptions from http. + Ctx context.T + // Receiver is a channel that the listener sends subscription events to for http + // subscribe endpoint. + Receiver event.C + // // Pubkey is the pubkey authed to this subscription + // Pubkey []byte + // Filter is the filter associated with the http subscription + Filter *filter.F +} + +func (h *H) Type() string { return Type } + +// Map is a collection of H TTP subscriptions. +type Map map[*H]struct{} + +type S struct { + // Map is the map of subscriptions from the http api. + Map + // HLock is the mutex that locks the Map. + Mx sync.Mutex +} + +var _ publisher.I = &S{} + +func New() *S { return &S{Map: make(Map)} } + +func (p *S) Type() string { return Type } + +func (p *S) Receive(msg publisher.Message) { + if m, ok := msg.(*H); ok { + p.Mx.Lock() + p.Map[m] = struct{}{} + p.Mx.Unlock() + } +} + +func (p *S) Deliver(ev *event.E) { + p.Mx.Lock() + var subs []*H + for sub := range p.Map { + // check if the subscription's subscriber is still alive + select { + case <-sub.Ctx.Done(): + subs = append(subs, sub) + default: + } + } + for _, sub := range subs { + delete(p.Map, sub) + } + subs = subs[:0] + for sub := range p.Map { + // if auth required, check the subscription pubkey matches + // if !publicReadable { + // if authRequired && len(sub.Pubkey) == 0 { + // continue + // } + // } + // if the filter doesn't match, skip + if !sub.Filter.Matches(ev) { + continue + } + // // if the filter is privileged and the user doesn't have matching auth, skip + // if ev.Kind.IsPrivileged() { + // ab := sub.Pubkey + // var containsPubkey bool + // if ev.Tags != nil { + // containsPubkey = ev.Tags.ContainsAny([]byte{'p'}, tag.New(ab)) + // } + // if !bytes.Equal(ev.Pubkey, ab) || containsPubkey { + // continue + // } + // } + // send the event to the subscriber + sub.Receiver <- ev + } + p.Mx.Unlock() +} diff --git a/openapi/serveMux.go b/openapi/serveMux.go new file mode 100644 index 0000000..50ab7f0 --- /dev/null +++ b/openapi/serveMux.go @@ -0,0 +1,21 @@ +package openapi + +import "net/http" + +type ServeMux struct { + *http.ServeMux +} + +func NewServeMux() *ServeMux { + return &ServeMux{http.NewServeMux()} +} + +func (c *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE") + w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization") + if 
r.Method == http.MethodOptions { + return + } + c.ServeMux.ServeHTTP(w, r) +} diff --git a/p256k/btcec.go b/p256k/btcec.go index a8a3996..e3e6e6f 100644 --- a/p256k/btcec.go +++ b/p256k/btcec.go @@ -3,7 +3,6 @@ package p256k import ( - "orly.dev/log" "orly.dev/p256k/btcec" ) diff --git a/p256k/btcec/btcec.go b/p256k/btcec/btcec.go index 4be841e..4102b73 100644 --- a/p256k/btcec/btcec.go +++ b/p256k/btcec/btcec.go @@ -14,6 +14,7 @@ import ( type Signer struct { SecretKey *secp256k1.SecretKey PublicKey *secp256k1.PublicKey + BTCECSec *ec.SecretKey pkb, skb []byte } @@ -25,22 +26,14 @@ func (s *Signer) Generate() (err error) { return } s.skb = s.SecretKey.Serialize() + s.BTCECSec, _ = ec.PrivKeyFromBytes(s.skb) s.PublicKey = s.SecretKey.PubKey() s.pkb = schnorr.SerializePubKey(s.PublicKey) return } -// GenerateForECDH creates a new Signer. -func (s *Signer) GenerateForECDH() (err error) { - return s.Generate() -} - -func (s *Signer) InitECDH() { - // noop because this isn't needed in this version -} - // InitSec initialises a Signer using raw secret key bytes. -func (s *Signer) InitSec(sec []byte, _ ...bool) (err error) { +func (s *Signer) InitSec(sec []byte) (err error) { if len(sec) != secp256k1.SecKeyBytesLen { err = errorf.E("sec key must be %d bytes", secp256k1.SecKeyBytesLen) return @@ -48,6 +41,7 @@ func (s *Signer) InitSec(sec []byte, _ ...bool) (err error) { s.SecretKey = secp256k1.SecKeyFromBytes(sec) s.PublicKey = s.SecretKey.PubKey() s.pkb = schnorr.SerializePubKey(s.PublicKey) + s.BTCECSec, _ = ec.PrivKeyFromBytes(s.skb) return } @@ -112,7 +106,7 @@ func (s *Signer) ECDH(pubkeyBytes []byte) (secret []byte, err error) { ); chk.E(err) { return } - secret = ec.GenerateSharedSecret(s.SecretKey, pub) + secret = ec.GenerateSharedSecret(s.BTCECSec, pub) return } diff --git a/p256k/btcec/btcec_test.go b/p256k/btcec/btcec_test.go index 3f46aa0..2b7bd52 100644 --- a/p256k/btcec/btcec_test.go +++ b/p256k/btcec/btcec_test.go @@ -1,13 +1,16 @@ package btcec_test import ( + "bufio" "bytes" "testing" "time" - "orly.dev/chk" - "orly.dev/log" + "orly.dev/ec/schnorr" + "orly.dev/event" + "orly.dev/event/examples" "orly.dev/p256k/btcec" + "orly.dev/sha256" ) func TestSigner_Generate(t *testing.T) { @@ -26,17 +29,96 @@ func TestSigner_Generate(t *testing.T) { } func TestBTCECSignerVerify(t *testing.T) { + evs := make([]*event.E, 0, 10000) + scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache)) + buf := make([]byte, 1_000_000) + scanner.Buffer(buf, len(buf)) + var err error + signer := &btcec.Signer{} + for scanner.Scan() { + var valid bool + b := scanner.Bytes() + ev := event.New() + if _, err = ev.Unmarshal(b); chk.E(err) { + t.Errorf("failed to marshal\n%s", b) + } else { + if valid, err = ev.Verify(); chk.E(err) || !valid { + t.Errorf("invalid signature\n%s", b) + continue + } + } + id := ev.GetIDBytes() + if len(id) != sha256.Size { + t.Errorf("id should be 32 bytes, got %d", len(id)) + continue + } + if err = signer.InitPub(ev.Pubkey); chk.E(err) { + t.Errorf("failed to init pub key: %s\n%0x", err, b) + } + if valid, err = signer.Verify(id, ev.Sig); chk.E(err) { + t.Errorf("failed to verify: %s\n%0x", err, b) + } + if !valid { + t.Errorf( + "invalid signature for pub %0x %0x %0x", ev.Pubkey, id, + ev.Sig, + ) + } + evs = append(evs, ev) + } } func TestBTCECSignerSign(t *testing.T) { + evs := make([]*event.E, 0, 10000) + scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache)) + buf := make([]byte, 1_000_000) + scanner.Buffer(buf, len(buf)) + var err error + signer := &btcec.Signer{} 
+ var skb []byte + if err = signer.Generate(); chk.E(err) { + t.Fatal(err) + } + skb = signer.Sec() + if err = signer.InitSec(skb); chk.E(err) { + t.Fatal(err) + } + verifier := &btcec.Signer{} + pkb := signer.Pub() + if err = verifier.InitPub(pkb); chk.E(err) { + t.Fatal(err) + } + for scanner.Scan() { + b := scanner.Bytes() + ev := event.New() + if _, err = ev.Unmarshal(b); chk.E(err) { + t.Errorf("failed to marshal\n%s", b) + } + evs = append(evs, ev) + } + var valid bool + sig := make([]byte, schnorr.SignatureSize) + for _, ev := range evs { + ev.Pubkey = pkb + id := ev.GetIDBytes() + if sig, err = signer.Sign(id); chk.E(err) { + t.Errorf("failed to sign: %s\n%0x", err, id) + } + if valid, err = verifier.Verify(id, sig); chk.E(err) { + t.Errorf("failed to verify: %s\n%0x", err, id) + } + if !valid { + t.Errorf("invalid signature") + } + } + signer.Zero() } func TestBTCECECDH(t *testing.T) { n := time.Now() var err error var counter int - const total = 200 - var count int + const total = 100 for _ = range total { s1 := new(btcec.Signer) if err = s1.Generate(); chk.E(err) { @@ -62,16 +144,13 @@ func TestBTCECECDH(t *testing.T) { secret2, ) } - count++ } } a := time.Now() duration := a.Sub(n) log.I.Ln( - "errors", counter, - "total", count, - "time", duration, - "time/op", duration/time.Duration(count), - "ops/sec", int(time.Second)/int(duration/time.Duration(count)), + "errors", counter, "total", total, "time", duration, "time/op", + int(duration/total), + "ops/sec", int(time.Second)/int(duration/total), ) } diff --git a/p256k/btcec/util_test.go b/p256k/btcec/util_test.go new file mode 100644 index 0000000..bcbd11a --- /dev/null +++ b/p256k/btcec/util_test.go @@ -0,0 +1,9 @@ +package btcec_test + +import ( + "orly.dev/lol" +) + +var ( + log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf +) diff --git a/p256k/doc.go b/p256k/doc.go index 990a438..d88e08e 100644 --- a/p256k/doc.go +++ b/p256k/doc.go @@ -2,5 +2,5 @@ // bitcoin/libsecp256k1 library for fast signature creation and verification of // the BIP-340 nostr X-only signatures and public keys, and ECDH. // -// Currently, the ECDH is only implemented with the btcec library. +// Currently the ECDH is only implemented with the btcec library. 
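A rough sketch of the ECDH round trip that TestBTCECECDH above exercises, using the btcec-backed Signer from this patch (error handling abbreviated; assumes the usual imports inside a test or main):

    s1, s2 := new(btcec.Signer), new(btcec.Signer)
    if err := s1.Generate(); chk.E(err) {
        return
    }
    if err := s2.Generate(); chk.E(err) {
        return
    }
    shared1, _ := s1.ECDH(s2.Pub())
    shared2, _ := s2.ECDH(s1.Pub())
    // both parties should derive the same shared secret
    if !bytes.Equal(shared1, shared2) {
        // mismatch indicates a bug
    }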
package p256k diff --git a/p256k/p256k.go b/p256k/p256k.go index 9eea48e..da66883 100644 --- a/p256k/p256k.go +++ b/p256k/p256k.go @@ -37,18 +37,6 @@ var _ realy.I = &Signer{} // Generate a new Signer key pair using the CGO bindings to libsecp256k1 func (s *Signer) Generate() (err error) { - var cs *Sec - var cx *XPublicKey - if s.skb, s.pkb, cs, cx, err = Generate(); chk.E(err) { - return - } - s.SecretKey = &cs.Key - s.PublicKey = cx.Key - return -} - -// GenerateForECDH a new Signer key pair using the CGO bindings to libsecp256k1 -func (s *Signer) GenerateForECDH() (err error) { var cs *Sec var cx *XPublicKey if s.skb, s.pkb, cs, cx, err = Generate(); chk.E(err) { @@ -60,9 +48,10 @@ func (s *Signer) GenerateForECDH() (err error) { return } -func (s *Signer) InitSec(skb []byte, nobtcec ...bool) (err error) { +func (s *Signer) InitSec(skb []byte) (err error) { var cs *Sec var cx *XPublicKey + // var cp *PublicKey if s.pkb, cs, cx, err = FromSecretBytes(skb); chk.E(err) { if err.Error() != "provided secret generates a public key with odd Y coordinate, fixed version returned" { log.E.Ln(err) @@ -74,10 +63,7 @@ func (s *Signer) InitSec(skb []byte, nobtcec ...bool) (err error) { s.PublicKey = cx.Key // s.ECPublicKey = cp.Key // needed for ecdh - if len(nobtcec) > 0 && nobtcec[0] != true { - } else { - s.BTCECSec, _ = btcec.PrivKeyFromBytes(s.skb) - } + s.BTCECSec, _ = btcec.PrivKeyFromBytes(s.skb) return } @@ -94,6 +80,8 @@ func (s *Signer) InitPub(pub []byte) (err error) { func (s *Signer) Sec() (b []byte) { return s.skb } func (s *Signer) Pub() (b []byte) { return s.pkb } +// func (s *Signer) ECPub() (b []byte) { return s.pkb } + func (s *Signer) Sign(msg []byte) (sig []byte, err error) { if s.SecretKey == nil { err = errorf.E("p256k: I secret not initialized") @@ -125,18 +113,7 @@ func (s *Signer) Verify(msg, sig []byte) (valid bool, err error) { return } -func (s *Signer) InitECDH() { - s.BTCECSec, _ = btcec.PrivKeyFromBytes(s.skb) -} - func (s *Signer) ECDH(pubkeyBytes []byte) (secret []byte, err error) { - if s.BTCECSec == nil { - if s.skb == nil { - err = errorf.E("p256k: Secret key bytes not initialized") - return - } - s.BTCECSec, _ = btcec.PrivKeyFromBytes(s.skb) - } var pub *secp256k1.PublicKey if pub, err = secp256k1.ParsePubKey( append( diff --git a/p256k/p256k_test.go b/p256k/p256k_test.go index 4bd9a7e..a094cc9 100644 --- a/p256k/p256k_test.go +++ b/p256k/p256k_test.go @@ -3,13 +3,15 @@ package p256k_test import ( + "bufio" "bytes" + "crypto/sha256" "testing" "time" - "github.com/minio/sha256-simd" - "orly.dev/chk" - "orly.dev/log" + "orly.dev/ec/schnorr" + "orly.dev/event" + "orly.dev/event/examples" "orly.dev/p256k" realy "orly.dev/signer" ) @@ -30,43 +32,94 @@ func TestSigner_Generate(t *testing.T) { } func TestSignerVerify(t *testing.T) { - // Initialize a new signer + // evs := make([]*event.E, 0, 10000) + scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache)) + buf := make([]byte, 1_000_000) + scanner.Buffer(buf, len(buf)) + var err error signer := &p256k.Signer{} - err := signer.Generate() - if chk.E(err) { - t.Fatalf("Failed to generate signer key pair: %v", err) + for scanner.Scan() { + var valid bool + b := scanner.Bytes() + bc := make([]byte, 0, len(b)) + bc = append(bc, b...) 
+ ev := event.New() + if _, err = ev.Unmarshal(b); chk.E(err) { + t.Errorf("failed to marshal\n%s", b) + } else { + if valid, err = ev.Verify(); chk.T(err) || !valid { + t.Errorf("invalid signature\n%s", bc) + continue + } + } + id := ev.GetIDBytes() + if len(id) != sha256.Size { + t.Errorf("id should be 32 bytes, got %d", len(id)) + continue + } + if err = signer.InitPub(ev.Pubkey); chk.T(err) { + t.Errorf("failed to init pub key: %s\n%0x", err, ev.Pubkey) + continue + } + if valid, err = signer.Verify(id, ev.Sig); chk.E(err) { + t.Errorf("failed to verify: %s\n%0x", err, ev.Id) + continue + } + if !valid { + t.Errorf( + "invalid signature for\npub %0x\neid %0x\nsig %0x\n%s", + ev.Pubkey, id, ev.Sig, bc, + ) + continue + } + // fmt.Printf("%s\n", bc) + // evs = append(evs, ev) } +} - // Sample message to sign - message := sha256.Sum256([]byte("Hello, world!")) - // Sign the message - signature, err := signer.Sign(message[:]) - if chk.E(err) { - t.Fatalf("Failed to sign message: %v", err) +func TestSignerSign(t *testing.T) { + evs := make([]*event.E, 0, 10000) + scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache)) + buf := make([]byte, 1_000_000) + scanner.Buffer(buf, len(buf)) + var err error + signer := &p256k.Signer{} + var skb, pkb []byte + if skb, pkb, _, _, err = p256k.Generate(); chk.E(err) { + t.Fatal(err) } - - // Verify the signature - valid, err := signer.Verify(message[:], signature) - if chk.E(err) { - t.Fatalf("Error verifying signature: %v", err) + log.I.S(skb, pkb) + if err = signer.InitSec(skb); chk.E(err) { + t.Fatal(err) } - - // Check if the signature is valid - if !valid { - t.Error("Valid signature was rejected") + verifier := &p256k.Signer{} + if err = verifier.InitPub(pkb); chk.E(err) { + t.Fatal(err) } - - // Modify the message and verify again - tamperedMessage := sha256.Sum256([]byte("Hello, tampered world!")) - valid, err = signer.Verify(tamperedMessage[:], signature) - if !chk.E(err) { - t.Fatalf("Error verifying tampered message: %v", err) + for scanner.Scan() { + b := scanner.Bytes() + ev := event.New() + if _, err = ev.Unmarshal(b); chk.E(err) { + t.Errorf("failed to marshal\n%s", b) + } + evs = append(evs, ev) } - - // Expect the verification to fail - if valid { - t.Error("Invalid signature was accepted") + var valid bool + sig := make([]byte, schnorr.SignatureSize) + for _, ev := range evs { + ev.Pubkey = pkb + id := ev.GetIDBytes() + if sig, err = signer.Sign(id); chk.E(err) { + t.Errorf("failed to sign: %s\n%0x", err, id) + } + if valid, err = verifier.Verify(id, sig); chk.E(err) { + t.Errorf("failed to verify: %s\n%0x", err, id) + } + if !valid { + t.Errorf("invalid signature") + } } + signer.Zero() } func TestECDH(t *testing.T) { @@ -74,14 +127,14 @@ func TestECDH(t *testing.T) { var err error var s1, s2 realy.I var counter int - const total = 50 + const total = 100 for _ = range total { s1, s2 = &p256k.Signer{}, &p256k.Signer{} - if err = s1.GenerateForECDH(); chk.E(err) { + if err = s1.Generate(); chk.E(err) { t.Fatal(err) } for _ = range total { - if err = s2.GenerateForECDH(); chk.E(err) { + if err = s2.Generate(); chk.E(err) { t.Fatal(err) } var secret1, secret2 []byte diff --git a/p256k/secp256k1.go b/p256k/secp256k1.go index 59ad93f..9c276c4 100644 --- a/p256k/secp256k1.go +++ b/p256k/secp256k1.go @@ -4,14 +4,14 @@ package p256k import ( "crypto/rand" - "unsafe" - - "github.com/minio/sha256-simd" "orly.dev/chk" - "orly.dev/ec/schnorr" - "orly.dev/ec/secp256k1" "orly.dev/errorf" "orly.dev/log" + "unsafe" + + "orly.dev/ec/schnorr" + 
"orly.dev/ec/secp256k1" + "orly.dev/sha256" ) /* diff --git a/p256k/secp256k1_test.go b/p256k/secp256k1_test.go index 79323bb..4237e76 100644 --- a/p256k/secp256k1_test.go +++ b/p256k/secp256k1_test.go @@ -3,11 +3,86 @@ package p256k_test import ( + "bufio" + "bytes" "testing" + + "orly.dev/ec/schnorr" + "orly.dev/event" + "orly.dev/event/examples" + "orly.dev/p256k" + "orly.dev/sha256" ) func TestVerify(t *testing.T) { + evs := make([]*event.E, 0, 10000) + scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache)) + buf := make([]byte, 1_000_000) + scanner.Buffer(buf, len(buf)) + var err error + for scanner.Scan() { + var valid bool + b := scanner.Bytes() + ev := event.New() + if _, err = ev.Unmarshal(b); chk.E(err) { + t.Errorf("failed to marshal\n%s", b) + } else { + if valid, err = ev.Verify(); chk.E(err) || !valid { + t.Errorf("btcec: invalid signature\n%s", b) + continue + } + } + id := ev.GetIDBytes() + if len(id) != sha256.Size { + t.Errorf("id should be 32 bytes, got %d", len(id)) + continue + } + if err = p256k.VerifyFromBytes(id, ev.Sig, ev.Pubkey); chk.E(err) { + t.Error(err) + continue + } + evs = append(evs, ev) + } } func TestSign(t *testing.T) { + evs := make([]*event.E, 0, 10000) + scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache)) + buf := make([]byte, 1_000_000) + scanner.Buffer(buf, len(buf)) + var err error + var sec1 *p256k.Sec + var pub1 *p256k.XPublicKey + var pb []byte + if _, pb, sec1, pub1, err = p256k.Generate(); chk.E(err) { + t.Fatal(err) + } + for scanner.Scan() { + b := scanner.Bytes() + ev := event.New() + if _, err = ev.Unmarshal(b); chk.E(err) { + t.Errorf("failed to marshal\n%s", b) + } + evs = append(evs, ev) + } + sig := make([]byte, schnorr.SignatureSize) + for _, ev := range evs { + ev.Pubkey = pb + var uid *p256k.Uchar + if uid, err = p256k.Msg(ev.GetIDBytes()); chk.E(err) { + t.Fatal(err) + } + if sig, err = p256k.Sign(uid, sec1.Sec()); chk.E(err) { + t.Fatal(err) + } + ev.Sig = sig + var usig *p256k.Uchar + if usig, err = p256k.Sig(sig); chk.E(err) { + t.Fatal(err) + } + if !p256k.Verify(uid, usig, pub1.Key) { + t.Errorf("invalid signature") + } + } + p256k.Zero(&sec1.Key) } diff --git a/p256k/util_test.go b/p256k/util_test.go new file mode 100644 index 0000000..4399dc7 --- /dev/null +++ b/p256k/util_test.go @@ -0,0 +1,9 @@ +package p256k_test + +import ( + "orly.dev/lol" +) + +var ( + log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf +) diff --git a/qu/README.adoc b/qu/README.adoc new file mode 100644 index 0000000..0ad2e8a --- /dev/null +++ b/qu/README.adoc @@ -0,0 +1,60 @@ += qu + +===== observable signal channels + +simple channels that act as breakers or momentary one-shot triggers. + +can enable logging to get detailed information on channel state, and channels do +not panic if closed channels are attempted to be closed or signalled with. + +provides a neat function based syntax for usage. + +wait function does require use of the `<-` receive operator prefix to be used in +a select statement. 
+ +== usage + +=== creating channels: + +==== unbuffered + +---- +newSigChan := qu.T() +---- + +==== buffered + +---- +newBufferedSigChan := qu.Ts(5) +---- + +==== closing + +---- +newSigChan.Q() +---- + +==== signalling + +---- +newBufferedSigChan.Signal() +---- + +==== logging features + +---- +numberOpenUnbufferedChannels := GetOpenUnbufferedChanCount() + +numberOpenBufferedChannels := GetOpenBufferedChanCount() +---- + +print a list of closed and open channels known by qu: + +---- +PrintChanState() +---- + +== garbage collection + +this library automatically cleans up closed channels once a minute to free +resources that have become unused. \ No newline at end of file diff --git a/qu/qu.go b/qu/qu.go new file mode 100644 index 0000000..2e277d1 --- /dev/null +++ b/qu/qu.go @@ -0,0 +1,242 @@ +// Package qu is a library for making handling signal (chan struct{}) channels +// simpler, as well as monitoring the state of the signal channels in an +// application. +package qu + +import ( + "fmt" + "orly.dev/log" + "strings" + "sync" + "time" + + "orly.dev/atomic" + "orly.dev/lol" +) + +// C is your basic empty struct signalling channel +type C chan struct{} + +var ( + createdList []string + createdChannels []C + createdChannelBufferCounts []int + mx sync.Mutex + logEnabled = atomic.NewBool(false) +) + +// SetLogging switches on and off the channel logging +func SetLogging(on bool) { + logEnabled.Store(on) +} + +func l(a ...interface{}) { + if logEnabled.Load() { + log.D.Ln(a...) + } +} + +func lc(cl func() string) { + if logEnabled.Load() { + log.D.Ln(cl()) + } +} + +// T creates an unbuffered chan struct{} for trigger and quit signalling (momentary and breaker +// switches) +func T() C { + mx.Lock() + defer mx.Unlock() + msg := fmt.Sprintf("chan from %s", lol.GetLoc(1)) + l("created", msg) + createdList = append(createdList, msg) + o := make(C) + createdChannels = append(createdChannels, o) + createdChannelBufferCounts = append(createdChannelBufferCounts, 0) + return o +} + +// Ts creates a buffered chan struct{} which is specifically intended for signalling without +// blocking, generally one is the size of buffer to be used, though there might be conceivable +// cases where the channel should accept more signals without blocking the caller +func Ts(n int) C { + mx.Lock() + defer mx.Unlock() + msg := fmt.Sprintf("buffered chan (%d) from %s", n, lol.GetLoc(1)) + l("created", msg) + createdList = append(createdList, msg) + o := make(C, n) + createdChannels = append(createdChannels, o) + createdChannelBufferCounts = append(createdChannelBufferCounts, n) + return o +} + +// Q closes the channel, which makes it emit a nil every time it is selected. 
+func (c C) Q() { + open := !testChanIsClosed(c) + lc( + func() (o string) { + lo := getLocForChan(c) + mx.Lock() + defer mx.Unlock() + if open { + return "closing chan from " + lo + "\n" + strings.Repeat( + " ", + 48, + ) + "from" + lol.GetLoc(1) + } else { + return "from" + lol.GetLoc(1) + "\n" + strings.Repeat(" ", 48) + + "channel " + lo + " was already closed" + } + }, + ) + if open { + close(c) + } +} + +// Signal sends struct{}{} on the channel which functions as a momentary switch, +// useful in pairs for stop/start +func (c C) Signal() { + lc(func() (o string) { return "signalling " + getLocForChan(c) }) + if !testChanIsClosed(c) { + c <- struct{}{} + } +} + +// Wait should be placed with a `<-` in a select case in addition to the channel +// variable name +func (c C) Wait() <-chan struct{} { + lc( + func() (o string) { + return fmt.Sprint( + "waiting on "+getLocForChan(c)+"at", + lol.GetLoc(1), + ) + }, + ) + return c +} + +// IsClosed exposes a test to see if the channel is closed +func (c C) IsClosed() bool { + return testChanIsClosed(c) +} + +// testChanIsClosed allows you to see whether the channel has been closed so you +// can avoid a panic by trying to close or signal on it +func testChanIsClosed(ch C) (o bool) { + if ch == nil { + return true + } + select { + case <-ch: + o = true + default: + } + return +} + +// getLocForChan finds which record connects to the channel in question +func getLocForChan(c C) (s string) { + s = "not found" + mx.Lock() + for i := range createdList { + if i >= len(createdChannels) { + break + } + if createdChannels[i] == c { + s = createdList[i] + } + } + mx.Unlock() + return +} + +// once a minute clean up the channel cache to remove closed channels no longer +// in use +func init() { + go func() { + for { + <-time.After(time.Minute) + l("cleaning up closed channels") + var c []C + var ll []string + mx.Lock() + for i := range createdChannels { + if i >= len(createdList) { + break + } + if testChanIsClosed(createdChannels[i]) { + } else { + c = append(c, createdChannels[i]) + ll = append(ll, createdList[i]) + } + } + createdChannels = c + createdList = ll + mx.Unlock() + } + }() +} + +// PrintChanState creates an output showing the current state of the channels +// being monitored This is a function for use by the programmer while debugging +func PrintChanState() { + mx.Lock() + for i := range createdChannels { + if i >= len(createdList) { + break + } + if testChanIsClosed(createdChannels[i]) { + log.T.Ln(">>> closed", createdList[i]) + } else { + log.T.Ln("<<< open", createdList[i]) + } + } + mx.Unlock() +} + +// GetOpenUnbufferedChanCount returns the number of qu channels that are still open +func GetOpenUnbufferedChanCount() (o int) { + mx.Lock() + var c int + for i := range createdChannels { + if i >= len(createdChannels) { + break + } + // skip buffered channels + if createdChannelBufferCounts[i] > 0 { + continue + } + if testChanIsClosed(createdChannels[i]) { + c++ + } else { + o++ + } + } + mx.Unlock() + return +} + +// GetOpenBufferedChanCount returns the number of qu channels that are still open +func GetOpenBufferedChanCount() (o int) { + mx.Lock() + var c int + for i := range createdChannels { + if i >= len(createdChannels) { + break + } + // skip unbuffered channels + if createdChannelBufferCounts[i] < 1 { + continue + } + if testChanIsClosed(createdChannels[i]) { + c++ + } else { + o++ + } + } + mx.Unlock() + return +} diff --git a/ratel/close.go b/ratel/close.go new file mode 100644 index 0000000..604ddbb --- /dev/null +++ 
b/ratel/close.go @@ -0,0 +1,26 @@ +package ratel + +import ( + "orly.dev/chk" + "orly.dev/log" +) + +// Close the database. If the Flatten flag was set, then trigger the flattening of tables before +// shutting down. +func (r *T) Close() (err error) { + // chk.E(r.DB.Sync()) + r.WG.Wait() + log.I.F("closing database %s", r.Path()) + if r.Flatten { + if err = r.DB.Flatten(4); chk.E(err) { + } + log.D.F("database flattened") + } + if err = r.seq.Release(); chk.E(err) { + } + log.D.F("database released") + if err = r.DB.Close(); chk.E(err) { + } + log.I.F("database closed") + return +} diff --git a/ratel/compact.go b/ratel/compact.go new file mode 100644 index 0000000..3ec89f1 --- /dev/null +++ b/ratel/compact.go @@ -0,0 +1,34 @@ +package ratel + +import ( + "orly.dev/chk" + "orly.dev/event" +) + +// Unmarshal an event from bytes, using compact encoding if configured. +func (r *T) Unmarshal(ev *event.E, evb []byte) (rem []byte, err error) { + // if r.UseCompact { + // if rem, err = ev.UnmarshalCompact(evb); chk.E(err) { + // ev = nil + // evb = evb[:0] + // return + // } + // } else { + if rem, err = ev.Unmarshal(evb); chk.E(err) { + ev = nil + evb = evb[:0] + return + } + // } + return +} + +// Marshal an event using compact encoding if configured. +func (r *T) Marshal(ev *event.E, dst []byte) (b []byte) { + // if r.UseCompact { + // b = ev.MarshalCompact(dst) + // } else { + b = ev.Marshal(dst) + // } + return +} diff --git a/ratel/configuration.go b/ratel/configuration.go new file mode 100644 index 0000000..53d8f3a --- /dev/null +++ b/ratel/configuration.go @@ -0,0 +1,47 @@ +package ratel + +// import ( +// "encoding/json" +// +// "github.com/dgraph-io/badger/v4" +// +// "orly.dev/ratel/prefixes" +// "orly.dev/store" +// ) +// +// // SetConfiguration stores the store.Configuration value to a provided setting. +// func (r *T) SetConfiguration(c *store.Configuration) (err error) { +// var b []byte +// if b, err = json.Marshal(c); chk.E(err) { +// return +// } +// log.I.F("%s", b) +// err = r.Update(func(txn *badger.Txn) (err error) { +// if err = txn.Set(prefixes.Configuration.Key(), b); chk.E(err) { +// return +// } +// return +// }) +// return +// } +// +// // GetConfiguration returns the current store.Configuration stored in the database. 
+// func (r *T) GetConfiguration() (c *store.Configuration, err error) { +// err = r.View(func(txn *badger.Txn) (err error) { +// c = &store.Configuration{BlockList: make([]string, 0)} +// var it *badger.Item +// if it, err = txn.Get(prefixes.Configuration.Key()); chk.E(err) { +// err = nil +// return +// } +// var b []byte +// if b, err = it.ValueCopy(nil); chk.E(err) { +// return +// } +// if err = json.Unmarshal(b, c); chk.E(err) { +// return +// } +// return +// }) +// return +// } diff --git a/ratel/countevents.go b/ratel/countevents.go new file mode 100644 index 0000000..e560bb0 --- /dev/null +++ b/ratel/countevents.go @@ -0,0 +1,135 @@ +package ratel + +// func (r *T) CountEvents(c context.T, f *filter.T) (count int, approx bool, err error) { +// log.T.ToSliceOfBytes("QueryEvents,%s", f.Serialize()) +// var queries []query +// var extraFilter *filter.T +// var since uint64 +// if queries, extraFilter, since, err = PrepareQueries(f); chk.E(err) { +// return +// } +// var delEvs [][]byte +// defer func() { +// // after the count delete any events that are expired as per NIP-40 +// for _, d := range delEvs { +// chk.E(r.DeleteEvent(r.Ctx, eventid.NewWith(d))) +// } +// }() +// // search for the keys generated from the filter +// for _, q := range queries { +// select { +// case <-c.Done(): +// return +// default: +// } +// var eventKey []byte +// err = r.View(func(txn *badger.Txn) (err error) { +// // iterate only through keys and in reverse order +// opts := badger.IteratorOptions{ +// Reverse: true, +// } +// it := txn.NewIterator(opts) +// defer it.Close() +// for it.Seek(q.start); it.ValidForPrefix(q.searchPrefix); it.Next() { +// select { +// case <-r.Ctx.Done(): +// return +// case <-c.Done(): +// return +// default: +// } +// item := it.Item() +// k := item.KeyCopy(nil) +// if !q.skipTS { +// if len(k) < createdat.Len+serial.Len { +// continue +// } +// createdAt := createdat.FromKey(k) +// if createdAt.Val.U64() < since { +// break +// } +// } +// // todo: here we should get the kind field from the key and and collate the +// // todo: matches that are replaceable/parameterized replaceable ones to decode +// // todo: to check for replacements so we can actually not set the approx flag. +// ser := serial.FromKey(k) +// eventKey = prefixes.Event.Key(ser) +// // eventKeys = append(eventKeys, idx) +// } +// return +// }) +// if chk.E(err) { +// // this means shutdown, probably +// if errors.Is(err, badger.ErrDBClosed) { +// return +// } +// } +// // todo: here we should decode replaceable events and discard the outdated versions +// if extraFilter != nil { +// // if there is an extra filter we need to fetch and decode the event to determine a +// // match. +// err = r.View(func(txn *badger.Txn) (err error) { +// opts := badger.IteratorOptions{Reverse: true} +// it := txn.NewIterator(opts) +// defer it.Close() +// for it.Seek(eventKey); it.ValidForPrefix(eventKey); it.Next() { +// item := it.Item() +// if r.HasL2 && item.ValueSize() == sha256.Size { +// // we will count this though it may not match in fact. for general, +// // simple filters there isn't likely to be an extrafilter anyway. the +// // count result can have an "approximate" flag so we flip this now. 
+// approx = true +// return +// } +// ev := &event.E{} +// var appr bool +// if err = item.Value(func(eventValue []byte) (err error) { +// var rem []byte +// if rem, err = r.Unmarshal(ev, eventValue); chk.E(err) { +// return +// } +// if len(rem) > 0 { +// log.T.S(rem) +// } +// if et := ev.Tags.GetFirst(tag.New("expiration")); et != nil { +// var exp uint64 +// if exp, err = strconv.ParseUint(string(et.Value()), 10, 64); chk.E(err) { +// return +// } +// if int64(exp) > time.Now().Unix() { +// // this needs to be deleted +// delEvs = append(delEvs, ev.Id) +// return +// } +// } +// if ev.Kind.IsReplaceable() || +// (ev.Kind.IsParameterizedReplaceable() && +// ev.Tags.GetFirst(tag.New("d")) != nil) { +// // we aren't going to spend this extra time so this just flips the +// // approximate flag. generally clients are asking for counts to get +// // an outside estimate anyway, to avoid exceeding MaxLimit +// appr = true +// } +// return +// }); chk.E(err) { +// continue +// } +// if ev == nil { +// continue +// } +// if extraFilter.Matches(ev) { +// count++ +// if appr { +// approx = true +// } +// return +// } +// } +// return +// }) +// } else { +// count++ +// } +// } +// return +// } diff --git a/ratel/create-a-tag.go b/ratel/create-a-tag.go new file mode 100644 index 0000000..91511fe --- /dev/null +++ b/ratel/create-a-tag.go @@ -0,0 +1,94 @@ +package ratel + +import ( + "orly.dev/chk" + "orly.dev/log" + "strings" + + "orly.dev/ec/schnorr" + "orly.dev/hex" + "orly.dev/ratel/keys" + "orly.dev/ratel/keys/arb" + "orly.dev/ratel/keys/createdat" + "orly.dev/ratel/keys/index" + "orly.dev/ratel/keys/kinder" + "orly.dev/ratel/keys/pubkey" + "orly.dev/ratel/keys/serial" + "orly.dev/ratel/prefixes" + "orly.dev/tag/atag" +) + +// Create_a_Tag generates tag indexes from a tag key, tag value, created_at +// timestamp and the event serial. 
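A rough sketch of how the three branches of the function below behave (the createdat and serial values here are placeholders; in real use the serial comes from the stored event, and this code assumes the surrounding ratel package imports):

    ca := createdat.New(timestamp.Now())
    ser := serial.New(nil) // placeholder; normally the event's allocated serial
    // an ordinary tag value is indexed as raw utf-8 under prefixes.Tag;
    // a 64-character hex value is detected as a pubkey and indexed under prefixes.Tag32;
    // an "a" value of the form "kind:pubkey:dtag" is split and indexed under prefixes.TagAddr
    prf, elems, err := Create_a_Tag("t", "nostr", ca, ser)
    log.I.S(prf, elems, err)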
+func Create_a_Tag( + tagKey, tagValue string, CA *createdat.T, + ser *serial.T, +) (prf index.P, elems []keys.Element, err error) { + + var pkb []byte + // first check if it might be a public key, fastest test + if len(tagValue) == 2*schnorr.PubKeyBytesLen { + // this could be a pubkey + pkb, err = hex.Dec(tagValue) + if err == nil { + // it's a pubkey + var pkk keys.Element + if pkk, err = pubkey.NewFromBytes(pkb); chk.E(err) { + return + } + prf, elems = prefixes.Tag32, keys.Make(pkk, ser) + return + } else { + err = nil + } + } + // check for `a` tag + if tagKey == "a" && strings.Count(tagValue, ":") == 2 { + a := &atag.T{} + var rem []byte + if rem, err = a.Unmarshal([]byte(tagValue)); chk.E(err) { + return + } + if len(rem) > 0 { + log.I.S("remainder", tagKey, tagValue, rem) + } + prf = prefixes.TagAddr + var pk *pubkey.T + if pk, err = pubkey.NewFromBytes(a.PubKey); chk.E(err) { + return + } + elems = keys.Make( + kinder.New(a.Kind.K), pk, arb.New(a.DTag), CA, + ser, + ) + return + // todo: leaving this here in case bugz, note to remove this later + // // this means we will get 3 pieces here + // split := strings.Split(tagValue, ":") + // // middle element should be a public key so must be 64 hex ciphers + // if len(split[1]) != schnorr.PubKeyBytesLen*2 { + // return + // } + // var k uint16 + // var d string + // if pkb, err = hex.Dec(split[1]); !chk.E(err) { + // var kin uint64 + // if kin, err = strconv.ParseUint(split[0], 10, 16); err == nil { + // k = uint16(kin) + // d = split[2] + // var pk *pubkey.T + // if pk, err = pubkey.NewFromBytes(pkb); chk.E(err) { + // return + // } + // prf = prefixes.TagAddr + // elems = keys.Make(kinder.New(k), pk, arb.NewFromString(d), CA, + // ser) + // return + // } + // } + } + // store whatever as utf-8 + prf = prefixes.Tag + elems = keys.Make(arb.New(tagValue), CA, ser) + return +} diff --git a/ratel/del/del.go b/ratel/del/del.go new file mode 100644 index 0000000..b5a6a80 --- /dev/null +++ b/ratel/del/del.go @@ -0,0 +1,13 @@ +// Package del is a simple sorted list for database keys, primarily used to +// collect lists of events that need to be deleted either by expiration or for +// the garbage collector. +package del + +import "bytes" + +// Items is an array of bytes used for sorting and collating database index keys. +type Items [][]byte + +func (c Items) Len() int { return len(c) } +func (c Items) Less(i, j int) bool { return bytes.Compare(c[i], c[j]) < 0 } +func (c Items) Swap(i, j int) { c[i], c[j] = c[j], c[i] } diff --git a/ratel/deleteevent.go b/ratel/deleteevent.go new file mode 100644 index 0000000..adcda3e --- /dev/null +++ b/ratel/deleteevent.go @@ -0,0 +1,120 @@ +package ratel + +import ( + "github.com/dgraph-io/badger/v4" + "orly.dev/chk" + "orly.dev/log" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/eventid" + "orly.dev/ratel/keys" + "orly.dev/ratel/keys/createdat" + "orly.dev/ratel/keys/id" + "orly.dev/ratel/keys/index" + "orly.dev/ratel/keys/serial" + "orly.dev/ratel/keys/tombstone" + "orly.dev/ratel/prefixes" + "orly.dev/timestamp" +) + +// DeleteEvent deletes an event if it exists and writes a tombstone for the event unless +// requested not to, so that the event can't be saved again. 
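A minimal usage sketch for the method below, assuming db is an opened *ratel.T, c a context.T and idBytes the raw 32-byte id of a stored event:

    if err := db.DeleteEvent(c, eventid.NewWith(idBytes)); chk.E(err) {
        return
    }
    // the optional trailing bool controls whether a tombstone is written (see below)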
+func (r *T) DeleteEvent( + c context.T, eid *eventid.T, noTombstone ...bool, +) (err error) { + log.W.F("deleting event %0x", eid.Bytes()) + var foundSerial []byte + seri := serial.New(nil) + err = r.View( + func(txn *badger.Txn) (err error) { + // query event by id to ensure we don't try to save duplicates + prf := prefixes.Id.Key(id.New(eid)) + it := txn.NewIterator(badger.IteratorOptions{}) + defer it.Close() + it.Seek(prf) + if it.ValidForPrefix(prf) { + var k []byte + // get the serial + k = it.Item().Key() + // copy serial out + keys.Read(k, index.Empty(), id.New(&eventid.T{}), seri) + // save into foundSerial + foundSerial = seri.Val + } + return + }, + ) + if chk.E(err) { + return + } + if foundSerial == nil { + return + } + var indexKeys [][]byte + ev := event.New() + var evKey, evb, tombstoneKey []byte + // fetch the event to get its index keys + err = r.View( + func(txn *badger.Txn) (err error) { + // retrieve the event record + evKey = keys.Write(index.New(prefixes.Event), seri) + it := txn.NewIterator(badger.IteratorOptions{}) + defer it.Close() + it.Seek(evKey) + if it.ValidForPrefix(evKey) { + if evb, err = it.Item().ValueCopy(evb); chk.E(err) { + return + } + // log.I.S(evb) + var rem []byte + if rem, err = r.Unmarshal(ev, evb); chk.E(err) { + return + } + if len(rem) != 0 { + log.I.S(rem) + } + // log.I.S(rem, ev, seri) + indexKeys = GetIndexKeysForEvent(ev, seri) + // // we don't make tombstones for replacements, but it is better to shift that + // // logic outside of this closure. + // if len(noTombstone) > 0 && !noTombstone[0] { + if len(noTombstone) > 0 && !noTombstone[0] { + log.I.F("making tombstone") + ts := tombstone.NewWith(ev.EventId()) + tombstoneKey = prefixes.Tombstone.Key( + ts, createdat.New(timestamp.Now()), + ) + } + // } + return + } + return + }, + ) + if chk.E(err) { + return + } + err = r.Update( + func(txn *badger.Txn) (err error) { + if err = txn.Delete(evKey); chk.E(err) { + } + for _, key := range indexKeys { + if err = txn.Delete(key); chk.E(err) { + } + } + if len(tombstoneKey) > 0 { + log.T.S("writing tombstone", tombstoneKey) + // write tombstone + log.W.F( + "writing tombstone %0x for event %0x", tombstoneKey, ev.Id, + ) + if err = txn.Set(tombstoneKey, nil); chk.E(err) { + return + } + } + return + }, + ) + return +} diff --git a/ratel/export.go b/ratel/export.go new file mode 100644 index 0000000..067cb81 --- /dev/null +++ b/ratel/export.go @@ -0,0 +1,209 @@ +package ratel + +import ( + "errors" + "fmt" + "io" + "orly.dev/chk" + "orly.dev/log" + + "github.com/dgraph-io/badger/v4" + + "orly.dev/context" + "orly.dev/filter" + "orly.dev/hex" + "orly.dev/qu" + "orly.dev/ratel/keys/serial" + "orly.dev/ratel/prefixes" + "orly.dev/tag" + "orly.dev/tags" +) + +// Export the complete database of stored events to an io.Writer in line structured minified +// JSON. +func (r *T) Export(c context.T, w io.Writer, pubkeys ...[]byte) { + var counter int + var err error + if len(pubkeys) > 0 { + var pks []string + for i := range pubkeys { + pks = append(pks, hex.Enc(pubkeys[i])) + } + o := "[" + for _, pk := range pks { + o += pk + "," + } + o += "]" + log.I.F("exporting selected pubkeys:\n%s", o) + keyChan := make(chan []byte, 256) + // specific set of public keys, so we need to run a search + fa := &filter.F{Authors: tag.New(pubkeys...)} + var queries []query + if queries, _, _, err = PrepareQueries(fa); chk.E(err) { + return + } + pTag := [][]byte{[]byte("#b")} + pTag = append(pTag, pubkeys...) 
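A minimal usage sketch for the export method below, assuming db is an opened *ratel.T and c a context.T (the output file name is a placeholder):

    f, err := os.Create("backup.jsonl")
    if chk.E(err) {
        return
    }
    defer f.Close()
    // export every stored event as one JSON object per line
    db.Export(c, f)
    // or limit the export to events associated with specific pubkeys:
    // db.Export(c, f, pubkeyBytes...)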
+ fp := &filter.F{Tags: tags.New(tag.New(pTag...))} + var queries2 []query + if queries2, _, _, err = PrepareQueries(fp); chk.E(err) { + return + } + queries = append(queries, queries2...) + // start up writer loop + quit := qu.T() + go func() { + for { + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return + case <-quit: + return + case eventKey := <-keyChan: + err = r.View( + func(txn *badger.Txn) (err error) { + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return + case <-quit: + return + default: + } + opts := badger.IteratorOptions{Reverse: false} + it := txn.NewIterator(opts) + defer it.Close() + var count int + for it.Seek(eventKey); it.ValidForPrefix(eventKey); it.Next() { + count++ + item := it.Item() + // if r.HasL2 && item.ValueSize() == sha256.Size { + // // we aren't fetching from L2 for export, so don't send this back. + // return + // } + if err = item.Value( + func(eventValue []byte) (err error) { + // send the event to client (no need to re-encode it) + if _, err = fmt.Fprintf( + w, "%s\n", eventValue, + ); chk.E(err) { + return + } + return + }, + ); chk.E(err) { + return + } + } + return + }, + ) + if chk.E(err) { + } + } + } + }() + // stop the writer loop + defer quit.Q() + for _, q := range queries { + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return + default: + } + // search for the keys generated from the filter + err = r.View( + func(txn *badger.Txn) (err error) { + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return + default: + } + opts := badger.IteratorOptions{ + Reverse: true, + } + it := txn.NewIterator(opts) + defer it.Close() + for it.Seek(q.start); it.ValidForPrefix(q.searchPrefix); it.Next() { + item := it.Item() + k := item.KeyCopy(nil) + evKey := prefixes.Event.Key(serial.FromKey(k)) + counter++ + if counter%1000 == 0 && counter > 0 { + log.I.F("%d events exported", counter) + } + keyChan <- evKey + } + return + }, + ) + if chk.E(err) { + // this means shutdown, probably + if errors.Is(err, badger.ErrDBClosed) { + return + } + } + } + } else { + // blanket download requested + err = r.View( + func(txn *badger.Txn) (err error) { + it := txn.NewIterator(badger.IteratorOptions{Prefix: prefixes.Event.Key()}) + defer it.Close() + for it.Rewind(); it.Valid(); it.Next() { + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return + default: + } + item := it.Item() + b, e := item.ValueCopy(nil) + if chk.E(e) { + // already isn't the same as the return value! + // err = nil + continue + } + // send the event to client + // if r.UseCompact { + // ev := &event.F{} + // var rem []byte + // rem, err = ev.UnmarshalCompact(b) + // if chk.E(err) { + // err = nil + // continue + // } + // if len(rem) > 0 { + // log.I.S(rem) + // } + // if _, err = fmt.Fprintf(w, "%s\n", ev.Marshal(nil)); chk.E(err) { + // return + // } + // + // } else { + // the database stores correct JSON versions so no need to decode/encode. 
+ if _, err = fmt.Fprintf(w, "%s\n", b); chk.E(err) { + return + } + // } + counter++ + if counter%1000 == 0 && counter > 0 { + log.I.F("%d events exported", counter) + } + } + return + }, + ) + chk.E(err) + } + log.I.Ln("exported", counter, "events") + return +} diff --git a/ratel/fetch-ids.go b/ratel/fetch-ids.go new file mode 100644 index 0000000..204380f --- /dev/null +++ b/ratel/fetch-ids.go @@ -0,0 +1,79 @@ +package ratel + +import ( + "io" + "orly.dev/chk" + "orly.dev/eventidserial" + + "github.com/dgraph-io/badger/v4" + + "orly.dev/context" + "orly.dev/ratel/keys/id" + "orly.dev/ratel/keys/serial" + "orly.dev/ratel/prefixes" + "orly.dev/tag" +) + +// FetchIds retrieves events based on a list of event Ids that have been provided. +func (r *T) FetchIds(c context.T, evIds *tag.T, out io.Writer) (err error) { + // create an ample buffer for decoding events, 100kb should usually be enough, if + // it needs to get bigger it will be reallocated. + b := make([]byte, 0, 100000) + err = r.View( + func(txn *badger.Txn) (err error) { + for _, v := range evIds.ToSliceOfBytes() { + var evId *id.T + if evId, err = id.NewFromBytes(v); chk.E(err) { + return + } + k := prefixes.Id.Key(evId) + it := txn.NewIterator(badger.DefaultIteratorOptions) + var ser *serial.T + defer it.Close() + for it.Seek(k); it.ValidForPrefix(k); it.Next() { + key := it.Item().Key() + ser = serial.FromKey(key) + break + } + var item *badger.Item + if item, err = txn.Get(prefixes.Event.Key(ser)); chk.E(err) { + return + } + if b, err = item.ValueCopy(nil); chk.E(err) { + return + } + // if r.UseCompact { + // ev := &event.E{} + // var rem []byte + // if rem, err = ev.UnmarshalCompact(b); chk.E(err) { + // return + // } + // if len(rem) > 0 { + // log.I.S(rem) + // } + // if _, err = out.Write(ev.Serialize()); chk.E(err) { + // return + // } + // } else { + // if db isn't using compact encoding the bytes are already right + if _, err = out.Write(b); chk.E(err) { + return + } + // } + // add the new line after entries + if _, err = out.Write([]byte{'\n'}); chk.E(err) { + return + } + } + return + }, + ) + return +} + +func (r *T) EventIdsBySerial(start uint64, count int) ( + evs []eventidserial.E, err error, +) { + // TODO implement me + panic("implement me") +} diff --git a/ratel/garbagecollector.go b/ratel/garbagecollector.go new file mode 100644 index 0000000..6d0e4a6 --- /dev/null +++ b/ratel/garbagecollector.go @@ -0,0 +1,66 @@ +package ratel + +// import ( +// "time" +// +// "orly.dev/units" +// ) +// +// // GarbageCollector starts up a ticker that runs a check on space utilisation +// // and when it exceeds the high-water mark, prunes back to the low-water mark. +// // +// // This function should be invoked as a goroutine, and will terminate when the +// // backend context is canceled. +// // +// // TODO: this needs to be updated and set to actually run by default specifically just for +// // TODO: pruning tombstones after they are a year or more old. 
+// func (r *T) GarbageCollector() { +// log.D.F("starting ratel back-end garbage collector,"+ +// "max size %0.3fGb,"+ +// "high water %0.3fGb,"+ +// "low water %0.3fGb,"+ +// "GC check frequency %v,%s", +// float32(r.DBSizeLimit/units.Gb), +// float32(r.DBHighWater*r.DBSizeLimit/100)/float32(units.Gb), +// float32(r.DBLowWater*r.DBSizeLimit/100)/float32(units.Gb), +// r.GCFrequency, +// r.Path(), +// ) +// var err error +// if err = r.GCRun(); chk.E(err) { +// } +// GCticker := time.NewTicker(r.GCFrequency) +// syncTicker := time.NewTicker(r.GCFrequency * 10) +// out: +// for { +// select { +// case <-r.Ctx.Done(): +// log.W.Ln("stopping event GC ticker") +// GCticker.Stop() +// break out +// case <-GCticker.C: +// // log.T.Ln("running GC", r.Path) +// if err = r.GCRun(); chk.E(err) { +// } +// case <-syncTicker.C: +// chk.E(r.DB.Sync()) +// } +// } +// log.I.Ln("closing badger event store garbage collector") +// } +// +// func (r *T) GCRun() (err error) { +// log.T.Ln("running GC", r.Path()) +// var pruneEvents, pruneIndexes DelItems +// if pruneEvents, pruneIndexes, err = r.GCMark(); chk.E(err) { +// return +// } +// if len(pruneEvents) < 1 && len(pruneIndexes) < 1 { +// // log.I.Ln("GC sweep unnecessary") +// return +// } +// if err = r.GCSweep(pruneEvents, pruneIndexes); chk.E(err) { +// return +// } +// return +// } diff --git a/ratel/gccount.go b/ratel/gccount.go new file mode 100644 index 0000000..0f04f8d --- /dev/null +++ b/ratel/gccount.go @@ -0,0 +1,203 @@ +package ratel + +// import ( +// "encoding/binary" +// "fmt" +// "sort" +// "sync" +// "time" +// +// "github.com/dgraph-io/badger/v4" +// +// "orly.dev/ratel/keys/count" +// "orly.dev/ratel/keys/createdat" +// "orly.dev/ratel/keys/index" +// "orly.dev/ratel/keys/serial" +// "orly.dev/ratel/prefixes" +// "orly.dev/sha256" +// "orly.dev/timestamp" +// "orly.dev/units" +// ) +// +// const KeyLen = serial.Len + 1 +// const PrunedLen = sha256.Size + KeyLen +// const CounterLen = KeyLen + createdat.Len +// +// // GCCount performs a census of events in the event store. It counts the number +// // of events and their size, and if there is a layer 2 enabled, it counts the +// // number of events that have been pruned and thus have indexes to count. +// // +// // Both operations are more efficient combined together rather than separated, +// // thus this is a fairly long function. 
+// func (r *T) GCCount() (unpruned, pruned count.Items, unprunedTotal, +// prunedTotal int, err error) { +// +// // log.D.Ln("running GC count", r.Path()) +// overallStart := time.Now() +// prf := prefixes.Event.Key() +// evStream := r.DB.NewStream() +// evStream.Prefix = prf +// var countMx sync.Mutex +// var totalCounter int +// evStream.ChooseKey = func(item *badger.Item) (b bool) { +// if item.IsDeletedOrExpired() { +// return +// } +// key := make([]byte, index.Len+serial.Len) +// item.KeyCopy(key) +// ser := serial.FromKey(key) +// size := uint32(item.ValueSize()) +// totalCounter++ +// countMx.Lock() +// if size == sha256.Size { +// pruned = append(pruned, &count.Item{ +// Serial: ser.Uint64(), +// Size: PrunedLen, +// }) +// } else { +// unpruned = append(unpruned, &count.Item{ +// Serial: ser.Uint64(), +// Size: size + KeyLen, +// }) +// } +// countMx.Unlock() +// return +// } +// // started := time.Now() +// // run in a background thread to parallelise all the streams +// if err = evStream.Orchestrate(r.Ctx); chk.E(err) { +// return +// } +// log.T.F("counted %d events, %d pruned events in %v %s", len(unpruned), +// len(pruned), time.Now().Sub(overallStart), r.Path()) +// var unprunedBySerial, prunedBySerial count.ItemsBySerial +// unprunedBySerial = count.ItemsBySerial(unpruned) +// sort.Sort(unprunedBySerial) +// var countFresh count.Freshes +// // pruneStarted := time.Now() +// counterStream := r.DB.NewStream() +// counterStream.Prefix = []byte{prefixes.Counter.B()} +// v := make([]byte, createdat.Len) +// countFresh = make(count.Freshes, 0, totalCounter) +// counterStream.ChooseKey = func(item *badger.Item) (b bool) { +// key := make([]byte, index.Len+serial.Len) +// item.KeyCopy(key) +// s64 := serial.FromKey(key).Uint64() +// countMx.Lock() +// countFresh = append(countFresh, +// &count.Fresh{ +// Serial: s64, +// Freshness: timestamp.FromUnix(int64(binary.BigEndian.Uint64(v))), +// }) +// countMx.Unlock() +// return +// } +// // run in a background thread to parallelise all the streams +// if err = counterStream.Orchestrate(r.Ctx); chk.E(err) { +// return +// } +// // wait until all the jobs are complete +// sort.Sort(countFresh) +// if r.HasL2 { +// // if there is L2 we are marking pruned indexes as well +// // log.I.ToSliceOfBytes("counted %d pruned events in %v %s", len(pruned), +// // time.Now().Sub(pruneStarted), r.Path()) +// prunedBySerial = count.ItemsBySerial(pruned) +// sort.Sort(prunedBySerial) +// } +// // both slices are now sorted by serial, so we can now iterate the freshness +// // slice and write in the access timestamps to the unpruned +// // +// // this provides the least amount of iteration and computation to essentially +// // zip two tables together +// var unprunedCursor, prunedCursor int +// // we also need to create a map of serials to their respective array index, and +// // we know how big it has to be so we can avoid allocations during the iteration. +// // +// // if there is no L2 this will be an empty map and have nothing added to it. 
+// prunedMap := make(map[uint64]int, len(prunedBySerial)) +// for i := range countFresh { +// // populate freshness of unpruned item +// if len(unprunedBySerial) > i && countFresh[i].Serial == +// unprunedBySerial[unprunedCursor].Serial { +// // add the counter record to the size +// unprunedBySerial[unprunedCursor].Size += CounterLen +// unprunedBySerial[unprunedCursor].Freshness = countFresh[i].Freshness +// unprunedCursor++ +// // if there is no L2 we should not see any here anyway +// } else if r.HasL2 && len(prunedBySerial) > 0 && len(prunedBySerial) < prunedCursor { +// if countFresh[i].Serial == +// prunedBySerial[prunedCursor].Serial { +// // populate freshness of pruned item +// ps := prunedBySerial[prunedCursor] +// // add the counter record to the size +// ps.Size += CounterLen +// ps.Freshness = countFresh[i].Freshness +// prunedMap[ps.Serial] = prunedCursor +// prunedCursor++ +// } +// } +// } +// if r.HasL2 { +// // lastly, we need to count the size of all relevant transactions from the +// // pruned set +// for _, fp := range prefixes.FilterPrefixes { +// // this can all be done concurrently +// go func(fp []byte) { +// evStream = r.DB.NewStream() +// evStream.Prefix = fp +// evStream.ChooseKey = func(item *badger.Item) (b bool) { +// k := item.KeyCopy(nil) +// ser := serial.FromKey(k) +// uSer := ser.Uint64() +// countMx.Lock() +// // the pruned map allows us to (more) directly find the slice index relevant to +// // the serial +// pruned[prunedMap[uSer]].Size += uint32(len(k)) + uint32(item.ValueSize()) +// countMx.Unlock() +// return +// } +// }(fp) +// } +// } +// hw, _ := r.GetEventHeadroom() +// unprunedTotal = unpruned.Total() +// up := float64(unprunedTotal) +// var o string +// o += fmt.Sprintf("%8d complete,"+ +// "total %0.6f Gb,"+ +// "HW %0.6f Gb", +// len(unpruned), +// up/units.Gb, +// float64(hw)/units.Gb, +// ) +// if r.HasL2 { +// l2hw, _ := r.GetIndexHeadroom() +// prunedTotal = pruned.Total() +// p := float64(prunedTotal) +// if r.HasL2 { +// o += fmt.Sprintf(",%8d pruned,"+ +// "total %0.6f Gb,"+ +// "pruned HW %0.6f Gb,computed in %v,%s", +// len(pruned), +// p/units.Gb, +// float64(l2hw)/units.Gb, +// time.Now().Sub(overallStart), +// r.Path(), +// ) +// } +// } +// log.D.Ln(o) +// return +// } +// +// func (r *T) GetIndexHeadroom() (hw, lw int) { +// limit := r.DBSizeLimit - r.DBSizeLimit*r.DBHighWater/100 +// return limit * r.DBHighWater / 100, +// limit * r.DBLowWater / 100 +// } +// +// func (r *T) GetEventHeadroom() (hw, lw int) { +// return r.DBSizeLimit * r.DBHighWater / 100, +// r.DBSizeLimit * r.DBLowWater / 100 +// } diff --git a/ratel/gcmark.go b/ratel/gcmark.go new file mode 100644 index 0000000..77ab9a3 --- /dev/null +++ b/ratel/gcmark.go @@ -0,0 +1,63 @@ +package ratel + +// import ( +// "sort" +// +// "orly.dev/ratel/keys/count" +// "orly.dev/units" +// ) +// +// type DelItems []uint64 +// +// // GCMark first gathers the serial, data size and last accessed information +// // about all events and pruned events using GCCount then sorts the results of +// // the events and indexes by least recently accessed and generates the set of +// // serials of events that need to be deleted +// func (r *T) GCMark() (pruneEvents, pruneIndexes DelItems, err error) { +// var unpruned, pruned count.Items +// var uTotal, pTotal int +// if unpruned, pruned, uTotal, pTotal, err = r.GCCount(); chk.E(err) { +// return +// } +// hw, lw := r.GetEventHeadroom() +// if uTotal > hw { +// // run event GC mark +// sort.Sort(unpruned) +// pruneOff := uTotal - lw +// 
var cumulative, lastIndex int +// for lastIndex = range unpruned { +// if cumulative > pruneOff { +// break +// } +// cumulative += int(unpruned[lastIndex].Size) +// pruneEvents = append(pruneEvents, unpruned[lastIndex].Serial) +// } +// log.D.F("found %d events to prune,which will bring current "+ +// "utilization down to %0.6f Gb,%s", +// lastIndex-1, float64(uTotal-cumulative)/units.Gb, r.Path()) +// } +// l2hw, l2lw := r.GetIndexHeadroom() +// if r.HasL2 && pTotal > l2hw { +// // run index GC mark +// sort.Sort(pruned) +// var lastIndex int +// // we want to remove the oldest indexes until at or below the index low water mark. +// space := pTotal +// // count the number of events until the low water mark +// for lastIndex = range pruned { +// if space < l2lw { +// break +// } +// space -= int(pruned[lastIndex].Size) +// } +// log.D.F("deleting %d indexes using %d bytes to bring pruned index size to %d", +// lastIndex+1, pTotal-l2lw, space) +// for i := range pruned { +// if i > lastIndex { +// break +// } +// pruneIndexes = append(pruneIndexes, pruned[i].Serial) +// } +// } +// return +// } diff --git a/ratel/gcsweep.go b/ratel/gcsweep.go new file mode 100644 index 0000000..cc527ae --- /dev/null +++ b/ratel/gcsweep.go @@ -0,0 +1,124 @@ +package ratel + +// // GCSweep runs the delete on all of the items that GCMark has determined should be deleted. +// func (r *T) GCSweep(evs, idxs DelItems) (err error) { +// // first we must gather all the indexes of the relevant events +// started := time.Now() +// batch := r.DB.NewWriteBatch() +// defer func() { +// log.I.Ln("flushing GC sweep batch") +// if err = batch.Flush(); chk.E(err) { +// return +// } +// if vlerr := r.DB.RunValueLogGC(0.5); vlerr == nil { +// log.I.Ln("value log cleaned up") +// } +// chk.E(r.DB.Sync()) +// batch.Cancel() +// log.I.Ln("completed sweep in", time.Now().Sub(started), r.Path()) +// }() +// // var wg sync.WaitGroup +// // go func() { +// // wg.Add(1) +// // defer wg.Done() +// stream := r.DB.NewStream() +// // get all the event indexes to delete/prune +// stream.Prefix = prefixes.Event.Key() +// stream.ChooseKey = func(item *badger.Item) (boo bool) { +// if item.KeySize() != 1+serial.Len { +// return +// } +// if item.IsDeletedOrExpired() { +// return +// } +// key := item.KeyCopy(nil) +// ser := serial.FromKey(key).Uint64() +// var found bool +// for i := range evs { +// if evs[i] == ser { +// found = true +// break +// } +// } +// if !found { +// return +// } +// if r.HasL2 { +// // if it's already pruned, skip +// if item.ValueSize() == sha256.Size { +// return +// } +// // if there is L2 we are only pruning (replacing event with the Id hash) +// var evb []byte +// if evb, err = item.ValueCopy(nil); chk.E(err) { +// return +// } +// ev := &event.E{} +// var rem []byte +// if rem, err = r.Unmarshal(ev, evb); chk.E(err) { +// return +// } +// if len(rem) != 0 { +// log.I.S(rem) +// } +// // otherwise we are deleting +// if err = batch.Delete(key); chk.E(err) { +// return +// } +// if err = batch.Set(key, ev.Id); chk.E(err) { +// return +// } +// return +// } else { +// // otherwise we are deleting +// if err = batch.Delete(key); chk.E(err) { +// return +// } +// } +// return +// } +// // execute the event prune/delete +// if err = stream.Orchestrate(r.Ctx); chk.E(err) { +// return +// } +// // }() +// // next delete all the indexes +// if len(idxs) > 0 && r.HasL2 { +// log.I.Ln("pruning indexes") +// // we have to remove everything +// prfs := [][]byte{prefixes.Event.Key()} +// prfs = append(prfs, 
prefixes.FilterPrefixes...) +// prfs = append(prfs, []byte{prefixes.Counter.B()}) +// for _, prf := range prfs { +// stream = r.DB.NewStream() +// stream.Prefix = prf +// stream.ChooseKey = func(item *badger.Item) (boo bool) { +// if item.IsDeletedOrExpired() || item.KeySize() < serial.Len+1 { +// return +// } +// key := item.KeyCopy(nil) +// ser := serial.FromKey(key).Uint64() +// var found bool +// for _, idx := range idxs { +// if idx == ser { +// found = true +// break +// } +// } +// if !found { +// return +// } +// // log.I.ToSliceOfBytes("deleting index %x %d", prf, ser) +// if err = batch.Delete(key); chk.E(err) { +// return +// } +// return +// } +// if err = stream.Orchestrate(r.Ctx); chk.E(err) { +// return +// } +// log.T.Ln("completed index prefix", prf) +// } +// } +// return +// } diff --git a/ratel/getecounterkey.go b/ratel/getecounterkey.go new file mode 100644 index 0000000..879cc38 --- /dev/null +++ b/ratel/getecounterkey.go @@ -0,0 +1,10 @@ +package ratel + +//// GetCounterKey returns the proper counter key for a given event Id. This needs +//// a separate function because of what it does, but is generated in the general +//// GetIndexKeysForEvent function. +//func GetCounterKey(ser *serial.T) (key []byte) { +// key = prefixes.Counter.Key(ser) +// // log.T.ToSliceOfBytes("counter key %d %d", index.Counter, ser.Uint64()) +// return +//} diff --git a/ratel/getindexkeysforevent.go b/ratel/getindexkeysforevent.go new file mode 100644 index 0000000..63c6959 --- /dev/null +++ b/ratel/getindexkeysforevent.go @@ -0,0 +1,115 @@ +package ratel + +import ( + "bytes" + "orly.dev/chk" + "orly.dev/log" + + "orly.dev/event" + "orly.dev/eventid" + "orly.dev/ratel/keys" + "orly.dev/ratel/keys/createdat" + "orly.dev/ratel/keys/fullid" + "orly.dev/ratel/keys/fullpubkey" + "orly.dev/ratel/keys/id" + "orly.dev/ratel/keys/index" + "orly.dev/ratel/keys/kinder" + "orly.dev/ratel/keys/pubkey" + "orly.dev/ratel/keys/serial" + "orly.dev/ratel/prefixes" + "orly.dev/tag" +) + +// GetIndexKeysForEvent generates all the index keys required to filter for +// events. evtSerial should be the output of Serial() which gets a unique, +// monotonic counter value for each new event. 
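+//
+// A minimal illustrative sketch of the shape of keys produced here (the
+// element helpers are the same ones imported by this file; the names are
+// placeholders for the values built inside the function):
+//
+//	ID := id.New(eventid.NewWith(ev.Id))
+//	CA := createdat.New(ev.CreatedAt)
+//	idKey := prefixes.Id.Key(ID, ser)          // [ prefix ][ 8 byte id ][ 8 byte serial ]
+//	dateKey := prefixes.CreatedAt.Key(CA, ser) // [ prefix ][ 8 byte timestamp ][ 8 byte serial ]
+//
+// Every index key ends with the serial, so a hit on any index can be resolved
+// back to the event record stored under prefixes.Event.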
+func GetIndexKeysForEvent(ev *event.E, ser *serial.T) (keyz [][]byte) { + + var err error + keyz = make([][]byte, 0, 18) + ID := id.New(eventid.NewWith(ev.Id)) + CA := createdat.New(ev.CreatedAt) + K := kinder.New(ev.Kind.ToU16()) + PK, _ := pubkey.New(ev.Pubkey) + FID := fullid.New(eventid.NewWith(ev.Id)) + FPK := fullpubkey.New(ev.Pubkey) + // indexes + { // ~ by id + k := prefixes.Id.Key(ID, ser) + // log.T.ToSliceOfBytes("id key: %x %0x %0x", k[0], k[1:9], k[9:]) + keyz = append(keyz, k) + } + { // ~ by pubkey+date + k := prefixes.Pubkey.Key(PK, CA, ser) + // log.T.ToSliceOfBytes("pubkey + date key: %x %0x %0x %0x", + // k[0], k[1:9], k[9:17], k[17:]) + keyz = append(keyz, k) + } + { // ~ by kind+date + k := prefixes.Kind.Key(K, CA, ser) + // log.T.ToSliceOfBytes("kind + date key: %x %0x %0x %0x", + // k[0], k[1:3], k[3:11], k[11:]) + keyz = append(keyz, k) + } + { // ~ by pubkey+kind+date + k := prefixes.PubkeyKind.Key(PK, K, CA, ser) + // log.T.ToSliceOfBytes("pubkey + kind + date key: %x %0x %0x %0x %0x", + // k[0], k[1:9], k[9:11], k[11:19], k[19:]) + keyz = append(keyz, k) + } + // ~ by tag value + date + for i, t := range ev.Tags.ToSliceOfTags() { + // there is no value field + if t.Len() < 2 || + // the tag is not a-zA-Z probably (this would permit arbitrary other + // single byte chars) + len(t.ToSliceOfBytes()[0]) != 1 || + // the second field is zero length + len(t.ToSliceOfBytes()[1]) == 0 || + // the second field is more than 100 characters long + len(t.ToSliceOfBytes()[1]) > 100 { + // any of the above is true then the tag is not indexable + continue + } + var firstIndex int + var tt *tag.T + for firstIndex, tt = range ev.Tags.ToSliceOfTags() { + if tt.Len() >= 2 && bytes.Equal(tt.B(1), t.B(1)) { + break + } + } + if firstIndex != i { + // duplicate + continue + } + // get key prefix (with full length) and offset where to write the last + // parts + prf, elems := index.P(0), []keys.Element(nil) + if prf, elems, err = Create_a_Tag( + string(t.ToSliceOfBytes()[0]), + string(t.ToSliceOfBytes()[1]), CA, + ser, + ); chk.E(err) { + log.I.F("%v", t.ToStringSlice()) + return + } + k := prf.Key(elems...) + // log.T.ToSliceOfBytes("tag '%s': %s key %0x", t.ToSliceOfBytes()[0], t.ToSliceOfBytes()[1:], k) + keyz = append(keyz, k) + } + { // ~ by date only + k := prefixes.CreatedAt.Key(CA, ser) + // log.T.ToSliceOfBytes("date key: %x %0x %0x", k[0], k[1:9], k[9:]) + keyz = append(keyz, k) + } + // { // Counter index - for storing last access time of events. + // k := GetCounterKey(ser) + // keyz = append(keyz, k) + // } + { // - full Id index - enabling retrieving the event Id without unmarshalling the data + k := prefixes.FullIndex.Key(ser, FID, FPK, CA) + // log.T.ToSliceOfBytes("full id: %x %0x %0x", k[0], k[1:9], k[9:]) + keyz = append(keyz, k) + } + return +} diff --git a/ratel/gettagkeyprefix.go b/ratel/gettagkeyprefix.go new file mode 100644 index 0000000..9b79335 --- /dev/null +++ b/ratel/gettagkeyprefix.go @@ -0,0 +1,56 @@ +package ratel + +import ( + eventstore "orly.dev/addresstag" + "orly.dev/chk" + "orly.dev/hex" + "orly.dev/ratel/keys" + "orly.dev/ratel/keys/arb" + "orly.dev/ratel/keys/kinder" + "orly.dev/ratel/keys/pubkey" + "orly.dev/ratel/prefixes" +) + +// GetTagKeyPrefix returns tag index prefixes based on the initial field of a +// tag. 
+// +// There is 3 types of index tag keys: +// +// - TagAddr: [ 8 ][ 2b Kind ][ 8b Pubkey ][ address/URL ][ 8b Serial ] +// +// - Tag32: [ 7 ][ 8b Pubkey ][ 8b Serial ] +// +// - Tag: [ 6 ][ address/URL ][ 8b Serial ] +// +// This function produces the initial bytes without the index. +func GetTagKeyPrefix(tagValue string) (key []byte, err error) { + if k, pkb, d := eventstore.DecodeAddressTag(tagValue); len(pkb) == 32 { + // store value in the new special "a" tag index + var pk *pubkey.T + if pk, err = pubkey.NewFromBytes(pkb); chk.E(err) { + return + } + els := []keys.Element{kinder.New(k), pk} + if len(d) > 0 { + els = append(els, arb.New(d)) + } + key = prefixes.TagAddr.Key(els...) + } else if pkb, _ := hex.Dec(tagValue); len(pkb) == 32 { + // store value as bytes + var pkk *pubkey.T + if pkk, err = pubkey.NewFromBytes(pkb); chk.E(err) { + return + } + key = prefixes.Tag32.Key(pkk) + } else { + // store whatever as utf-8 + if len(tagValue) > 0 { + var a *arb.T + a = arb.New(tagValue) + key = prefixes.Tag.Key(a) + } else { + key = prefixes.Tag.Key() + } + } + return +} diff --git a/ratel/import.go b/ratel/import.go new file mode 100644 index 0000000..2986170 --- /dev/null +++ b/ratel/import.go @@ -0,0 +1,49 @@ +package ratel + +import ( + "bufio" + "io" + "orly.dev/chk" + "orly.dev/log" + + "orly.dev/event" +) + +const maxLen = 500000000 + +// Import a collection of events in line structured minified JSON format (JSONL). +func (r *T) Import(rr io.Reader) { + r.Flatten = true + var err error + scan := bufio.NewScanner(rr) + buf := make([]byte, maxLen) + scan.Buffer(buf, maxLen) + var count, total int + for scan.Scan() { + b := scan.Bytes() + total += len(b) + 1 + if len(b) < 1 { + continue + } + ev := &event.E{} + if _, err = ev.Unmarshal(b); err != nil { + continue + } + if _, _, err = r.SaveEvent(r.Ctx, ev); err != nil { + continue + } + count++ + if count%1000 == 0 { + log.I.F("received %d events", count) + } + if count > 0 && count%10000 == 0 { + chk.T(r.DB.Sync()) + chk.T(r.DB.RunValueLogGC(0.5)) + } + } + log.I.F("read %d bytes and saved %d events", total, count) + err = scan.Err() + if chk.E(err) { + } + return +} diff --git a/ratel/init.go b/ratel/init.go new file mode 100644 index 0000000..a76d4ea --- /dev/null +++ b/ratel/init.go @@ -0,0 +1,114 @@ +package ratel + +import ( + "encoding/binary" + "errors" + "fmt" + "orly.dev/chk" + "orly.dev/log" + + "github.com/dgraph-io/badger/v4" + "orly.dev/ratel/prefixes" +) + +// Init sets up the database with the loaded configuration. 
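+//
+// Illustrative call sequence, as a sketch only (the data directory path is a
+// placeholder; ctx and wg are assumed to come from the caller):
+//
+//	db := New(BackendParams{Ctx: ctx, WG: wg, LogLevel: logLevel, MaxLimit: DefaultMaxLimit})
+//	if err := db.Init("/path/to/data"); err != nil {
+//		// handle the error; the store is unusable if Init fails
+//	}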
+func (r *T) Init(path string) (err error) { + r.dataDir = path + log.I.Ln("opening ratel event store at", r.Path()) + opts := badger.DefaultOptions(r.dataDir) + // opts.BlockCacheSize = int64(r.BlockCacheSize) + // opts.BlockSize = 128 * units.Mb + // opts.CompactL0OnClose = true + // opts.LmaxCompaction = true + // switch r.Compression { + // case "none": + // opts.Compression = options.None + // case "snappy": + // opts.Compression = options.Snappy + // case "zstd": + // opts.Compression = options.ZSTD + // } + r.Logger = NewLogger(r.InitLogLevel, r.dataDir) + opts.Logger = r.Logger + if r.DB, err = badger.Open(opts); chk.E(err) { + return err + } + log.T.Ln("getting event store sequence index", r.dataDir) + if r.seq, err = r.DB.GetSequence([]byte("events"), 1000); chk.E(err) { + return err + } + log.T.Ln("running migrations", r.dataDir) + if err = r.runMigrations(); chk.E(err) { + return log.E.Err("error running migrations: %w; %s", err, r.dataDir) + } + // if r.DBSizeLimit > 0 { + // go r.GarbageCollector() + // // } else { + // // go r.GCCount() + // } + return nil + +} + +const Version = 1 + +func (r *T) runMigrations() (err error) { + return r.Update( + func(txn *badger.Txn) (err error) { + var version uint16 + var item *badger.Item + item, err = txn.Get(prefixes.Version.Key()) + if errors.Is(err, badger.ErrKeyNotFound) { + version = 0 + } else if chk.E(err) { + return err + } else { + chk.E( + item.Value( + func(val []byte) (err error) { + version = binary.BigEndian.Uint16(val) + return + }, + ), + ) + } + // do the migrations in increasing steps (there is no rollback) + if version < Version { + // if there is any data in the relay we will stop and notify the user, otherwise we + // just set version to 1 and proceed + prefix := prefixes.Id.Key() + it := txn.NewIterator( + badger.IteratorOptions{ + PrefetchValues: true, + PrefetchSize: 100, + Prefix: prefix, + }, + ) + defer it.Close() + hasAnyEntries := false + for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() { + hasAnyEntries = true + break + } + if hasAnyEntries { + return fmt.Errorf( + "your database is at version %d, but in order to migrate up "+ + "to version 1 you must manually export all the events and then import "+ + "again:\n"+ + "run an old version of this software, export the data, then delete the "+ + "database files, run the new version, import the data back it", + version, + ) + } + chk.E(r.bumpVersion(txn, Version)) + } + return nil + }, + ) +} + +func (r *T) bumpVersion(txn *badger.Txn, version uint16) error { + buf := make([]byte, 2) + binary.BigEndian.PutUint16(buf, version) + return txn.Set(prefixes.Version.Key(), buf) +} diff --git a/ratel/keys/arb/arb.go b/ratel/keys/arb/arb.go new file mode 100644 index 0000000..7c96d74 --- /dev/null +++ b/ratel/keys/arb/arb.go @@ -0,0 +1,94 @@ +// Package arb implements arbitrary length byte keys.Element. In any construction +// there can only be one with arbitrary length. Custom lengths can be created by +// calling New with the custom length in it, both for Read and Write operations. +package arb + +import ( + "bytes" + "io" + "orly.dev/chk" + "orly.dev/log" + + "orly.dev/ratel/keys" +) + +// T is an arbitrary length byte string. In any construction there can only be one with arbitrary length. Custom lengths +// can be created by calling New with the custom length in it, both for Read and Write operations. +type T struct { + Val []byte +} + +var _ keys.Element = &T{} + +// New creates a new arb.T. 
This must have the expected length for the provided byte slice as this is what the Read +// method will aim to copy. In general this will be a bounded field, either the final or only arbitrary length field in +// a key. +func New[V []byte | string](s V) (p *T) { + b := []byte(s) + if len(b) == 0 { + log.T.Ln( + "empty or nil slice is the same as zero value, " + + "use keys.ReadWithArbElem", + ) + return &T{} + } + return &T{Val: b} +} + +// NewWithLen creates a new arb.T of a given size. +func NewWithLen(l int) (p *T) { return &T{Val: make([]byte, l)} } + +// Write the contents of a bytes.Buffer +func (p *T) Write(buf io.Writer) { + if len(p.Val) == 0 { + log.T.Ln("empty slice has no effect") + return + } + buf.Write(p.Val) +} + +func (p *T) Read(buf io.Reader) (el keys.Element) { + if len(p.Val) < 1 { + log.T.Ln("empty slice has no effect") + return + } + if _, err := buf.Read(p.Val); chk.E(err) { + return nil + } + return p +} + +func (p *T) Len() int { + if p == nil { + panic("uninitialized pointer to arb.T") + } + return len(p.Val) +} + +// ReadWithArbElem is a variant of Read that recognises an arbitrary length element by its zero length and imputes its +// actual length by the byte buffer size and the lengths of the fixed length fields. +// +// For reasons of space efficiency, it is not practical to use TLVs for badger database key fields, so this will panic +// if there is more than one arbitrary length element. +func ReadWithArbElem(b []byte, elems ...keys.Element) { + var arbEl int + var arbSet bool + l := len(b) + for i, el := range elems { + elLen := el.Len() + l -= elLen + if elLen == 0 { + if arbSet { + panic("cannot have more than one arbitrary length field in a key") + } + arbEl = i + arbSet = true + } + } + // now we can say that the remainder is the correct length for the arb element + elems[arbEl] = New(make([]byte, l)) + buf := bytes.NewBuffer(b) + for _, el := range elems { + el.Read(buf) + } +} diff --git a/ratel/keys/arb/arb_test.go b/ratel/keys/arb/arb_test.go new file mode 100644 index 0000000..d713c6e --- /dev/null +++ b/ratel/keys/arb/arb_test.go @@ -0,0 +1,22 @@ +package arb + +import ( + "bytes" + "testing" + + "lukechampine.com/frand" +) + +func TestT(t *testing.T) { + randomBytes := frand.Bytes(frand.Intn(128)) + v := New(randomBytes) + buf := new(bytes.Buffer) + v.Write(buf) + randomCopy := make([]byte, len(randomBytes)) + buf2 := bytes.NewBuffer(buf.Bytes()) + v2 := New(randomCopy) + el := v2.Read(buf2).(*T) + if bytes.Compare(el.Val, v.Val) != 0 { + t.Fatalf("expected %x got %x", v.Val, el.Val) + } +} diff --git a/ratel/keys/count/count.go b/ratel/keys/count/count.go new file mode 100644 index 0000000..40e12d4 --- /dev/null +++ b/ratel/keys/count/count.go @@ -0,0 +1,47 @@ +// Package count contains a series of data types for managing lists of indexes +// for garbage collection. 
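+//
+// Illustrative sketch of how the sortable collections are intended to be used
+// (the item values are placeholders; a real census is produced by the GC code):
+//
+//	var items count.Items
+//	items = append(items, &count.Item{Serial: 1, Size: 512, Freshness: timestamp.Now()})
+//	sort.Sort(items)       // least recently accessed first
+//	total := items.Total() // cumulative size in bytes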
+package count + +import ( + "orly.dev/timestamp" +) + +type Item struct { + Serial uint64 + Size uint32 + Freshness *timestamp.T +} + +type Items []*Item + +func (c Items) Len() int { return len(c) } +func (c Items) Less(i, j int) bool { return c[i].Freshness.I64() < c[j].Freshness.I64() } +func (c Items) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +func (c Items) Total() (total int) { + for i := range c { + total += int(c[i].Size) + } + return +} + +type ItemsBySerial []*Item + +func (c ItemsBySerial) Len() int { return len(c) } +func (c ItemsBySerial) Less(i, j int) bool { return c[i].Serial < c[j].Serial } +func (c ItemsBySerial) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +func (c ItemsBySerial) Total() (total int) { + for i := range c { + total += int(c[i].Size) + } + return +} + +type Fresh struct { + Serial uint64 + Freshness *timestamp.T +} +type Freshes []*Fresh + +func (c Freshes) Len() int { return len(c) } +func (c Freshes) Less(i, j int) bool { return c[i].Freshness.I64() < c[j].Freshness.I64() } +func (c Freshes) Swap(i, j int) { c[i], c[j] = c[j], c[i] } diff --git a/ratel/keys/createdat/createdat.go b/ratel/keys/createdat/createdat.go new file mode 100644 index 0000000..1350978 --- /dev/null +++ b/ratel/keys/createdat/createdat.go @@ -0,0 +1,49 @@ +// Package createdat implements a badger key index keys.Element for timestamps. +package createdat + +import ( + "encoding/binary" + "io" + "orly.dev/chk" + "orly.dev/errorf" + + "orly.dev/ratel/keys" + "orly.dev/ratel/keys/serial" + "orly.dev/timestamp" +) + +const Len = 8 + +type T struct { + Val *timestamp.T +} + +var _ keys.Element = &T{} + +func New(c *timestamp.T) (p *T) { return &T{Val: c} } + +func (c *T) Write(buf io.Writer) { buf.Write(c.Val.Bytes()) } + +func (c *T) Read(buf io.Reader) (el keys.Element) { + b := make([]byte, Len) + if n, err := buf.Read(b); chk.E(err) || n != Len { + return nil + } + c.Val = timestamp.FromUnix(int64(binary.BigEndian.Uint64(b))) + return c +} + +func (c *T) Len() int { return Len } + +// FromKey expects to find a datestamp in the 8 bytes before a serial in a key. +func FromKey(k []byte) (p *T) { + if len(k) < Len+serial.Len { + err := errorf.F( + "cannot get a serial without at least %d bytes", Len+serial.Len, + ) + panic(err) + } + key := make([]byte, 0, Len) + key = append(key, k[len(k)-Len-serial.Len:len(k)-serial.Len]...) + return &T{Val: timestamp.FromBytes(key)} +} diff --git a/ratel/keys/createdat/createdat_test.go b/ratel/keys/createdat/createdat_test.go new file mode 100644 index 0000000..40f2d26 --- /dev/null +++ b/ratel/keys/createdat/createdat_test.go @@ -0,0 +1,26 @@ +package createdat + +import ( + "bytes" + "math" + "testing" + + "lukechampine.com/frand" + + "orly.dev/timestamp" +) + +func TestT(t *testing.T) { + for _ = range 1000000 { + n := timestamp.FromUnix(int64(frand.Intn(math.MaxInt64))) + v := New(n) + buf := new(bytes.Buffer) + v.Write(buf) + buf2 := bytes.NewBuffer(buf.Bytes()) + v2 := New(timestamp.New()) + el := v2.Read(buf2).(*T) + if el.Val.Int() != n.Int() { + t.Fatalf("expected %d got %d", n.Int(), el.Val.Int()) + } + } +} diff --git a/ratel/keys/fullid/fullid.go b/ratel/keys/fullid/fullid.go new file mode 100644 index 0000000..82ee878 --- /dev/null +++ b/ratel/keys/fullid/fullid.go @@ -0,0 +1,48 @@ +// Package fullid implements a keys.Element for a complete 32 byte event Ids. 
+package fullid + +import ( + "fmt" + "io" + "orly.dev/chk" + + "orly.dev/ratel/keys" + "orly.dev/sha256" + + "orly.dev/eventid" +) + +const Len = sha256.Size + +type T struct { + Val []byte +} + +var _ keys.Element = &T{} + +func New(evID ...*eventid.T) (p *T) { + if len(evID) < 1 { + return &T{make([]byte, Len)} + } + return &T{Val: evID[0].Bytes()} +} + +func (p *T) Write(buf io.Writer) { + if len(p.Val) != Len { + panic(fmt.Sprintln("must use New or initialize Val with len", Len)) + } + buf.Write(p.Val) +} + +func (p *T) Read(buf io.Reader) (el keys.Element) { + // allow uninitialized struct + if len(p.Val) != Len { + p.Val = make([]byte, Len) + } + if n, err := buf.Read(p.Val); chk.E(err) || n != Len { + return nil + } + return p +} + +func (p *T) Len() int { return Len } diff --git a/ratel/keys/fullid/fullid_test.go b/ratel/keys/fullid/fullid_test.go new file mode 100644 index 0000000..6ffd221 --- /dev/null +++ b/ratel/keys/fullid/fullid_test.go @@ -0,0 +1,25 @@ +package fullid + +import ( + "bytes" + "testing" + + "lukechampine.com/frand" + + "orly.dev/eventid" + "orly.dev/sha256" +) + +func TestT(t *testing.T) { + fakeIdBytes := frand.Bytes(sha256.Size) + id := eventid.NewWith(fakeIdBytes) + v := New(id) + buf := new(bytes.Buffer) + v.Write(buf) + buf2 := bytes.NewBuffer(buf.Bytes()) + v2 := New() + el := v2.Read(buf2).(*T) + if bytes.Compare(el.Val, v.Val) != 0 { + t.Fatalf("expected %x got %x", v.Val, el.Val) + } +} diff --git a/ratel/keys/fullpubkey/fullpubkey.go b/ratel/keys/fullpubkey/fullpubkey.go new file mode 100644 index 0000000..b6d6e1e --- /dev/null +++ b/ratel/keys/fullpubkey/fullpubkey.go @@ -0,0 +1,47 @@ +// Package fullpubkey implements a keys.Element for a complete 32 byte nostr +// pubkeys. +package fullpubkey + +import ( + "fmt" + "io" + "orly.dev/chk" + + "orly.dev/ec/schnorr" + "orly.dev/ratel/keys" +) + +const Len = schnorr.PubKeyBytesLen + +type T struct { + Val []byte +} + +var _ keys.Element = &T{} + +func New(evID ...[]byte) (p *T) { + if len(evID) < 1 || len(evID[0]) < 1 { + return &T{make([]byte, Len)} + } + return &T{Val: evID[0]} +} + +func (p *T) Write(buf io.Writer) { + if len(p.Val) != Len { + panic(fmt.Sprintln("must use New or initialize Val with len", Len)) + } + buf.Write(p.Val) +} + +func (p *T) Read(buf io.Reader) (el keys.Element) { + // allow uninitialized struct + if len(p.Val) != Len { + p.Val = make([]byte, Len) + } + if n, err := buf.Read(p.Val); chk.E(err) || n != Len { + return nil + } + return p +} + +func (p *T) Len() int { return len(p.Val) } diff --git a/ratel/keys/fullpubkey/fullpubkey_test.go b/ratel/keys/fullpubkey/fullpubkey_test.go new file mode 100644 index 0000000..8740619 --- /dev/null +++ b/ratel/keys/fullpubkey/fullpubkey_test.go @@ -0,0 +1,23 @@ +package fullpubkey + +import ( + "bytes" + "testing" + + "lukechampine.com/frand" + + "orly.dev/sha256" +) + +func TestT(t *testing.T) { + pk := frand.Bytes(sha256.Size) + v := New(pk) + buf := new(bytes.Buffer) + v.Write(buf) + buf2 := bytes.NewBuffer(buf.Bytes()) + v2 := New() + el := v2.Read(buf2).(*T) + if bytes.Compare(el.Val, v.Val) != 0 { + t.Fatalf("expected %x got %x", v.Val, el.Val) + } +} diff --git a/ratel/keys/id/id.go b/ratel/keys/id/id.go new file mode 100644 index 0000000..c551503 --- /dev/null +++ b/ratel/keys/id/id.go @@ -0,0 +1,72 @@ +// Package id implements a keys.Element for a truncated event Ids containing the +// first 8 bytes of an eventid.T. 
+package id + +import ( + "fmt" + "io" + "orly.dev/chk" + "orly.dev/errorf" + "strings" + + "orly.dev/ratel/keys" + "orly.dev/sha256" + + "orly.dev/eventid" + "orly.dev/hex" +) + +const Len = 8 + +type T struct { + Val []byte +} + +var _ keys.Element = &T{} + +func New(evID ...*eventid.T) (p *T) { + if len(evID) < 1 || len(evID[0].String()) < 1 { + return &T{make([]byte, Len)} + } + evid := evID[0].String() + if len(evid) < 64 { + evid = strings.Repeat("0", 64-len(evid)) + evid + } + if len(evid) > 64 { + evid = evid[:64] + } + b, err := hex.Dec(evid[:Len*2]) + if chk.E(err) { + return + } + return &T{Val: b} +} + +func NewFromBytes(b []byte) (p *T, err error) { + if len(b) != sha256.Size { + err = errorf.E("event Id must be 32 bytes got: %d %0x", len(b), b) + return + } + p = &T{Val: b[:Len]} + return +} + +func (p *T) Write(buf io.Writer) { + if len(p.Val) != Len { + panic(fmt.Sprintln("must use New or initialize Val with len", Len)) + } + buf.Write(p.Val) +} + +func (p *T) Read(buf io.Reader) (el keys.Element) { + // allow uninitialized struct + if len(p.Val) != Len { + p.Val = make([]byte, Len) + } + if n, err := buf.Read(p.Val); chk.E(err) || n != Len { + return nil + } + return p +} + +func (p *T) Len() int { return Len } diff --git a/ratel/keys/id/id_test.go b/ratel/keys/id/id_test.go new file mode 100644 index 0000000..ff4ab69 --- /dev/null +++ b/ratel/keys/id/id_test.go @@ -0,0 +1,24 @@ +package id + +import ( + "bytes" + "testing" + + "lukechampine.com/frand" + "orly.dev/eventid" + "orly.dev/sha256" +) + +func TestT(t *testing.T) { + fakeIdBytes := frand.Bytes(sha256.Size) + id := eventid.NewWith(fakeIdBytes) + v := New(id) + buf := new(bytes.Buffer) + v.Write(buf) + buf2 := bytes.NewBuffer(buf.Bytes()) + v2 := New() + el := v2.Read(buf2).(*T) + if bytes.Compare(el.Val, v.Val) != 0 { + t.Fatalf("expected %x got %x", v.Val, el.Val) + } +} diff --git a/ratel/keys/index/index.go b/ratel/keys/index/index.go new file mode 100644 index 0000000..4a6909c --- /dev/null +++ b/ratel/keys/index/index.go @@ -0,0 +1,52 @@ +// Package index implements the single byte prefix of the database keys. This +// means a limit of 256 tables but is plenty for a single purpose nostr event +// store. +package index + +import ( + "fmt" + "io" + "orly.dev/chk" + + "orly.dev/ratel/keys" +) + +const Len = 1 + +type T struct { + Val []byte +} + +var _ keys.Element = &T{} + +func New[V byte | P | int](code ...V) (p *T) { + var cod []byte + switch len(code) { + case 0: + cod = []byte{0} + default: + cod = []byte{byte(code[0])} + } + return &T{Val: cod} +} + +func Empty() (p *T) { + return &T{Val: []byte{0}} +} + +func (p *T) Write(buf io.Writer) { + if len(p.Val) != Len { + panic(fmt.Sprintln("must use New or initialize Val with len", Len)) + } + buf.Write(p.Val) +} + +func (p *T) Read(buf io.Reader) (el keys.Element) { + p.Val = make([]byte, Len) + if n, err := buf.Read(p.Val); chk.E(err) || n != Len { + return nil + } + return p +} + +func (p *T) Len() int { return Len } diff --git a/ratel/keys/index/prefixes.go b/ratel/keys/index/prefixes.go new file mode 100644 index 0000000..1308670 --- /dev/null +++ b/ratel/keys/index/prefixes.go @@ -0,0 +1,32 @@ +package index + +import ( + "orly.dev/ratel/keys" +) + +type P byte + +// Key writes a key with the P prefix byte and an arbitrary list of +// keys.Element. 
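+//
+// Illustrative sketch: the ratel/prefixes package defines named P values, so a
+// typical call looks like the following (idElement and ser are placeholders
+// for keys.Element values):
+//
+//	k := prefixes.Id.Key(idElement, ser) // 1 prefix byte, then each element in order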
+func (p P) Key(element ...keys.Element) (b []byte) { + b = keys.Write( + append([]keys.Element{New(byte(p))}, element...)..., + ) + // log.T.ToSliceOfBytes("key %x", b) + return +} + +// B returns the index.P as a byte. +func (p P) B() byte { return byte(p) } + +// I returns the index.P as an int (for use with the KeySizes. +func (p P) I() int { return int(p) } + +// GetAsBytes todo wat is dis? +func GetAsBytes(prf ...P) (b [][]byte) { + b = make([][]byte, len(prf)) + for i := range prf { + b[i] = []byte{byte(prf[i])} + } + return +} diff --git a/ratel/keys/keys.go b/ratel/keys/keys.go new file mode 100644 index 0000000..8182bb2 --- /dev/null +++ b/ratel/keys/keys.go @@ -0,0 +1,44 @@ +// Package keys is a composable framework for constructing badger keys from +// fields of events. +package keys + +import ( + "bytes" + "io" +) + +// Element is an enveloper for a type that can Read and Write its binary form. +type Element interface { + // Write the binary form of the field into the given bytes.Buffer. + Write(buf io.Writer) + // Read accepts a bytes.Buffer and decodes a field from it. + Read(buf io.Reader) Element + // Len gives the length of the bytes output by the type. + Len() int +} + +// Write the contents of each Element to a byte slice. +func Write(elems ...Element) []byte { + // get the length of the buffer required + var length int + for _, el := range elems { + length += el.Len() + } + buf := bytes.NewBuffer(make([]byte, 0, length)) + // write out the data from each element + for _, el := range elems { + el.Write(buf) + } + return buf.Bytes() +} + +// Read the contents of a byte slice into the provided list of Element types. +func Read(b []byte, elems ...Element) { + buf := bytes.NewBuffer(b) + for _, el := range elems { + el.Read(buf) + } +} + +// Make is a convenience method to wrap a list of Element into a slice. 
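+//
+// A small illustrative round trip combining Make with Write and Read (the
+// element values are placeholders for any keys.Element implementations):
+//
+//	elems := Make(kindElement, createdAtElement, serialElement)
+//	b := Write(elems...)
+//	Read(b, kindCopy, createdAtCopy, serialCopy)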
+func Make(elems ...Element) []Element { return elems } diff --git a/ratel/keys/keys_test.go b/ratel/keys/keys_test.go new file mode 100644 index 0000000..af34b4c --- /dev/null +++ b/ratel/keys/keys_test.go @@ -0,0 +1,142 @@ +// package keys_test needs to be a different package name or the implementation +// types imports will circular +package keys_test + +import ( + "bytes" + "crypto/sha256" + "testing" + + "lukechampine.com/frand" + + "orly.dev/ec/schnorr" + "orly.dev/eventid" + "orly.dev/kind" + "orly.dev/ratel/keys" + "orly.dev/ratel/keys/createdat" + "orly.dev/ratel/keys/id" + "orly.dev/ratel/keys/index" + "orly.dev/ratel/keys/kinder" + "orly.dev/ratel/keys/pubkey" + "orly.dev/ratel/keys/serial" + "orly.dev/ratel/prefixes" + "orly.dev/timestamp" +) + +func TestElement(t *testing.T) { + for _ = range 100000 { + var failed bool + { // construct a typical key type of structure + // a prefix + np := prefixes.Version + vp := index.New(byte(np)) + // an id + fakeIdBytes := frand.Bytes(sha256.Size) + i := eventid.NewWith(fakeIdBytes) + vid := id.New(i) + // a kinder + n := kind.New(1059) + vk := kinder.New(n.K) + // a pubkey + fakePubkeyBytes := frand.Bytes(schnorr.PubKeyBytesLen) + var vpk *pubkey.T + var err error + vpk, err = pubkey.NewFromBytes(fakePubkeyBytes) + if err != nil { + t.Fatal(err) + } + // a createdat + ts := timestamp.Now() + vca := createdat.New(ts) + // a serial + fakeSerialBytes := frand.Bytes(serial.Len) + vs := serial.New(fakeSerialBytes) + // write Element list into buffer + b := keys.Write(vp, vid, vk, vpk, vca, vs) + // check that values decoded all correctly + // we expect the following types, so we must create them: + var vp2 = index.New(0) + var vid2 = id.New() + var vk2 = kinder.New(0) + var vpk2 *pubkey.T + vpk2, err = pubkey.New() + if err != nil { + t.Fatal(err) + } + var vca2 = createdat.New(timestamp.New()) + var vs2 = serial.New(nil) + // read it in + keys.Read(b, vp2, vid2, vk2, vpk2, vca2, vs2) + // this is a lot of tests, so use switch syntax + switch { + case bytes.Compare(vp.Val, vp2.Val) != 0: + t.Logf( + "failed to decode correctly got %v expected %v", vp2.Val, + vp.Val, + ) + failed = true + fallthrough + case bytes.Compare(vid.Val, vid2.Val) != 0: + t.Logf( + "failed to decode correctly got %v expected %v", vid2.Val, + vid.Val, + ) + failed = true + fallthrough + case vk.Val.ToU16() != vk2.Val.ToU16(): + t.Logf( + "failed to decode correctly got %v expected %v", vk2.Val, + vk.Val, + ) + failed = true + fallthrough + case !bytes.Equal(vpk.Val, vpk2.Val): + t.Logf( + "failed to decode correctly got %v expected %v", vpk2.Val, + vpk.Val, + ) + failed = true + fallthrough + case vca.Val.I64() != vca2.Val.I64(): + t.Logf( + "failed to decode correctly got %v expected %v", vca2.Val, + vca.Val, + ) + failed = true + fallthrough + case !bytes.Equal(vs.Val, vs2.Val): + t.Logf( + "failed to decode correctly got %v expected %v", vpk2.Val, + vpk.Val, + ) + failed = true + } + } + { // construct a counter value + // a createdat + ts := timestamp.Now() + vca := createdat.New(ts) + // a sizer + // n := uint32(frand.Uint64n(math.MaxUint32)) + // write out values + b := keys.Write(vca) + // check that values decoded all correctly + // we expect the following types, so we must create them: + var vca2 = createdat.New(timestamp.New()) + // read it in + keys.Read(b, vca2) + // check they match + + if vca.Val.I64() != vca2.Val.I64() { + t.Logf( + "failed to decode correctly got %v expected %v", vca2.Val, + vca.Val, + ) + failed = true + } + } + if failed { + 
t.FailNow() + } + } +} diff --git a/ratel/keys/kinder/kind.go b/ratel/keys/kinder/kind.go new file mode 100644 index 0000000..c205714 --- /dev/null +++ b/ratel/keys/kinder/kind.go @@ -0,0 +1,45 @@ +// Package kinder implements a keys.Element for the 16 bit nostr 'kind' value +// for use in indexes. +package kinder + +import ( + "encoding/binary" + "io" + "orly.dev/chk" + + "orly.dev/kind" + "orly.dev/ratel/keys" +) + +const Len = 2 + +type T struct { + Val *kind.T +} + +var _ keys.Element = &T{} + +// New creates a new kinder.T for reading/writing kind.T values. +func New[V uint16 | uint32 | int32 | uint64 | int64 | int](c V) (p *T) { return &T{Val: kind.New(c)} } + +func Make(c *kind.T) (v []byte) { + v = make([]byte, Len) + binary.BigEndian.PutUint16(v, c.K) + return +} + +func (c *T) Write(buf io.Writer) { + buf.Write(Make(c.Val)) +} + +func (c *T) Read(buf io.Reader) (el keys.Element) { + b := make([]byte, Len) + if n, err := buf.Read(b); chk.E(err) || n != Len { + return nil + } + v := binary.BigEndian.Uint16(b) + c.Val = kind.New(v) + return c +} + +func (c *T) Len() int { return Len } diff --git a/ratel/keys/kinder/kind_test.go b/ratel/keys/kinder/kind_test.go new file mode 100644 index 0000000..dce57cb --- /dev/null +++ b/ratel/keys/kinder/kind_test.go @@ -0,0 +1,21 @@ +package kinder + +import ( + "bytes" + "testing" + + "orly.dev/kind" +) + +func TestT(t *testing.T) { + n := kind.New(1059) + v := New(n.ToU16()) + buf := new(bytes.Buffer) + v.Write(buf) + buf2 := bytes.NewBuffer(buf.Bytes()) + v2 := New(0) + el := v2.Read(buf2).(*T) + if el.Val.ToU16() != n.ToU16() { + t.Fatalf("expected %d got %d", n, el.Val) + } +} diff --git a/ratel/keys/pubkey/pubkey.go b/ratel/keys/pubkey/pubkey.go new file mode 100644 index 0000000..860bb43 --- /dev/null +++ b/ratel/keys/pubkey/pubkey.go @@ -0,0 +1,75 @@ +// Package pubkey implements an 8 byte truncated public key implementation of a +// keys.Element. +package pubkey + +import ( + "fmt" + "io" + "orly.dev/chk" + "orly.dev/log" + + "orly.dev/ec/schnorr" + "orly.dev/ratel/keys" +) + +const Len = 8 + +type T struct { + Val []byte +} + +var _ keys.Element = &T{} + +// New creates a new pubkey prefix, if parameter is omitted, new one is +// allocated (for read) if more than one is given, only the first is used, and +// if the first one is not the correct hexadecimal length of 64, return error. 
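+//
+// Illustrative sketch (pkBytes is assumed to be a 32 byte schnorr public key):
+//
+//	p, err := pubkey.New(pkBytes) // p.Val keeps only the first 8 bytes as the index prefix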
+func New(pk ...[]byte) (p *T, err error) { + if len(pk) < 1 { + // allows init with no parameter + return &T{make([]byte, Len)}, nil + } + // // only the first pubkey will be used + if len(pk[0]) != schnorr.PubKeyBytesLen { + err = log.E.Err("pubkey hex must be 32 chars, got", len(pk[0])) + return + } + return &T{Val: pk[0][:Len]}, nil +} + +func NewFromBytes(pkb []byte) (p *T, err error) { + if len(pkb) != schnorr.PubKeyBytesLen { + err = log.E.Err( + "provided key not correct length, got %d expected %d", + len(pkb), schnorr.PubKeyBytesLen, + ) + log.T.S(pkb) + return + } + b := make([]byte, Len) + copy(b, pkb[:Len]) + p = &T{Val: b} + return +} + +func (p *T) Write(buf io.Writer) { + if p == nil { + panic("nil pubkey") + } + if p.Val == nil || len(p.Val) != Len { + panic(fmt.Sprintln("must use New or initialize Val with len", Len)) + } + buf.Write(p.Val) +} + +func (p *T) Read(buf io.Reader) (el keys.Element) { + // allow uninitialized struct + if len(p.Val) != Len { + p.Val = make([]byte, Len) + } + if n, err := buf.Read(p.Val); chk.E(err) || n != Len { + return nil + } + return p +} + +func (p *T) Len() int { return Len } diff --git a/ratel/keys/pubkey/pubkey_test.go b/ratel/keys/pubkey/pubkey_test.go new file mode 100644 index 0000000..bb6a46e --- /dev/null +++ b/ratel/keys/pubkey/pubkey_test.go @@ -0,0 +1,29 @@ +package pubkey + +import ( + "bytes" + "orly.dev/chk" + "testing" + + "lukechampine.com/frand" + + "orly.dev/ec/schnorr" +) + +func TestT(t *testing.T) { + for _ = range 10000000 { + fakePubkeyBytes := frand.Bytes(schnorr.PubKeyBytesLen) + v, err := New(fakePubkeyBytes) + if chk.E(err) { + t.FailNow() + } + buf := new(bytes.Buffer) + v.Write(buf) + buf2 := bytes.NewBuffer(buf.Bytes()) + v2, _ := New() + el := v2.Read(buf2).(*T) + if bytes.Compare(el.Val, v.Val) != 0 { + t.Fatalf("expected %x got %x", v.Val, el.Val) + } + } +} diff --git a/ratel/keys/serial/serial.go b/ratel/keys/serial/serial.go new file mode 100644 index 0000000..3a2cc25 --- /dev/null +++ b/ratel/keys/serial/serial.go @@ -0,0 +1,85 @@ +// Package serial implements a keys.Element for encoding a serial (monotonic 64 +// bit counter) for stored events, used to link an index to the main data table. +package serial + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "orly.dev/chk" + + "orly.dev/ratel/keys" +) + +const Len = 8 + +// T is a badger DB serial number used for conflict free event record keys. +type T struct { + Val []byte +} + +var _ keys.Element = &T{} + +// New returns a new serial record key.Element - if nil or short slice is given, +// initialize a fresh one with Len (for reading), otherwise if equal or longer, +// trim if long and store into struct (for writing). +func New(ser []byte) (p *T) { + switch { + case len(ser) < Len: + // log.I.Ln("empty serial") + // allows use of nil to init + ser = make([]byte, Len) + default: + ser = ser[:Len] + } + return &T{Val: ser} +} + +// FromKey expects the last Len bytes of the given slice to be the serial. 
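+//
+// Illustrative sketch: since every index key ends with the serial, a hit from
+// an index scan can be mapped back to the event record key (indexKey is a
+// placeholder for any full index key):
+//
+//	ser := serial.FromKey(indexKey)
+//	evKey := prefixes.Event.Key(ser)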
+func FromKey(k []byte) (p *T) { + if len(k) < Len { + panic(fmt.Sprintf("cannot get a serial without at least 8 bytes %x", k)) + } + key := make([]byte, Len) + copy(key, k[len(k)-Len:]) + return &T{Val: key} +} + +func Make(s uint64) (ser []byte) { + ser = make([]byte, 8) + binary.BigEndian.PutUint64(ser, s) + return +} + +func (p *T) Write(buf io.Writer) { + if len(p.Val) != Len { + panic(fmt.Sprintln("must use New or initialize Val with len", Len)) + } + buf.Write(p.Val) +} + +func (p *T) Read(buf io.Reader) (el keys.Element) { + // allow uninitialized struct + if len(p.Val) != Len { + p.Val = make([]byte, Len) + } + if n, err := buf.Read(p.Val); chk.E(err) || n != Len { + return nil + } + return p +} + +func (p *T) Len() int { return Len } +func (p *T) Uint64() (u uint64) { return binary.BigEndian.Uint64(p.Val) } + +// Match compares a key bytes to a serial, all indexes have the serial at +// the end indicating the event record they refer to, and if they match returns +// true. +func Match(index, ser []byte) bool { + l := len(index) + if l < Len { + return false + } + return bytes.Compare(index[l-Len:], ser) == 0 +} diff --git a/ratel/keys/serial/serial_test.go b/ratel/keys/serial/serial_test.go new file mode 100644 index 0000000..8606f5e --- /dev/null +++ b/ratel/keys/serial/serial_test.go @@ -0,0 +1,23 @@ +package serial_test + +import ( + "bytes" + "testing" + + "orly.dev/ratel/keys/serial" + + "lukechampine.com/frand" +) + +func TestT(t *testing.T) { + fakeSerialBytes := frand.Bytes(serial.Len) + v := serial.New(fakeSerialBytes) + buf := new(bytes.Buffer) + v.Write(buf) + buf2 := bytes.NewBuffer(buf.Bytes()) + v2 := &serial.T{} // or can use New(nil) + el := v2.Read(buf2).(*serial.T) + if bytes.Compare(el.Val, v.Val) != 0 { + t.Fatalf("expected %x got %x", v.Val, el.Val) + } +} diff --git a/ratel/keys/tombstone/tombstone.go b/ratel/keys/tombstone/tombstone.go new file mode 100644 index 0000000..e7029ad --- /dev/null +++ b/ratel/keys/tombstone/tombstone.go @@ -0,0 +1,49 @@ +// Package tombstone is a 16 byte truncated event Id for keys.Element used to +// mark an event as being deleted so it isn't saved again. 
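+//
+// Illustrative sketch of how a tombstone key is assembled on delete, mirroring
+// the usage in DeleteEvent:
+//
+//	ts := tombstone.NewWith(ev.EventId())
+//	key := prefixes.Tombstone.Key(ts, createdat.New(timestamp.Now()))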
+package tombstone + +import ( + "io" + "orly.dev/chk" + "orly.dev/log" + + "orly.dev/eventid" + "orly.dev/ratel/keys" +) + +const Len = 16 + +type T struct { + val []byte +} + +var _ keys.Element = &T{} + +func Make(eid *eventid.T) (v []byte) { + v = make([]byte, Len) + copy(v, eid.Bytes()) + return +} + +func New() (t *T) { return new(T) } + +func NewWith(eid *eventid.T) (t *T) { + t = &T{val: Make(eid)} + return +} + +func (t *T) Write(buf io.Writer) { + buf.Write(t.val) +} + +func (t *T) Read(buf io.Reader) (el keys.Element) { + b := make([]byte, Len) + if n, err := buf.Read(b); chk.E(err) || n < Len { + log.I.S(n, err) + return nil + } + t.val = b + return &T{val: b} +} + +func (t *T) Len() int { return Len } diff --git a/ratel/keys/tombstone/tombstone_test.go b/ratel/keys/tombstone/tombstone_test.go new file mode 100644 index 0000000..cc0049d --- /dev/null +++ b/ratel/keys/tombstone/tombstone_test.go @@ -0,0 +1,23 @@ +package tombstone + +import ( + "bytes" + "testing" + + "lukechampine.com/frand" + + "orly.dev/eventid" +) + +func TestT(t *testing.T) { + id := frand.Entropy256() + ts := NewWith(eventid.NewWith(id[:])) + buf := new(bytes.Buffer) + ts.Write(buf) + buf2 := bytes.NewBuffer(buf.Bytes()) + ts2 := New() + ts2.Read(buf2) + if !bytes.Equal(ts.val, ts2.val) { + t.Errorf("expected %0x got %0x", ts.val, ts2.val) + } +} diff --git a/ratel/keys/util_test.go b/ratel/keys/util_test.go new file mode 100644 index 0000000..63a0a44 --- /dev/null +++ b/ratel/keys/util_test.go @@ -0,0 +1,9 @@ +package keys_test + +import ( + "orly.dev/lol" +) + +var ( + log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf +) diff --git a/ratel/log.go b/ratel/log.go new file mode 100644 index 0000000..c8248d6 --- /dev/null +++ b/ratel/log.go @@ -0,0 +1,69 @@ +package ratel + +import ( + "fmt" + "orly.dev/log" + "runtime" + "strings" + + "orly.dev/atomic" + "orly.dev/lol" +) + +// NewLogger creates a new badger logger. +func NewLogger(logLevel int, label string) (l *logger) { + log.T.Ln("getting logger for", label) + l = &logger{Label: label} + l.Level.Store(int32(logLevel)) + return +} + +type logger struct { + Level atomic.Int32 + Label string +} + +// SetLogLevel atomically adjusts the log level to the given log level code. +func (l *logger) SetLogLevel(level int) { + l.Level.Store(int32(level)) +} + +// Errorf is a log printer for this level of message. +func (l *logger) Errorf(s string, i ...interface{}) { + if l.Level.Load() >= lol.Error { + s = l.Label + ": " + s + txt := fmt.Sprintf(s, i...) + _, file, line, _ := runtime.Caller(2) + log.E.F("%s\n%s:%d", strings.TrimSpace(txt), file, line) + } +} + +// Warningf is a log printer for this level of message. +func (l *logger) Warningf(s string, i ...interface{}) { + if l.Level.Load() >= lol.Warn { + s = l.Label + ": " + s + txt := fmt.Sprintf(s, i...) + _, file, line, _ := runtime.Caller(2) + log.D.F("%s\n%s:%d", strings.TrimSpace(txt), file, line) + } +} + +// Infof is a log printer for this level of message. +func (l *logger) Infof(s string, i ...interface{}) { + if l.Level.Load() >= lol.Info { + s = l.Label + ": " + s + txt := fmt.Sprintf(s, i...) + _, file, line, _ := runtime.Caller(2) + log.D.F("%s\n%s:%d", strings.TrimSpace(txt), file, line) + } +} + +// Debugf is a log printer for this level of message. +func (l *logger) Debugf(s string, i ...interface{}) { + if l.Level.Load() >= lol.Debug { + s = l.Label + ": " + s + txt := fmt.Sprintf(s, i...) 
+ _, file, line, _ := runtime.Caller(2) + log.T.F("%s\n%s:%d", strings.TrimSpace(txt), file, line) + } +} diff --git a/ratel/main.go b/ratel/main.go new file mode 100644 index 0000000..abbbe2b --- /dev/null +++ b/ratel/main.go @@ -0,0 +1,169 @@ +// Package ratel is a badger DB based event store with optional cache management +// and capability to be used as a pruning cache along with a secondary larger +// event store. +package ratel + +import ( + "encoding/binary" + "github.com/dgraph-io/badger/v4" + "orly.dev/chk" + "orly.dev/lol" + "sync" + + "orly.dev/context" + "orly.dev/interfaces/store" + "orly.dev/ratel/keys/serial" + "orly.dev/ratel/prefixes" +) + +// DefaultMaxLimit is set to a size that means the usual biggest batch of events sent to a +// client usually is at most about 256kb or so. +const DefaultMaxLimit = 512 + +// T is a badger event store database with layer2 and garbage collection. +type T struct { + Ctx context.T + WG *sync.WaitGroup + dataDir string + // DBSizeLimit is the number of bytes we want to keep the data store from exceeding. + // DBSizeLimit int + // // DBLowWater is the percentage of DBSizeLimit a GC run will reduce the used storage down + // // to. + // DBLowWater int + // // DBHighWater is the trigger point at which a GC run should start if exceeded. + // DBHighWater int + // // GCFrequency is the frequency of checks of the current utilisation. + // GCFrequency time.Duration + // HasL2 bool + // BlockCacheSize int + InitLogLevel int + Logger *logger + // DB is the badger db + *badger.DB + // seq is the monotonic collision-free index for raw event storage. + seq *badger.Sequence + // Threads is how many CPU threads we dedicate to concurrent actions, flatten and GC mark + Threads int + // MaxLimit is a default limit that applies to a query without a limit, to avoid sending out + // too many events to a client from a malformed or excessively broad filter. + MaxLimit int + // // ActuallyDelete sets whether we actually delete or rewrite deleted entries with a modified + // // deleted prefix value (8th bit set) + // ActuallyDelete bool + // Flatten should be set to true to trigger a flatten at close... this is mainly + // triggered by running an import + Flatten bool + // // UseCompact uses a compact encoding based on the canonical format (generate + // // hash of it to get Id field with the signature in raw binary after. + // UseCompact bool + // // Compression sets the compression to use, none/snappy/zstd + // Compression string +} + +func (r *T) SetLogLevel(level string) { + r.Logger.SetLogLevel(lol.GetLogLevel(level)) +} + +var _ store.I = (*T)(nil) + +// BackendParams is the configurations used in creating a new ratel.T. +type BackendParams struct { + Ctx context.T + WG *sync.WaitGroup + HasL2, UseCompact bool + BlockCacheSize, LogLevel, MaxLimit int + Compression string // none,snappy,zstd + Extra []int +} + +// New configures a a new ratel.T event store. +func New(p BackendParams, params ...int) *T { + return GetBackend( + p.Ctx, p.WG, p.HasL2, p.UseCompact, p.BlockCacheSize, p.LogLevel, + p.MaxLimit, + p.Compression, params..., + ) +} + +// GetBackend returns a reasonably configured badger.Backend. +// +// The variadic params correspond to DBSizeLimit, DBLowWater, DBHighWater and +// GCFrequency as an integer multiplier of number of seconds. +// +// Note that the cancel function for the context needs to be managed by the +// caller. +// +// Deprecated: use New instead. 
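+//
+// Illustrative sketch of the equivalent New call (the argument names mirror
+// the parameters below):
+//
+//	b := New(BackendParams{
+//		Ctx: Ctx, WG: WG, HasL2: hasL2, UseCompact: useCompact,
+//		BlockCacheSize: blockCacheSize, LogLevel: logLevel,
+//		MaxLimit: maxLimit, Compression: compression,
+//	}, params...)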
+func GetBackend( + Ctx context.T, WG *sync.WaitGroup, hasL2, useCompact bool, + blockCacheSize, logLevel, maxLimit int, compression string, params ...int, +) (b *T) { + // var sizeLimit, lw, hw, freq = 0, 50, 66, 3600 + // switch len(params) { + // case 4: + // freq = params[3] + // fallthrough + // case 3: + // hw = params[2] + // fallthrough + // case 2: + // lw = params[1] + // fallthrough + // case 1: + // sizeLimit = params[0] * units.Gb + // } + // if unset, assume a safe maximum limit for unlimited filters. + if maxLimit == 0 { + maxLimit = 512 + } + b = &T{ + Ctx: Ctx, + WG: WG, + // DBSizeLimit: sizeLimit, + // DBLowWater: lw, + // DBHighWater: hw, + // GCFrequency: time.Duration(freq) * time.Second, + // HasL2: hasL2, + // BlockCacheSize: blockCacheSize, + InitLogLevel: logLevel, + MaxLimit: maxLimit, + // UseCompact: useCompact, + // Compression: compression, + } + return +} + +// Path returns the path where the database files are stored. +func (r *T) Path() string { return r.dataDir } + +// SerialKey returns a key used for storing events, and the raw serial counter +// bytes to copy into index keys. +func (r *T) SerialKey() (idx []byte, ser *serial.T) { + var err error + var s []byte + if s, err = r.SerialBytes(); chk.E(err) { + panic(err) + } + ser = serial.New(s) + return prefixes.Event.Key(ser), ser +} + +// Serial returns the next monotonic conflict free unique serial on the database. +func (r *T) Serial() (ser uint64, err error) { + if ser, err = r.seq.Next(); chk.E(err) { + } + // log.T.ToSliceOfBytes("serial %x", ser) + return +} + +// SerialBytes returns a new serial value, used to store an event record with a +// conflict-free unique code (it is a monotonic, atomic, ascending counter). +func (r *T) SerialBytes() (ser []byte, err error) { + var serU64 uint64 + if serU64, err = r.Serial(); chk.E(err) { + panic(err) + } + ser = make([]byte, serial.Len) + binary.BigEndian.PutUint64(ser, serU64) + return +} diff --git a/ratel/nuke.go b/ratel/nuke.go new file mode 100644 index 0000000..414e4ab --- /dev/null +++ b/ratel/nuke.go @@ -0,0 +1,19 @@ +package ratel + +import ( + "orly.dev/chk" + "orly.dev/log" + "orly.dev/ratel/prefixes" +) + +func (r *T) Wipe() (err error) { + log.W.F("nuking database at %s", r.dataDir) + log.I.S(prefixes.AllPrefixes) + if err = r.DB.DropPrefix(prefixes.AllPrefixes...); chk.E(err) { + return + } + if err = r.DB.RunValueLogGC(0.8); chk.E(err) { + return + } + return +} diff --git a/ratel/prefixes/index_test.go b/ratel/prefixes/index_test.go new file mode 100644 index 0000000..b937ba0 --- /dev/null +++ b/ratel/prefixes/index_test.go @@ -0,0 +1,21 @@ +package prefixes + +import ( + "bytes" + "testing" + + "orly.dev/ratel/keys/index" +) + +func TestT(t *testing.T) { + v := Version.Key() + // v := New(n) + // buf := new(bytes.Buffer) + // v.Write(buf) + buf2 := bytes.NewBuffer(v) + v2 := index.New(0) + el := v2.Read(buf2).(*index.T) + if el.Val[0] != v[0] { + t.Fatalf("expected %d got %d", v[0], el.Val) + } +} diff --git a/ratel/prefixes/prefixes.go b/ratel/prefixes/prefixes.go new file mode 100644 index 0000000..f421f74 --- /dev/null +++ b/ratel/prefixes/prefixes.go @@ -0,0 +1,180 @@ +// Package prefixes provides a list of the index.P types that designate tables +// in the ratel event store, as well as enabling a simple syntax to assemble and +// decompose an index key into its keys.Element s. 
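+//
+// A rough sketch of that syntax, mirroring call sites in the ratel package
+// (ser is a *serial.T and evId a raw event id; both names are assumptions
+// here):
+//
+//	evKey := prefixes.Event.Key(ser)                         // [ prefix ][ 8 byte serial ]
+//	idPrf := prefixes.Id.Key(id.New(eventid.NewWith(evId)))  // prefix to seek on
+//	keys.Read(k, index.Empty(), id.New(&eventid.T{}), ser)   // decompose a full key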
+package prefixes + +import ( + "orly.dev/ec/schnorr" + "orly.dev/ratel/keys/createdat" + "orly.dev/ratel/keys/fullid" + "orly.dev/ratel/keys/id" + "orly.dev/ratel/keys/index" + "orly.dev/ratel/keys/kinder" + "orly.dev/ratel/keys/pubkey" + "orly.dev/ratel/keys/serial" + "orly.dev/sha256" +) + +const ( + // Version is the key that stores the version number, the value is a 16-bit + // integer (2 bytes) + // + // [ 255 ][ 2 byte/16 bit version code ] + Version index.P = 255 + + // Event is the prefix used with a Serial counter value provided by badgerDB to + // provide conflict-free 8 byte 64-bit unique keys for event records, which + // follows the prefix. + // + // [ 1 ][ 8 bytes Serial ] + Event index.P = iota + + // CreatedAt creates an index key that contains the unix + // timestamp of the event record serial. + // + // [ 2 ][ 8 bytes timestamp.T ][ 8 bytes Serial ] + CreatedAt + + // Id contains the first 8 bytes of the Id of the event and the 8 + // byte Serial of the event record. + // + // [ 3 ][ 8 bytes eventid.T prefix ][ 8 bytes Serial ] + Id + + // Kind contains the kind and datestamp. + // + // [ 4 ][ 2 bytes kind.T ][ 8 bytes timestamp.T ][ 8 bytes Serial ] + Kind + + // Pubkey contains pubkey prefix and timestamp. + // + // [ 5 ][ 8 bytes pubkey prefix ][ 8 bytes timestamp.T ][ 8 bytes Serial ] + Pubkey + + // PubkeyKind contains pubkey prefix, kind and timestamp. + // + // [ 6 ][ 8 bytes pubkey prefix ][ 2 bytes kind.T ][ 8 bytes timestamp.T ][ 8 bytes Serial ] + PubkeyKind + + // Tag is for miscellaneous arbitrary length tags, with timestamp and event + // serial after. + // + // [ 7 ][ tag string 1 <= 100 bytes ][ 8 bytes timestamp.T ][ 8 bytes Serial ] + Tag + + // Tag32 contains the 8 byte pubkey prefix, timestamp and serial. + // + // [ 8 ][ 8 bytes pubkey prefix ][ 8 bytes timestamp.T ][ 8 bytes Serial ] + Tag32 + + // TagAddr contains the kind, pubkey prefix, value (index 2) of address tag (eg + // relay address), followed by timestamp and serial. + // + // [ 9 ][ 2 byte kind.T][ 8 byte pubkey prefix ][ network address ][ 8 byte timestamp.T ][ 8 byte Serial ] + TagAddr + + // Counter is the eventid.T prefix, value stores the average time of access + // (average of all access timestamps) and the size of the record. + // + // [ 10 ][ 8 bytes Serial ] : value: [ 8 bytes timestamp ] + Counter + + // Tombstone is an index that contains the left half of an event Id that has + // been deleted. The purpose of this event is to stop the event being + // republished, as a delete event may not be respected by other relays and + // eventually lead to a republication. The timestamp is added at the end to + // enable pruning the oldest tombstones. + // + // [ 11 ][ 16 bytes first/left half of event Id ][ 8 bytes timestamp ] + Tombstone + + // PubkeyIndex is the prefix for an index that stores a mapping between pubkeys + // and a pubkey serial. + // + // todo: this is useful feature but rather than for saving space on pubkeys in + // events might have a more useful place in some kind of search API. eg just + // want pubkey from event id, combined with FullIndex. + // + // [ 12 ][ 32 bytes pubkey ][ 8 bytes pubkey serial ] + PubkeyIndex + + // FullIndex is a secondary table for Ids that is used to fetch the full Id + // hash instead of fetching and unmarshalling the event. The Id index will + // ultimately be deprecated in favor of this because returning event Ids and + // letting the client handle pagination reduces relay complexity. 
+ // + // In addition, as a mechanism of sorting, the event Id bears also a timestamp + // from its created_at field. The serial acts as a "first seen" ordering, then + // you also have the (claimed) chronological ordering. + // + // [ 13 ][ 8 bytes Serial ][ 32 bytes eventid.T ][ 32 bytes pubkey ][ 8 bytes timestamp.T ] + FullIndex + + // Configuration is a free-form minified JSON object that contains a collection of + // configuration items. + // + // [ 14 ] + Configuration +) + +// FilterPrefixes is a slice of the prefixes used by filter index to enable a loop +// for pulling events matching a serial +var FilterPrefixes = [][]byte{ + {CreatedAt.B()}, + {Id.B()}, + {Kind.B()}, + {Pubkey.B()}, + {PubkeyKind.B()}, + {Tag.B()}, + {Tag32.B()}, + {TagAddr.B()}, + {FullIndex.B()}, +} + +// AllPrefixes is used to do a full database nuke. +var AllPrefixes = [][]byte{ + {Event.B()}, + {CreatedAt.B()}, + {Id.B()}, + {Kind.B()}, + {Pubkey.B()}, + {PubkeyKind.B()}, + {Tag.B()}, + {Tag32.B()}, + {TagAddr.B()}, + {Counter.B()}, + {PubkeyIndex.B()}, + {FullIndex.B()}, +} + +// KeySizes are the byte size of keys of each type of key prefix. int(P) or call the P.I() method +// corresponds to the index 1:1. For future index additions be sure to add the +// relevant KeySizes sum as it describes the data for a programmer. +var KeySizes = []int{ + // Event + 1 + serial.Len, + // CreatedAt + 1 + createdat.Len + serial.Len, + // Id + 1 + id.Len + serial.Len, + // Kind + 1 + kinder.Len + createdat.Len + serial.Len, + // Pubkey + 1 + pubkey.Len + createdat.Len + serial.Len, + // PubkeyKind + 1 + pubkey.Len + kinder.Len + createdat.Len + serial.Len, + // Tag (worst case scenario) + 1 + 100 + createdat.Len + serial.Len, + // Tag32 + 1 + pubkey.Len + createdat.Len + serial.Len, + // TagAddr + 1 + kinder.Len + pubkey.Len + 100 + createdat.Len + serial.Len, + // Counter + 1 + serial.Len, + // Tombstone + 1 + sha256.Size/2 + serial.Len, + // PubkeyIndex + 1 + schnorr.PubKeyBytesLen + serial.Len, + // FullIndex + 1 + fullid.Len + createdat.Len + serial.Len, +} diff --git a/ratel/preparequeries.go b/ratel/preparequeries.go new file mode 100644 index 0000000..e9057c6 --- /dev/null +++ b/ratel/preparequeries.go @@ -0,0 +1,202 @@ +package ratel + +import ( + "encoding/binary" + "fmt" + "math" + "orly.dev/chk" + "orly.dev/errorf" + "orly.dev/log" + + "orly.dev/event" + "orly.dev/eventid" + "orly.dev/filter" + "orly.dev/ratel/keys/id" + "orly.dev/ratel/keys/kinder" + "orly.dev/ratel/keys/pubkey" + "orly.dev/ratel/keys/serial" + "orly.dev/ratel/prefixes" + "orly.dev/timestamp" +) + +type Results struct { + Ev *event.E + TS *timestamp.T + Ser *serial.T +} + +type query struct { + index int + queryFilter *filter.F + searchPrefix []byte + start []byte + skipTS bool +} + +// PrepareQueries analyses a filter and generates a set of query specs that produce +// key prefixes to search for in the badger key indexes. 
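+//
+// A rough usage sketch, mirroring QueryEvents and QueryForIds in this package
+// (error handling elided):
+//
+//	qs, ext, since, err := PrepareQueries(f)
+//	for _, q := range qs {
+//		// seek from q.start in reverse and stop once a key no longer carries
+//		// q.searchPrefix or its timestamp falls below since; ext, when non-nil,
+//		// is a residual filter to match against the fetched events.
+//	}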
+func PrepareQueries(f *filter.F) ( + qs []query, + ext *filter.F, + since uint64, + err error, +) { + if f == nil { + err = errorf.E("filter cannot be nil") + return + } + switch { + // first if there is Ids, just search for them, this overrides all other filters + case f.Ids.Len() > 0: + qs = make([]query, f.Ids.Len()) + for i, idB := range f.Ids.ToSliceOfBytes() { + ih := id.New(eventid.NewWith(idB)) + if ih == nil { + log.E.F("failed to decode event Id: %s", idB) + // just ignore it, clients will be clients + continue + } + prf := prefixes.Id.Key(ih) + // log.F.ToSliceOfBytes("id prefix to search on %0x from key %0x", prf, ih.Val) + qs[i] = query{ + index: i, + queryFilter: f, + searchPrefix: prf, + skipTS: true, + } + } + // log.F.S("ids", qs) + // second we make a set of queries based on author pubkeys, optionally with kinds + case f.Authors.Len() > 0: + // if there are no kinds, we just make the queries based on the author pub keys + if f.Kinds.Len() == 0 { + qs = make([]query, f.Authors.Len()) + for i, pubkeyHex := range f.Authors.ToSliceOfBytes() { + var pk *pubkey.T + if pk, err = pubkey.New(pubkeyHex); chk.E(err) { + // bogus filter, continue anyway + continue + } + sp := prefixes.Pubkey.Key(pk) + // log.I.ToSliceOfBytes("search only for authors %0x from pub key %0x", sp, pk.Val) + qs[i] = query{ + index: i, + queryFilter: f, + searchPrefix: sp, + } + } + // log.I.S("authors", qs) + } else { + // if there is kinds as well, we are searching via the kind/pubkey prefixes + qs = make([]query, f.Authors.Len()*f.Kinds.Len()) + i := 0 + authors: + for _, pubkeyHex := range f.Authors.ToSliceOfBytes() { + for _, kind := range f.Kinds.K { + var pk *pubkey.T + if pk, err = pubkey.New(pubkeyHex); chk.E(err) { + // skip this dodgy thing + continue authors + } + ki := kinder.New(kind.K) + sp := prefixes.PubkeyKind.Key(pk, ki) + // log.F.ToSliceOfBytes("search for authors from pub key %0x and kind %0x", pk.Val, ki.Val) + qs[i] = query{index: i, queryFilter: f, searchPrefix: sp} + i++ + } + } + // log.F.S("authors/kinds", qs) + } + if f.Tags.Len() > 0 { + ext = &filter.F{Tags: f.Tags} + // log.F.S("extra filter", ext) + } + case f.Tags.Len() > 0: + // determine the size of the queries array by inspecting all tags sizes + size := 0 + for _, values := range f.Tags.ToSliceOfTags() { + size += values.Len() - 1 + } + if size == 0 { + return nil, nil, 0, fmt.Errorf("empty tag filters") + } + // we need a query for each tag search + qs = make([]query, size) + // and any kinds mentioned as well in extra filter + ext = &filter.F{Kinds: f.Kinds} + i := 0 + for _, values := range f.Tags.ToSliceOfTags() { + for _, value := range values.ToSliceOfBytes()[1:] { + // get key prefix (with full length) and offset where to write the last parts + var prf []byte + if prf, err = GetTagKeyPrefix(string(value)); chk.E(err) { + continue + } + // remove the last part to get just the prefix we want here + qs[i] = query{index: i, queryFilter: f, searchPrefix: prf} + i++ + } + } + // log.F.S("tags", qs) + case f.Kinds.Len() > 0: + // if there is no ids, pubs or tags, we are just searching for kinds + qs = make([]query, f.Kinds.Len()) + for i, kind := range f.Kinds.K { + kk := kinder.New(kind.K) + ki := prefixes.Kind.Key(kk) + qs[i] = query{ + index: i, + queryFilter: f, + searchPrefix: ki, + } + } + // log.F.S("kinds", qs) + default: + log.I.F("nothing in filter, returning latest events") + // if len(qs) > 0 { + qs = append( + qs, query{ + index: 0, queryFilter: f, searchPrefix: []byte{1}, + start: []byte{1, 255, 255, 255, 
255, 255, 255, 255, 255}, + // }) + // qs = append(qs, query{index: 0, queryFilter: f, + // searchPrefix: prefixes.CreatedAt.Key(), + skipTS: true, + }, + ) + ext = nil + // } + // // log.F.S("other", qs) + } + + // this is where we'll end the iteration + if f.Since != nil { + if fs := f.Since.U64(); fs > since { + since = fs + } + } + // log.I.ToSliceOfBytes("since %d", since) + + var until uint64 = math.MaxInt64 + if f.Until != nil { + if fu := f.Until.U64(); fu < until { + until = fu + 1 + } + } + // log.I.ToSliceOfBytes("until %d", until) + for i, q := range qs { + qs[i].start = binary.BigEndian.AppendUint64( + q.searchPrefix, uint64(until), + ) + } + // if we got an empty filter, we still need a query for scraping the newest + if len(qs) == 0 { + qs = append( + qs, query{ + index: 0, queryFilter: f, searchPrefix: []byte{1}, + start: []byte{1, 255, 255, 255, 255, 255, 255, 255, 255}, + }, + ) + } + return +} diff --git a/ratel/queryevents.go b/ratel/queryevents.go new file mode 100644 index 0000000..117e7f2 --- /dev/null +++ b/ratel/queryevents.go @@ -0,0 +1,293 @@ +package ratel + +import ( + "errors" + "github.com/dgraph-io/badger/v4" + "orly.dev/chk" + "orly.dev/log" + "sort" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/eventid" + "orly.dev/filter" + "orly.dev/hex" + "orly.dev/ratel/keys/createdat" + "orly.dev/ratel/keys/serial" + "orly.dev/ratel/prefixes" +) + +func (r *T) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) { + log.T.F("QueryEvents %s\n", f.Serialize()) + evMap := make(map[string]*event.E) + var queries []query + var ext *filter.F + var since uint64 + if queries, ext, since, err = PrepareQueries(f); chk.E(err) { + return + } + // log.I.S(f, queries) + limit := r.MaxLimit + if f.Limit != nil { + limit = int(*f.Limit) + } + // search for the keys generated from the filter + var total int + eventKeys := make(map[string]struct{}) + for _, q := range queries { + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return + default: + } + err = r.View( + func(txn *badger.Txn) (err error) { + // iterate only through keys and in reverse order + opts := badger.IteratorOptions{ + Reverse: true, + } + it := txn.NewIterator(opts) + defer it.Close() + for it.Seek(q.start); it.ValidForPrefix(q.searchPrefix); it.Next() { + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return + default: + } + item := it.Item() + k := item.KeyCopy(nil) + if !q.skipTS { + if len(k) < createdat.Len+serial.Len { + continue + } + createdAt := createdat.FromKey(k) + if createdAt.Val.U64() < since { + break + } + } + ser := serial.FromKey(k) + idx := prefixes.Event.Key(ser) + eventKeys[string(idx)] = struct{}{} + total++ + // some queries just produce stupid amounts of matches, they are a resource + // exhaustion attack vector and only spiders make them + if total >= r.MaxLimit { + return + } + } + return + }, + ) + if chk.E(err) { + // this means shutdown, probably + if errors.Is(err, badger.ErrDBClosed) { + return + } + } + } + log.T.F( + "found %d event indexes from %d queries", len(eventKeys), len(queries), + ) + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return + default: + } + var delEvs [][]byte + defer func() { + for _, d := range delEvs { + // if events were found that should be deleted, delete them + chk.E(r.DeleteEvent(r.Ctx, eventid.NewWith(d))) + } + }() + // accessed := make(map[string]struct{}) + for ek := range eventKeys { + eventKey := []byte(ek) + err = r.View( + func(txn *badger.Txn) (err error) { + select { + case 
<-r.Ctx.Done(): + return + case <-c.Done(): + return + default: + } + opts := badger.IteratorOptions{Reverse: true} + it := txn.NewIterator(opts) + defer it.Close() + for it.Seek(eventKey); it.ValidForPrefix(eventKey); it.Next() { + item := it.Item() + // if r.HasL2 && item.ValueSize() == sha256.Size { + // // todo: this isn't actually calling anything right now, it should be + // // accumulating to propagate the query (this means response lag also) + // // + // // this is a stub entry that indicates an L2 needs to be accessed for it, so we + // // populate only the event.F.Id and return the result, the caller will expect + // // this as a signal to query the L2 event store. + // var eventValue []byte + // ev := &event.F{} + // if eventValue, err = item.ValueCopy(nil); chk.E(err) { + // continue + // } + // log.F.F("found event stub %0x must seek in L2", eventValue) + // ev.Id = eventValue + // select { + // case <-c.Done(): + // return + // case <-r.Ctx.Done(): + // log.F.Ln("backend context canceled") + // return + // default: + // } + // evMap[hex.Enc(ev.Id)] = ev + // return + // } + ev := &event.E{} + if err = item.Value( + func(eventValue []byte) (err error) { + log.I.F("%s", eventValue) + var rem []byte + if rem, err = r.Unmarshal( + ev, eventValue, + ); chk.E(err) { + return + } + if len(rem) > 0 { + log.T.S(rem) + } + // if et := ev.Tags.GetFirst(tag.New("expiration")); et != nil { + // var exp uint64 + // if exp, err = strconv.ParseUint(string(et.Value()), 10, + // 64); chk.E(err) { + // return + // } + // if int64(exp) > time.Now().Unix() { + // // this needs to be deleted + // delEvs = append(delEvs, ev.Id) + // ev = nil + // return + // } + // } + return + }, + ); chk.E(err) { + continue + } + if ev == nil { + continue + } + // if ext != nil { + // log.I.S(ext) + // log.I.S(ev) + // log.I.S(ext.Matches(ev)) + // } + if ext == nil || ext.Matches(ev) { + evMap[hex.Enc(ev.Id)] = ev + // add event counter key to accessed + // ser := serial.FromKey(eventKey) + // accessed[string(ser.Val)] = struct{}{} + // if pointers.Present(f.Limit) { + // *f.Limit-- + // if *f.Limit <= 0 { + // log.I.F("found events: %d", len(evMap)) + // return + // } + // } + // if there is no limit, cap it at the MaxLimit, assume this was the + // intent or the client is erroneous, if any limit greater is + // requested this will be used instead as the previous clause. 
+ if len(evMap) >= r.MaxLimit { + // log.F.ToSliceOfBytes("found MaxLimit events: %d", len(evMap)) + return + } + } + } + return + }, + ) + if err != nil { + // this means shutdown, probably + if errors.Is(err, badger.ErrDBClosed) { + return + } + } + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return + default: + } + } + // log.I.S(evMap) + if len(evMap) > 0 { + for i := range evMap { + if len(evMap[i].Pubkey) == 0 { + log.I.S(evMap[i]) + continue + } + evs = append(evs, evMap[i]) + } + log.I.S(len(evs)) + sort.Sort(event.Descending(evs)) + if len(evs) > limit { + evs = evs[:limit] + } + seen := make(map[uint16]struct{}) + var tmp event.S + for _, ev := range evs { + log.I.F("%d", ev.CreatedAt.V) + if ev.Kind.IsReplaceable() { + // remove all but newest versions of replaceable + if _, ok := seen[ev.Kind.K]; ok { + // already seen this replaceable avent, skip + continue + } + seen[ev.Kind.K] = struct{}{} + } + tmp = append(tmp, ev) + } + evs = tmp + // log.I.S(evs) + // log.F.C(func() string { + // evIds := make([]string, len(evs)) + // for i, ev := range evs { + // evIds[i] = hex.Enc(ev.Id) + // } + // heading := fmt.Sprintf("query complete,%d events found,%s", len(evs), + // f.Serialize()) + // return fmt.Sprintf("%s\nevents,%v", heading, evIds) + // }) + // bump the access times on all retrieved events. do this in a goroutine so the + // user's events are delivered immediately + // go func() { + // for ser := range accessed { + // seri := serial.New([]byte(ser)) + // now := timestamp.Now() + // err = r.Update(func(txn *badger.Txn) (err error) { + // key := GetCounterKey(seri) + // it := txn.NewIterator(badger.IteratorOptions{}) + // defer it.Close() + // if it.Seek(key); it.ValidForPrefix(key) { + // // update access record + // if err = txn.Set(key, now.Bytes()); chk.E(err) { + // return + // } + // } + // // log.F.Ln("last access for", seri.Uint64(), now.U64()) + // return nil + // }) + // } + // }() + } else { + log.T.F("no events found,%s", f.Serialize()) + } + // } + return +} diff --git a/ratel/queryforids.go b/ratel/queryforids.go new file mode 100644 index 0000000..c184dd2 --- /dev/null +++ b/ratel/queryforids.go @@ -0,0 +1,209 @@ +package ratel + +import ( + "errors" + "orly.dev/chk" + "orly.dev/log" + "strconv" + "time" + + "github.com/dgraph-io/badger/v4" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/eventid" + "orly.dev/filter" + "orly.dev/interfaces/store" + "orly.dev/ratel/keys" + "orly.dev/ratel/keys/createdat" + "orly.dev/ratel/keys/fullid" + "orly.dev/ratel/keys/fullpubkey" + "orly.dev/ratel/keys/index" + "orly.dev/ratel/keys/serial" + "orly.dev/ratel/prefixes" + "orly.dev/realy/pointers" + "orly.dev/tag" + "orly.dev/timestamp" +) + +func (r *T) QueryForIds(c context.T, f *filter.F) ( + founds []store.IdPkTs, err error, +) { + log.T.F("QueryForIds %s\n", f.Serialize()) + var queries []query + var ext *filter.F + var since uint64 + if queries, ext, since, err = PrepareQueries(f); chk.E(err) { + return + } + // search for the keys generated from the filter + var total int + eventKeys := make(map[string]struct{}) + var serials []*serial.T + for _, q := range queries { + err = r.View( + func(txn *badger.Txn) (err error) { + // iterate only through keys and in reverse order + opts := badger.IteratorOptions{ + Reverse: true, + } + it := txn.NewIterator(opts) + defer it.Close() + for it.Seek(q.start); it.ValidForPrefix(q.searchPrefix); it.Next() { + item := it.Item() + k := item.KeyCopy(nil) + if !q.skipTS { + if len(k) < createdat.Len+serial.Len { + 
continue + } + createdAt := createdat.FromKey(k) + if createdAt.Val.U64() < since { + break + } + } + ser := serial.FromKey(k) + serials = append(serials, ser) + idx := prefixes.Event.Key(ser) + eventKeys[string(idx)] = struct{}{} + total++ + // some queries just produce stupid amounts of matches, they are a resource + // exhaustion attack vector and only spiders make them + if total > 5000 { + return + } + } + return + }, + ) + if chk.E(err) { + // this means shutdown, probably + if errors.Is(err, badger.ErrDBClosed) { + return + } + } + } + log.T.F( + "found %d event indexes from %d queries", len(eventKeys), len(queries), + ) + // l2Map := make(map[string]*event.F) // todo: this is not being used, it should be + var delEvs [][]byte + defer func() { + for _, d := range delEvs { + // if events were found that should be deleted, delete them + chk.E(r.DeleteEvent(r.Ctx, eventid.NewWith(d))) + } + }() + accessed := make(map[string]struct{}) + if ext != nil { + // we have to fetch the event + for ek := range eventKeys { + eventKey := []byte(ek) + err = r.View( + func(txn *badger.Txn) (err error) { + opts := badger.IteratorOptions{Reverse: true} + it := txn.NewIterator(opts) + defer it.Close() + done: + for it.Seek(eventKey); it.ValidForPrefix(eventKey); it.Next() { + item := it.Item() + // if r.HasL2 && item.ValueSize() == sha256.Size { + // // this is a stub entry that indicates an L2 needs to be accessed for + // // it, so we populate only the event.F.Id and return the result, the + // // caller will expect this as a signal to query the L2 event store. + // var eventValue []byte + // ev := &event.F{} + // if eventValue, err = item.ValueCopy(nil); chk.E(err) { + // continue + // } + // log.F.F("found event stub %0x must seek in L2", eventValue) + // ev.Id = eventValue + // l2Map[hex.Enc(ev.Id)] = ev + // return + // } + ev := &event.E{} + if err = item.Value( + func(eventValue []byte) (err error) { + var rem []byte + if rem, err = r.Unmarshal( + ev, eventValue, + ); chk.E(err) { + return + } + if len(rem) > 0 { + log.T.S(rem) + } + if et := ev.Tags.GetFirst(tag.New("expiration")); et != nil { + var exp uint64 + if exp, err = strconv.ParseUint( + string(et.Value()), 10, + 64, + ); chk.E(err) { + return + } + if int64(exp) > time.Now().Unix() { + // this needs to be deleted + delEvs = append(delEvs, ev.Id) + return + } + } + return + }, + ); chk.E(err) { + continue + } + if ev == nil { + continue + } + if ext.Matches(ev) { + // add event counter key to accessed + ser := serial.FromKey(eventKey) + serials = append(serials, ser) + accessed[string(ser.Val)] = struct{}{} + if pointers.Present(f.Limit) { + if *f.Limit < uint(len(serials)) { + // done + break done + } + } + } + } + return + }, + ) + if err != nil { + // this means shutdown, probably + if errors.Is(err, badger.ErrDBClosed) { + return + } + } + } + } + for _, ser := range serials { + err = r.View( + func(txn *badger.Txn) (err error) { + prf := prefixes.FullIndex.Key(ser) + opts := badger.IteratorOptions{Prefix: prf} + it := txn.NewIterator(opts) + defer it.Close() + it.Seek(prf) + if it.ValidForPrefix(prf) { + k := it.Item().KeyCopy(nil) + id := fullid.New() + ts := createdat.New(timestamp.New()) + pk := fullpubkey.New() + keys.Read(k, index.New(0), serial.New(nil), id, pk, ts) + ff := store.IdPkTs{ + Ts: ts.Val.I64(), + Id: id.Val, + Pub: pk.Val, + Ser: ser.Uint64(), + } + founds = append(founds, ff) + } + return + }, + ) + } + // log.I.S(founds) + return +} diff --git a/ratel/rescan.go b/ratel/rescan.go new file mode 100644 index 
0000000..3f6e662 --- /dev/null +++ b/ratel/rescan.go @@ -0,0 +1,82 @@ +package ratel + +import ( + "github.com/dgraph-io/badger/v4" + "orly.dev/chk" + "orly.dev/log" + + "orly.dev/event" + "orly.dev/ratel/keys" + "orly.dev/ratel/keys/createdat" + "orly.dev/ratel/keys/serial" + "orly.dev/ratel/prefixes" + "orly.dev/sha256" + "orly.dev/timestamp" +) + +// Rescan regenerates all indexes of events to add new indexes in a new version. +func (r *T) Rescan() (err error) { + var evKeys [][]byte + err = r.View( + func(txn *badger.Txn) (err error) { + prf := []byte{prefixes.Event.B()} + it := txn.NewIterator(badger.IteratorOptions{}) + defer it.Close() + for it.Rewind(); it.ValidForPrefix(prf); it.Next() { + item := it.Item() + if it.Item().ValueSize() == sha256.Size { + continue + } + evKeys = append(evKeys, item.KeyCopy(nil)) + } + return + }, + ) + var i int + var key []byte + for i, key = range evKeys { + err = r.Update( + func(txn *badger.Txn) (err error) { + it := txn.NewIterator(badger.IteratorOptions{}) + defer it.Close() + it.Seek(key) + if it.Valid() { + item := it.Item() + var evB []byte + if evB, err = item.ValueCopy(nil); chk.E(err) { + return + } + ser := serial.FromKey(key) + var rem []byte + ev := &event.E{} + if rem, err = r.Unmarshal(ev, evB); chk.E(err) { + return + } + if len(rem) > 0 { + log.T.S(rem) + } + // add the indexes + var indexKeys [][]byte + indexKeys = GetIndexKeysForEvent(ev, ser) + // log.I.S(indexKeys) + for _, k := range indexKeys { + var val []byte + if k[0] == prefixes.Counter.B() { + val = keys.Write(createdat.New(timestamp.Now())) + } + if err = txn.Set(k, val); chk.E(err) { + return + } + } + if i%1000 == 0 { + log.I.F("rescanned %d events", i) + } + } + return + }, + ) + } + chk.E(err) + log.I.F("completed rescanning %d events", i) + return err +} diff --git a/ratel/saveevent.go b/ratel/saveevent.go new file mode 100644 index 0000000..5e3b48b --- /dev/null +++ b/ratel/saveevent.go @@ -0,0 +1,155 @@ +package ratel + +import ( + "github.com/dgraph-io/badger/v4" + "orly.dev/chk" + "orly.dev/errorf" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/eventid" + eventstore "orly.dev/interfaces/store" + "orly.dev/ratel/keys" + "orly.dev/ratel/keys/createdat" + "orly.dev/ratel/keys/id" + "orly.dev/ratel/keys/index" + "orly.dev/ratel/keys/serial" + "orly.dev/ratel/keys/tombstone" + "orly.dev/ratel/prefixes" + "orly.dev/sha256" + "orly.dev/timestamp" +) + +func (r *T) SaveEvent(c context.T, ev *event.E) ( + keySize, ValueSize int, err error, +) { + if ev.Kind.IsEphemeral() { + // log.T.ToSliceOfBytes("not saving ephemeral event\n%s", ev.Serialize()) + return + } + // make sure Close waits for this to complete + r.WG.Add(1) + defer r.WG.Done() + // first, search to see if the event Id already exists. 
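+ // Two checks happen inside the read transaction below: the Id index
+ // ([ prefix ][ 8 byte id prefix ][ 8 byte serial ]) is scanned to detect a
+ // duplicate or a stub, and the Tombstone index
+ // ([ prefix ][ 16 byte id half ][ timestamp ]) is scanned to detect an event
+ // that was previously deleted.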
+ var foundSerial []byte + var deleted bool + seri := serial.New(nil) + var tsPrefixBytes []byte + err = r.View( + func(txn *badger.Txn) (err error) { + // query event by id to ensure we don't try to save duplicates + prf := prefixes.Id.Key(id.New(eventid.NewWith(ev.Id))) + it := txn.NewIterator(badger.IteratorOptions{}) + defer it.Close() + it.Seek(prf) + if it.ValidForPrefix(prf) { + var k []byte + // get the serial + k = it.Item().Key() + // copy serial out + keys.Read(k, index.Empty(), id.New(&eventid.T{}), seri) + // save into foundSerial + foundSerial = seri.Val + } + // if the event was deleted we don't want to save it again + // In deleteevent.go, the tombstone key is created with: + // tombstoneKey = prefixes.Tombstone.Key(ts, createdat.New(timestamp.Now())) + // where ts is created with tombstone.NewWith(ev.EventId()) + // We need to use just the prefix part (without the timestamp) to find any tombstone for this event + tsPrefixBytes = []byte{prefixes.Tombstone.B()} + tsBytes := tombstone.Make(eventid.NewWith(ev.Id)) + tsPrefixBytes = append(tsPrefixBytes, tsBytes...) + it2 := txn.NewIterator(badger.IteratorOptions{}) + defer it2.Close() + it2.Rewind() + it2.Seek(tsPrefixBytes) + if it2.ValidForPrefix(tsPrefixBytes) { + deleted = true + } + return + }, + ) + if chk.E(err) { + return + } + if deleted { + err = errorf.W( + "tombstone found %0x, event will not be saved", tsPrefixBytes, + ) + return + } + if foundSerial != nil { + // log.D.ToSliceOfBytes("found possible duplicate or stub for %s", ev.Serialize()) + err = r.Update( + func(txn *badger.Txn) (err error) { + // retrieve the event record + evKey := keys.Write(index.New(prefixes.Event), seri) + it := txn.NewIterator(badger.IteratorOptions{}) + defer it.Close() + it.Seek(evKey) + if it.ValidForPrefix(evKey) { + if it.Item().ValueSize() != sha256.Size { + // not a stub, we already have it + // log.D.ToSliceOfBytes("duplicate event %0x", ev.Id) + return eventstore.ErrDupEvent + } + // we only need to restore the event binary and write the access counter key + // encode to binary + var bin []byte + bin = r.Marshal(ev, bin) + if err = txn.Set(it.Item().Key(), bin); chk.E(err) { + return + } + // // bump counter key + // counterKey := GetCounterKey(seri) + // val := keys.Write(createdat.New(timestamp.Now())) + // if err = txn.Set(counterKey, val); chk.E(err) { + // return + // } + return + } + return + }, + ) + // if it was a dupe, we are done. + if err != nil { + return + } + return + } + var bin []byte + bin = r.Marshal(ev, bin) + // otherwise, save new event record. 
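+ // The update below stores the binary event under a fresh Event/serial key,
+ // then writes every index key produced by GetIndexKeysForEvent; only Counter
+ // keys carry a value (the current timestamp), the other index keys are
+ // value-less.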
+ if err = r.Update( + func(txn *badger.Txn) (err error) { + var idx []byte + var ser *serial.T + idx, ser = r.SerialKey() + // encode to binary + // raw event store + if err = txn.Set(idx, bin); chk.E(err) { + return + } + // add the indexes + var indexKeys [][]byte + indexKeys = GetIndexKeysForEvent(ev, ser) + // log.I.S(indexKeys) + for _, k := range indexKeys { + var val []byte + if k[0] == prefixes.Counter.B() { + val = keys.Write(createdat.New(timestamp.Now())) + } + if err = txn.Set(k, val); chk.E(err) { + return + } + } + // log.D.ToSliceOfBytes("saved event to ratel %s:\n%s", r.dataDir, ev.Serialize()) + return + }, + ); chk.E(err) { + return + } + return +} + +func (r *T) Sync() (err error) { return r.DB.Sync() } diff --git a/readme.adoc b/readme.adoc new file mode 100644 index 0000000..ffc2e53 --- /dev/null +++ b/readme.adoc @@ -0,0 +1,125 @@ += realy.lol +:toc: +:note-caption: note 👉 + +image:https://img.shields.io/badge/godoc-documentation-blue.svg[Documentation,link=https://pkg.go.dev/realy.lol] +image:https://img.shields.io/badge/donate-geyser_crowdfunding_project_page-orange.svg[Support this project,link=https://geyser.fund/project/realy] +zap me: ⚡️mleku@getalby.com + +image:./realy.png[realy.png] + +nostr relay built from a heavily modified fork of https://github.com/nbd-wtf/go-nostr[nbd-wtf/go-nostr] +and https://github.com/fiatjaf/relayer[fiatjaf/relayer] aimed at maximum performance, simplicity and memory efficiency. + +== Features + +* new HTTP REST API available in addition to standard websocket access, simplifying writing applications and tools, and building a standard API method set for future extensions for more flexible features +* a lot of other bits and pieces accumulated from nearly 8 years of working with Go, logging and run control, XDG user data directories (windows, mac, linux, android) +* a cleaned up and unified fork of the btcd/dcred BIP-340 signatures, including the use of bitcoin core's BIP-340 implementation (more than 4x faster than btcd) (todo: ECDH from the C library tbd) +* AVX/AVX2 optimized SHA256 and SIMD hex encoder +* https://github.com/bitcoin/secp256k1[libsecp256k1]-enabled signature and signature verification (see link:p256k/README.md[here]) +* efficient, mutable byte slice based hash/pubkey/signature encoding in memory (zero allocation decode from wire, can tolerate whitespace, at a speed penalty) +* custom badger based event store with an optional garbage collector that deletes least recent once the store exceeds a specified size access, and data encoded using a more space efficient format based on the nostr canonical json array event form +* link:cmd/vainstr[vainstr] vanity npub generator that can mine a 5 letter suffix in around 15 minutes on a 6 core Ryzen 5 processor using the CGO bitcoin core signature library +* reverse proxy tool link:cmd/lerproxy[lerproxy] with support for Go vanity imports and https://github.com/nostr-protocol/nips/blob/master/05.md[nip-05] npub DNS verification and own TLS certificates +* link:https://github.com/nostr-protocol/nips/blob/master/98.md[nip-98] implementation with new expiring variant for vanilla HTTP tools and browsers. + +== Building + +If you just want to make it run from source, you should check out a tagged version. + +The commits on these tags will explain what state the commit is at. + +In general, the most stable versions are new minor tags, eg v1.2.0 or v1.23.0, and minor patch versions may not be stable and occasionally may not compile (not very often). 
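+
+For example, pinning an existing checkout to a tag (the tag name here is illustrative only) and building the relay binary:
+
+----
+git checkout v1.2.0
+cd cmd/realy
+go build .
+----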
+ +Go 1.24 or better is recommended. +Go 1.23.1 is minimum required. + +== Repository Policy + +In general, the main `dev` branch will build, but occasionally may not. +It is where new commits are added once they are working, mostly, and allows people to easily see ongoing activity. + +WARNING: IT IS NOT GUARANTEED TO BE STABLE... but it is getting there. + +Use tags to pin to a specific version. +Tags are in standard Go semver pattern `vX.X.X` + +== CGO and secp256k1 signatures library + +By default, Go will usually be configured with `CGO_ENABLED=1`. +This selects the use of the C library from bitcoin core, which does signatures and verifications much faster (4x and better) but complicates the build process as you have to install the library beforehand. +There is instructions in link:p256k/README.md[p256k/README.md] for doing this. + +=== Disabling CGO + +In order to disable the use of this, you must set the environment variable `CGO_ENABLED=0` and it the Go compiler will automatically revert to using the btcec based secp256k1 signatures library. + +---- +export CGO_ENABLED=0 +cd cmd/realy +go build . +---- + +This will build the binary and place it in cmd/realy and then you can move it where you like. + +=== Static build + +To produce a static binary, whether you use the CGO secp256k1 or disable CGO as above: + +---- +go build --ldflags '-extldflags "-static"' -o ~/bin/realy ./cmd/realy/. +---- + +will place it into your `~/bin/` directory, and it will work on any system of the same architecture with the same glibc major version (has been 2 for a long time). + +== Configuration + +The default will run the relay with default settings, which will not be what you want. + +=== Show Current Configuration + +To see the current active configuration: + +---- +realy env +---- + +=== Create Persistent Configuration + +This output can be directed to the profile location to make the settings editable without manually setting them on the commandline: + +---- +realy env > $HOME/.config/realy/.env +---- + +You can now edit this file to alter the configuration. + +Regarding the configuration system, this is an element of many servers that is absurdly complex, and for which reason Realy does not use a complicated scheme, a simple library that allows automatic configuration of a series of options, added a simple info print: + +---- +realy help +---- + +will show you the instructions, and the one simple extension of being able to use a standard formated .env file to configure all the options for an instance. + +=== Database Storage Location + +The database is stored in `$HOME/.local/share/realy` and if need be you can stop `realy` delete everything in this directory and restart to "nuke" the database. Note that this is now available through the link:#_simplified_nostr[Simplified Nostr] HTTP OpenAPI endpoint on `/nuke` + +== API support + +=== Standard Nostr NIPs + +`realy` already accepts all the standard NIPs mainly nip-01 and many other types are recognised such an NIP-42 auth messages and it uses and parses relay lists, and all that other stuff. +It has maybe the most faithful implementation of NIP-42 but most clients don't correctly implement it, or at all. +Which is sad, but what can you do with stupid people? + +[#_simplified_nostr] +=== Simplified Nostr + +Rather than write a text that will likely fall out of date very quickly, simply run `realy` and visit its listener address (eg link:http://localhost:3334/api[http://localhost:3334/api]) to see the full documentation. 
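+
+From a terminal the same documentation page can be fetched directly (assuming the default listen address and port):
+
+----
+curl http://localhost:3334/api
+----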
+ +By default this presents you with a Scalar Docs page that lets you browse the available API methods and shows examples in many forms including cURL and most languages how to call and what data needs to go in headers, body, and parameters and what results will come back. + +There is even a subscription endpoint, also, which uses SSE format and does not require a websocket upgrade to work with. \ No newline at end of file diff --git a/readme.md b/readme.md deleted file mode 100644 index 3efd0a7..0000000 --- a/readme.md +++ /dev/null @@ -1,5 +0,0 @@ -# orly - -![orly.png](orly.png) - -a super simple, fast nostr relay \ No newline at end of file diff --git a/orly.png b/realy.png similarity index 100% rename from orly.png rename to realy.png diff --git a/realy.service b/realy.service new file mode 100644 index 0000000..c445493 --- /dev/null +++ b/realy.service @@ -0,0 +1,16 @@ +# systemd unit to run realy as a service +[Unit] +Description=realy + +[Service] +Type=simple +User=mleku +ExecStart=/home/mleku/.local/bin/realy +Restart=always +Wants=network-online.target +# waits for wireguard service to come up before starting, remove if running it directly on an +# internet routeable connection +After=network.target network-online.target wg-quick@wg0.service + +[Install] +WantedBy=multi-user.target diff --git a/realy/addEvent.go b/realy/addEvent.go new file mode 100644 index 0000000..e32e397 --- /dev/null +++ b/realy/addEvent.go @@ -0,0 +1,75 @@ +package realy + +import ( + "errors" + "net/http" + "orly.dev/log" + "strings" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/interfaces/store" + "orly.dev/normalize" + "orly.dev/relay" + "orly.dev/socketapi" +) + +func (s *Server) addEvent( + c context.T, rl relay.I, ev *event.E, + hr *http.Request, origin string, + authedPubkey []byte, +) (accepted bool, message []byte) { + + if ev == nil { + return false, normalize.Invalid.F("empty event") + } + // sto := rl.Storage() + // advancedSaver, _ := sto.(relay.AdvancedSaver) + // don't allow storing event with protected marker as per nip-70 with auth enabled. 
+ // if (s.authRequired || !s.publicReadable) && ev.Tags.ContainsProtectedMarker() { + // if len(authedPubkey) == 0 || !bytes.Equal(ev.Pubkey, authedPubkey) { + // return false, + // []byte(fmt.Sprintf("event with relay marker tag '-' (nip-70 protected event) "+ + // "may only be published by matching npub: %0x is not %0x", + // authedPubkey, ev.Pubkey)) + // } + // } + if ev.Kind.IsEphemeral() { + } else { + // if advancedSaver != nil { + // advancedSaver.BeforeSave(c, ev) + // } + if saveErr := s.Publish(c, ev); saveErr != nil { + if errors.Is(saveErr, store.ErrDupEvent) { + return false, normalize.Error.F(saveErr.Error()) + } + errmsg := saveErr.Error() + if socketapi.NIP20prefixmatcher.MatchString(errmsg) { + if strings.Contains(errmsg, "tombstone") { + return false, normalize.Blocked.F("event was deleted, not storing it again") + } + if strings.HasPrefix(errmsg, string(normalize.Blocked)) { + return false, []byte(errmsg) + } + return false, normalize.Error.F(errmsg) + } else { + return false, normalize.Error.F("failed to save (%s)", errmsg) + } + } + log.I.F( + "event id %0x stored ephemeral: %s", ev.Id, ev.Kind.IsEphemeral(), + ) + // if advancedSaver != nil { + // advancedSaver.AfterSave(ev) + // } + } + // var authRequired bool + // if ar, ok := rl.(relay.Authenticator); ok { + // authRequired = ar.AuthRequired() + // } + // notify subscribers + s.listeners.Deliver(ev) + accepted = true + log.I.S(ev) + return +} diff --git a/realy/auth.go b/realy/auth.go new file mode 100644 index 0000000..ce72ae3 --- /dev/null +++ b/realy/auth.go @@ -0,0 +1,32 @@ +package realy + +//func (s *Server) adminAuth(r *http.Request, +// tolerance ...time.Duration) (authed bool, pubkey []byte) { +// var valid bool +// var err error +// var tolerate time.Duration +// if len(tolerance) > 0 { +// tolerate = tolerance[0] +// } +// if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) { +// return +// } +// if !valid { +// return +// } +// // check admins pubkey list +// for _, v := range s.admins { +// if bytes.Equal(v.Pub(), pubkey) { +// authed = true +// return +// } +// } +// return +//} + +//func (s *Server) unauthorized(w http.ResponseWriter, r *http.Request) { +// w.Header().Set("WWW-Authenticate", `Basic realm="restricted", charset="UTF-8"`) +// http.Error(w, "Unauthorized", http.StatusUnauthorized) +// fmt.Fprintf(w, +// "not authorized, either you did not provide an auth token or what you provided does not grant access\n") +//} diff --git a/realy/config/config.go b/realy/config/config.go new file mode 100644 index 0000000..89e174e --- /dev/null +++ b/realy/config/config.go @@ -0,0 +1,217 @@ +// Package config provides a go-simpler.org/env configuration table and helpers +// for working with the list of key/value lists stored in .env files. +package config + +import ( + "fmt" + "io" + "orly.dev/chk" + "orly.dev/log" + "orly.dev/version" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "time" + + "github.com/adrg/xdg" + "go-simpler.org/env" + + "orly.dev/apputil" + env2 "orly.dev/env" +) + +// C is the configuration for realy relay. These are read from the environment if present, or if +// a .env file is found in ~/.config/realy/ that is read instead and overrides anything else. +type C struct { + AppName string `env:"ORLY_APP_NAME" default:"realy"` + Config string `env:"ORLY_CONFIG_DIR" usage:"location for configuration file, which has the name '.env' to make it harder to delete, and is a standard environment KEY=value... 
style"` + State string `env:"ORLY_STATE_DATA_DIR" usage:"storage location for state data affected by dynamic interactive interfaces"` + DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the ratel event store"` + Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"` + Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"` + // AdminNpubs string `env:"ORLY_ADMIN_NPUBS" usage:"comma separated lists of hex or bech32 format pubkeys of authorised administrators for the http admin endpoints"` + LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"` + DbLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"` + // AuthRequired bool `env:"ORLY_AUTH_REQUIRED" default:"false" usage:"requires auth for all access"` + // PublicReadable bool `env:"ORLY_PUBLIC_READABLE" default:"true" usage:"allows all read access, overriding read access limit from ORLY_AUTH_REQUIRED"` + // Owners []string `env:"ORLY_OWNERS" usage:"comma separated list of npubs of users in hex or bech32 format whose follow and mute list dictate accepting requests and events with AUTH_REQUIRED enabled - follows and follows follows are allowed to read/write, owners mutes events are rejected"` + // DBSizeLimit int `env:"ORLY_DB_SIZE_LIMIT" default:"0" usage:"the number of gigabytes (1,000,000,000 bytes) we want to keep the data store from exceeding, 0 means disabled"` + // DBLowWater int `env:"ORLY_DB_LOW_WATER" default:"60" usage:"the percentage of DBSizeLimit a GC run will reduce the used storage down to"` + // DBHighWater int `env:"ORLY_DB_HIGH_WATER" default:"80" usage:"the trigger point at which a GC run should start if exceeded"` + // GCFrequency int `env:"ORLY_GC_FREQUENCY" default:"3600" usage:"the frequency of checks of the current utilisation in minutes"` + Pprof bool `env:"ORLY_PPROF" default:"false" usage:"enable pprof on 127.0.0.1:6060"` + // MemLimit int `env:"ORLY_MEMLIMIT" default:"250000000" usage:"set memory limit, default is 250Mb"` + // UseCompact bool `env:"ORLY_USE_COMPACT" default:"false" usage:"use the compact database encoding for the ratel event store"` + // Compression string `env:"ORLY_COMPRESSION" default:"none" usage:"compress the database, [none|snappy|zstd]"` + // NWC st `env:"NWC" usage:"NWC connection string for relay to interact with an NWC enabled wallet"` // todo +} + +// New creates a new config.C. +func New() (cfg *C, err error) { + cfg = &C{} + if err = env.Load(cfg, &env.Options{SliceSep: ","}); chk.T(err) { + return + } + if cfg.Config == "" { + cfg.Config = filepath.Join(xdg.ConfigHome, cfg.AppName) + } + if cfg.State == "" { + cfg.State = filepath.Join(xdg.StateHome, cfg.AppName) + } + if cfg.DataDir == "" { + cfg.DataDir = filepath.Join(xdg.DataHome, cfg.AppName) + } + envPath := filepath.Join(cfg.Config, ".env") + if apputil.FileExists(envPath) { + log.I.F("loading config from %s", envPath) + var e env2.Env + if e, err = env2.GetEnv(envPath); chk.T(err) { + return + } + if err = env.Load( + cfg, &env.Options{SliceSep: ",", Source: e}, + ); chk.E(err) { + return + } + // var owners []string + // // remove empties if any + // for _, o := range cfg.Owners { + // if len(o) > 0 { + // owners = append(owners, o) + // } + // } + // cfg.Owners = owners + } + return +} + +// HelpRequested returns true if any of the common types of help invocation are +// found as the first command line parameter/flag. 
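+//
+// A sketch of typical wiring in an entrypoint (the surrounding main function
+// is assumed and not shown in this package):
+//
+//	cfg, _ := config.New()
+//	if config.HelpRequested() {
+//		config.PrintHelp(cfg, os.Stderr)
+//		os.Exit(0)
+//	}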
+func HelpRequested() (help bool) { + if len(os.Args) > 1 { + switch strings.ToLower(os.Args[1]) { + case "help", "-h", "--h", "-help", "--help", "?": + help = true + } + } + return +} + +// GetEnv processes os.Args to detect a request for printing the current settings as a list of +// environment variable key/values. +func GetEnv() (requested bool) { + if len(os.Args) > 1 { + switch strings.ToLower(os.Args[1]) { + case "env": + requested = true + } + } + return +} + +// KV is a key/value pair. +type KV struct{ Key, Value string } + +// KVSlice is a collection of key/value pairs. +type KVSlice []KV + +func (kv KVSlice) Len() int { return len(kv) } +func (kv KVSlice) Less(i, j int) bool { return kv[i].Key < kv[j].Key } +func (kv KVSlice) Swap(i, j int) { kv[i], kv[j] = kv[j], kv[i] } + +// Composit merges two KVSlice together, replacing the values of earlier keys with same named +// KV items later in the slice (enabling compositing two together as a .env, as well as them +// being composed as structs. +func (kv KVSlice) Composit(kv2 KVSlice) (out KVSlice) { + // duplicate the initial KVSlice + for _, p := range kv { + out = append(out, p) + } +out: + for i, p := range kv2 { + for j, q := range out { + // if the key is repeated, replace the value + if p.Key == q.Key { + out[j].Value = kv2[i].Value + continue out + } + } + out = append(out, p) + } + return +} + +// EnvKV turns a struct with `env` keys (used with go-simpler/env) into a standard formatted +// environment variable key/value pair list, one per line. Note you must dereference a pointer +// type to use this. This allows the composition of the config in this file with an extended +// form with a customized variant of realy to produce correct environment variables both read +// and write. +func EnvKV(cfg any) (m KVSlice) { + t := reflect.TypeOf(cfg) + for i := 0; i < t.NumField(); i++ { + k := t.Field(i).Tag.Get("env") + v := reflect.ValueOf(cfg).Field(i).Interface() + var val string + switch v.(type) { + case string: + val = v.(string) + case int, bool, time.Duration: + val = fmt.Sprint(v) + case []string: + arr := v.([]string) + if len(arr) > 0 { + val = strings.Join(arr, ",") + } + } + // this can happen with embedded structs + if k == "" { + continue + } + m = append(m, KV{k, val}) + } + return +} + +// PrintEnv renders the key/values of a config.C to a provided io.Writer. +func PrintEnv(cfg *C, printer io.Writer) { + kvs := EnvKV(*cfg) + sort.Sort(kvs) + for _, v := range kvs { + _, _ = fmt.Fprintf(printer, "%s=%s\n", v.Key, v.Value) + } +} + +// PrintHelp outputs a help text listing the configuration options and default +// values to a provided io.Writer (usually os.Stderr or os.Stdout). 
+func PrintHelp(cfg *C, printer io.Writer) { + _, _ = fmt.Fprintf( + printer, + "%s %s\n\n", cfg.AppName, version.V, + ) + + _, _ = fmt.Fprintf( + printer, + "Environment variables that configure %s:\n\n", cfg.AppName, + ) + + env.Usage(cfg, printer, &env.Options{SliceSep: ","}) + _, _ = fmt.Fprintf( + printer, + "\nCLI parameter 'help' also prints this information\n"+ + "\n.env file found at the path %s will be automatically "+ + "loaded for configuration.\nset these two variables for a custom load path,"+ + " this file will be created on first startup.\nenvironment overrides it and "+ + "you can also edit the file to set configuration options\n\n"+ + "use the parameter 'env' to print out the current configuration to the terminal\n\n"+ + "set the environment using\n\n\t%s env > %s/.env\n", os.Args[0], + cfg.Config, + cfg.Config, + ) + + fmt.Fprintf(printer, "\ncurrent configuration:\n\n") + PrintEnv(cfg, printer) + fmt.Fprintln(printer) + return +} diff --git a/realy/disconnect.go b/realy/disconnect.go new file mode 100644 index 0000000..3474180 --- /dev/null +++ b/realy/disconnect.go @@ -0,0 +1,10 @@ +package realy + +import "orly.dev/log" + +func (s *Server) disconnect() { + for client := range s.clients { + log.I.F("closing client %s", client.RemoteAddr()) + client.Close() + } +} diff --git a/realy/doc.go b/realy/doc.go new file mode 100644 index 0000000..626ac42 --- /dev/null +++ b/realy/doc.go @@ -0,0 +1,3 @@ +// Package realy implements a nostr relay including the new HTTP API built with +// huma. +package realy diff --git a/realy/handleRelayinfo.go b/realy/handleRelayinfo.go new file mode 100644 index 0000000..ea01f98 --- /dev/null +++ b/realy/handleRelayinfo.go @@ -0,0 +1,63 @@ +package realy + +import ( + "encoding/json" + "net/http" + "orly.dev/chk" + "orly.dev/log" + "orly.dev/version" + "sort" + + "orly.dev/relay" + "orly.dev/relayinfo" +) + +func (s *Server) handleRelayInfo(w http.ResponseWriter, r *http.Request) { + r.Header.Set("Content-Type", "application/json") + log.I.Ln("handling relay information document") + var info *relayinfo.T + if informationer, ok := s.relay.(relay.Informationer); ok { + info = informationer.GetNIP11InformationDocument() + } else { + supportedNIPs := relayinfo.GetList( + relayinfo.BasicProtocol, + relayinfo.EncryptedDirectMessage, + relayinfo.EventDeletion, + relayinfo.RelayInformationDocument, + relayinfo.GenericTagQueries, + relayinfo.NostrMarketplace, + relayinfo.EventTreatment, + relayinfo.CommandResults, + relayinfo.ParameterizedReplaceableEvents, + relayinfo.ExpirationTimestamp, + relayinfo.ProtectedEvents, + relayinfo.RelayListMetadata, + ) + // var auther relay.Authenticator + // if auther, ok = s.relay.(relay.Authenticator); ok && auther.ServiceUrl(r) != "" { + // supportedNIPs = append(supportedNIPs, relayinfo.Authentication.N()) + // } + // var storage store.I + // if storage = s.relay.Storage(); storage != nil { + // if _, ok = storage.(relay.EventCounter); ok { + // supportedNIPs = append(supportedNIPs, relayinfo.CountingResults.N()) + // } + // } + sort.Sort(supportedNIPs) + log.T.Ln("supported NIPs", supportedNIPs) + info = &relayinfo.T{ + Name: s.relay.Name(), + Description: version.Description, + Nips: supportedNIPs, Software: version.URL, + Version: version.V, + Limitation: relayinfo.Limits{ + // MaxLimit: s.maxLimit, + // AuthRequired: s.authRequired, + // RestrictedWrites: !s.publicReadable || s.authRequired || len(s.owners) > 0, + }, + Icon: 
"https://cdn.satellite.earth/ac9778868fbf23b63c47c769a74e163377e6ea94d3f0f31711931663d035c4f6.png", + } + } + if err := json.NewEncoder(w).Encode(info); chk.E(err) { + } +} diff --git a/realy/handleWebsocket.go b/realy/handleWebsocket.go new file mode 100644 index 0000000..9248011 --- /dev/null +++ b/realy/handleWebsocket.go @@ -0,0 +1,13 @@ +package realy + +import ( + "net/http" + + "orly.dev/socketapi" +) + +func (s *Server) handleWebsocket(w http.ResponseWriter, r *http.Request) { + a := &socketapi.A{Server: s} // ClientsMu: &s.clientsMu, Clients: s.clients, + + a.Serve(w, r, s) +} diff --git a/realy/helpers/helpers.go b/realy/helpers/helpers.go new file mode 100644 index 0000000..21afb55 --- /dev/null +++ b/realy/helpers/helpers.go @@ -0,0 +1,34 @@ +package helpers + +import ( + "net/http" + "strings" +) + +func GenerateDescription(text string, scopes []string) string { + if len(scopes) == 0 { + return text + } + result := make([]string, 0) + for _, value := range scopes { + result = append(result, "`"+value+"`") + } + return text + "

\n\n**Scopes**\n
" + strings.Join(result, ", ") +} + +func GetRemoteFromReq(r *http.Request) (rr string) { + // reverse proxy should populate this field so we see the remote not the proxy + rem := r.Header.Get("X-Forwarded-For") + if rem == "" { + rr = r.RemoteAddr + } else { + splitted := strings.Split(rem, " ") + if len(splitted) == 1 { + rr = splitted[0] + } + if len(splitted) == 2 { + rr = splitted[1] + } + } + return +} diff --git a/realy/interfaces/interfaces.go b/realy/interfaces/interfaces.go new file mode 100644 index 0000000..1de1314 --- /dev/null +++ b/realy/interfaces/interfaces.go @@ -0,0 +1,38 @@ +package interfaces + +import ( + "net/http" + "orly.dev/context" + "orly.dev/event" + "orly.dev/interfaces/store" + "orly.dev/realy/publish" + "orly.dev/relay" +) + +type Server interface { + AddEvent( + c context.T, rl relay.I, ev *event.E, hr *http.Request, + origin string, authedPubkey []byte, + ) ( + accepted bool, + message []byte, + ) + Context() context.T + Disconnect() + Publisher() *publish.S + Publish(c context.T, evt *event.E) (err error) + Relay() relay.I + Shutdown() + Storage() store.I + // Options() *options.T + // AcceptEvent( + // c context.T, ev *event.E, hr *http.Request, origin string, + // authedPubkey []byte) (accept bool, notice string, afterSave func()) + // AdminAuth(r *http.Request, + // tolerance ...time.Duration) (authed bool, pubkey []byte) + // AuthRequired() bool + // Configuration() store.Configuration + // Owners() [][]byte + // PublicReadable() bool + // SetConfiguration(*store.Configuration) +} diff --git a/realy/options/options.go b/realy/options/options.go new file mode 100644 index 0000000..0d1c64b --- /dev/null +++ b/realy/options/options.go @@ -0,0 +1,33 @@ +// Package options provides some option configurations for the realy relay. +// +// None of this package is actually in use, and the skip event function has not been +// implemented. In theory this could be used for something but it currently isn't. +package options + +import ( + "orly.dev/event" +) + +type SkipEventFunc func(*event.E) bool + +// T is a collection of options. +type T struct { + // SkipEventFunc is in theory a function to test whether an event should not be sent in + // response to a query. + SkipEventFunc +} + +// O is a function that processes an options.T. +type O func(*T) + +// Default returns an uninitialised options.T. +func Default() *T { + return &T{} +} + +// WithSkipEventFunc is an options.T generator that adds a function to skip events. +func WithSkipEventFunc(skipEventFunc func(*event.E) bool) O { + return func(o *T) { + o.SkipEventFunc = skipEventFunc + } +} diff --git a/pointers/pointers.go b/realy/pointers/pointers.go similarity index 100% rename from pointers/pointers.go rename to realy/pointers/pointers.go diff --git a/publish/publisher.go b/realy/publish/publisher.go similarity index 66% rename from publish/publisher.go rename to realy/publish/publisher.go index 3617308..bb1bedf 100644 --- a/publish/publisher.go +++ b/realy/publish/publisher.go @@ -1,30 +1,26 @@ -// Package publisher is a singleton package that keeps track of subscriptions -// from relevant API connections. +// Package publisher is a singleton package that keeps track of subscriptions in +// both websockets and http SSE, including managing the authentication state of +// a connection. 
package publish import ( "orly.dev/event" - "orly.dev/interfaces/publisher" - "orly.dev/interfaces/typer" + "orly.dev/realy/publish/publisher" ) -var P = &S{} - -func (s *S) Register(p publisher.I) { - s.Publishers = append(s.Publishers, p) -} - // S is the control structure for the subscription management scheme. type S struct { publisher.Publishers } -// New creates a new publisher. +// New creates a new publish.S. func New(p ...publisher.I) (s *S) { s = &S{Publishers: p} return } +var _ publisher.I = &S{} + func (s *S) Type() string { return "publish" } func (s *S) Deliver(ev *event.E) { @@ -34,7 +30,7 @@ func (s *S) Deliver(ev *event.E) { } } -func (s *S) Receive(msg typer.T) { +func (s *S) Receive(msg publisher.Message) { t := msg.Type() for _, p := range s.Publishers { if p.Type() == t { diff --git a/realy/publish/publisher/interface.go b/realy/publish/publisher/interface.go new file mode 100644 index 0000000..46047a5 --- /dev/null +++ b/realy/publish/publisher/interface.go @@ -0,0 +1,17 @@ +package publisher + +import ( + "orly.dev/event" +) + +type Message interface { + Type() string +} + +type I interface { + Message + Deliver(ev *event.E) + Receive(msg Message) +} + +type Publishers []I diff --git a/realy/server-impl.go b/realy/server-impl.go new file mode 100644 index 0000000..bebfcca --- /dev/null +++ b/realy/server-impl.go @@ -0,0 +1,65 @@ +package realy + +import ( + "net/http" + "orly.dev/context" + "orly.dev/event" + "orly.dev/interfaces/store" + "orly.dev/realy/interfaces" + "orly.dev/realy/publish" + "orly.dev/relay" +) + +// func (s *Server) AdminAuth(r *http.Request, +// tolerance ...time.Duration) (authed bool, +// pubkey []byte) { +// +// return s.adminAuth(r, tolerance...) +// } + +func (s *Server) Storage() store.I { return s.relay.Storage() } + +// func (s *Server) Configuration() store.Configuration { +// s.ConfigurationMx.Lock() +// defer s.ConfigurationMx.Unlock() +// return *s.configuration +// } + +// func (s *Server) SetConfiguration(cfg *store.Configuration) { +// s.ConfigurationMx.Lock() +// s.configuration = cfg +// s.ConfigurationMx.Unlock() +// } + +func (s *Server) Relay() relay.I { return s.relay } + +func (s *Server) Disconnect() { s.disconnect() } + +func (s *Server) AddEvent( + c context.T, rl relay.I, ev *event.E, hr *http.Request, origin string, + authedPubkey []byte, +) (accepted bool, message []byte) { + + return s.addEvent(c, rl, ev, hr, origin, authedPubkey) +} + +// func (s *Server) AcceptEvent( +// c context.T, ev *event.E, hr *http.Request, origin string, +// authedPubkey []byte) (accept bool, notice string, afterSave func()) { +// +// return s.relay.AcceptEvent(c, ev, hr, origin, authedPubkey) +// } + +func (s *Server) Publisher() *publish.S { return s.listeners } + +// func (s *Server) PublicReadable() bool { return s.publicReadable } + +func (s *Server) Context() context.T { return s.Ctx } + +// func (s *Server) Owners() [][]byte { return s.owners } + +// func (s *Server) AuthRequired() bool { return s.authRequired } + +// func (s *Server) Options() *options.T { return s.options } + +var _ interfaces.Server = &Server{} diff --git a/realy/server-publish.go b/realy/server-publish.go new file mode 100644 index 0000000..f328d49 --- /dev/null +++ b/realy/server-publish.go @@ -0,0 +1,155 @@ +package realy + +import ( + "bytes" + "errors" + "fmt" + "orly.dev/chk" + "orly.dev/errorf" + "orly.dev/log" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/filter" + "orly.dev/interfaces/store" + "orly.dev/kinds" + "orly.dev/normalize" + 
"orly.dev/tag" +) + +func (s *Server) Publish(c context.T, evt *event.E) (err error) { + sto := s.relay.Storage() + if evt.Kind.IsEphemeral() { + // do not store ephemeral events + return nil + + } else if evt.Kind.IsReplaceable() { + // replaceable event, delete before storing + var evs []*event.E + f := filter.New() + f.Authors = tag.New(evt.Pubkey) + f.Kinds = kinds.New(evt.Kind) + evs, err = sto.QueryEvents(c, f) + if err != nil { + return fmt.Errorf("failed to query before replacing: %w", err) + } + if len(evs) > 0 { + log.T.F("found %d possible duplicate events", len(evs)) + for _, ev := range evs { + del := true + if bytes.Equal(ev.Id, evt.Id) { + continue + } + log.I.F( + "maybe replace %s with %s", ev.Serialize(), evt.Serialize(), + ) + if ev.CreatedAt.Int() > evt.CreatedAt.Int() { + return errorf.W(string(normalize.Invalid.F("not replacing newer replaceable event"))) + } + // not deleting these events because some clients are retarded + // and the query will pull the new one but a backup can recover + // the data of old ones + if ev.Kind.IsDirectoryEvent() { + del = false + } + // defer the delete until after the save, further down, has + // completed. + if del { + defer func() { + if err != nil { + // something went wrong saving the replacement, so we won't delete + // the event. + return + } + log.T.C( + func() string { + return fmt.Sprintf( + "%s\nreplacing\n%s", evt.Serialize(), + ev.Serialize(), + ) + }, + ) + // replaceable events we don't tombstone when replacing, + // so if deleted, old versions can be restored + if err = sto.DeleteEvent(c, ev.EventId()); // true, + chk.E(err) { + return + } + }() + } + } + } + } else if evt.Kind.IsParameterizedReplaceable() { + log.I.F("parameterized replaceable %s", evt.Serialize()) + // parameterized replaceable event, delete before storing + var evs []*event.E + f := filter.New() + f.Authors = tag.New(evt.Pubkey) + f.Kinds = kinds.New(evt.Kind) + log.I.S(evt) + log.I.F( + "filter for parameterized replaceable %v %s", + f.Tags.ToStringsSlice(), + f.Serialize(), + ) + if evs, err = sto.QueryEvents(c, f); err != nil { + return errorf.E("failed to query before replacing: %w", err) + } + if len(evs) > 0 { + for _, ev := range evs { + del := true + err = nil + log.I.F( + "maybe replace %s with %s", ev.Serialize(), evt.Serialize(), + ) + if ev.CreatedAt.Int() > evt.CreatedAt.Int() { + return errorf.D(string(normalize.Blocked.F("not replacing newer parameterized replaceable event"))) + } + // not deleting these events because some clients are retarded + // and the query will pull the new one, but a backup can recover + // the data of old ones + if ev.Kind.IsDirectoryEvent() { + del = false + } + evdt := ev.Tags.GetFirst(tag.New("d")) + evtdt := evt.Tags.GetFirst(tag.New("d")) + log.I.F( + "%s != %s %v", evdt.Value(), evtdt.Value(), + !bytes.Equal(evdt.Value(), evtdt.Value()), + ) + if !bytes.Equal(evdt.Value(), evtdt.Value()) { + continue + } + if del { + defer func() { + if err != nil { + // something went wrong saving the replacement, so we won't delete + // the event. 
+ return + } + log.T.C( + func() string { + return fmt.Sprintf( + "%s\nreplacing\n%s", evt.Serialize(), + ev.Serialize(), + ) + }, + ) + // replaceable events we don't tombstone when replacing, + // so if deleted, old versions can be restored + if err = sto.DeleteEvent(c, ev.EventId()); // true, + chk.E(err) { + return + } + }() + } + } + } + } + if _, _, err = sto.SaveEvent(c, evt); chk.E(err) && !errors.Is( + err, store.ErrDupEvent, + ) { + return errorf.E("failed to save: %w", err) + } + return +} diff --git a/realy/server.go b/realy/server.go new file mode 100644 index 0000000..628ba47 --- /dev/null +++ b/realy/server.go @@ -0,0 +1,186 @@ +package realy + +import ( + _ "embed" + "errors" + "fmt" + "net" + "net/http" + "orly.dev/chk" + "orly.dev/log" + realy_lol "orly.dev/version" + "strconv" + "sync" + "time" + + "github.com/danielgtaylor/huma/v2" + "github.com/fasthttp/websocket" + "github.com/rs/cors" + + "orly.dev/context" + "orly.dev/openapi" + "orly.dev/realy/helpers" + "orly.dev/realy/options" + "orly.dev/realy/publish" + "orly.dev/relay" + "orly.dev/signer" + "orly.dev/socketapi" +) + +type Server struct { + Ctx context.T + Cancel context.F + options *options.T + relay relay.I + clientsMu sync.Mutex + clients map[*websocket.Conn]struct{} + Addr string + mux *openapi.ServeMux + httpServer *http.Server + // authRequired bool + // publicReadable bool + // maxLimit int + // admins []signer.I + // owners [][]byte + listeners *publish.S + huma.API + // ConfigurationMx sync.Mutex + // configuration *store.Configuration +} + +type ServerParams struct { + Ctx context.T + Cancel context.F + Rl relay.I + DbPath string + MaxLimit int + Admins []signer.I + Owners [][]byte + PublicReadable bool +} + +func NewServer(sp *ServerParams, opts ...options.O) (s *Server, err error) { + op := options.Default() + for _, opt := range opts { + opt(op) + } + // var authRequired bool + // if ar, ok := sp.Rl.(relay.Authenticator); ok { + // authRequired = ar.AuthRequired() + // } + if storage := sp.Rl.Storage(); storage != nil { + if err = storage.Init(sp.DbPath); chk.T(err) { + return nil, fmt.Errorf("storage init: %w", err) + } + } + serveMux := openapi.NewServeMux() + s = &Server{ + Ctx: sp.Ctx, + Cancel: sp.Cancel, + relay: sp.Rl, + clients: make(map[*websocket.Conn]struct{}), + mux: serveMux, + options: op, + // authRequired: authRequired, + // publicReadable: sp.PublicReadable, + // maxLimit: sp.MaxLimit, + // admins: sp.Admins, + // owners: sp.Rl.Owners(), + listeners: publish.New(socketapi.New(), openapi.New()), + API: openapi.NewHuma( + serveMux, sp.Rl.Name(), realy_lol.V, + realy_lol.Description, + ), + } + // register the http API operations + huma.AutoRegister(s.API, openapi.NewOperations(s)) + // load configuration if it has been set + // if c, ok := s.relay.Storage().(store.Configurationer); ok { + // s.ConfigurationMx.Lock() + // if s.configuration, err = c.GetConfiguration(); chk.E(err) { + // s.configuration = &store.Configuration{} + // } + // s.ConfigurationMx.Unlock() + // } + + go func() { + if err := s.relay.Init(); chk.E(err) { + s.Shutdown() + } + }() + // if inj, ok := s.relay.(relay.Injector); ok { + // go func() { + // for ev := range inj.InjectEvents() { + // s.listeners.Deliver(ev) + // } + // }() + // } + return s, nil +} + +// ServeHTTP implements the relay's http handler. 
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // remote := helpers.GetRemoteFromReq(r) + // for _, a := range s.Configuration().BlockList { + // if strings.HasPrefix(remote, a) { + // // log.W.F("rejecting request from %s because on blocklist", remote) + // http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden) + // return + // } + // } + // standard nostr protocol only governs the "root" path of the relay and websockets + if r.URL.Path == "/" && r.Header.Get("Accept") == "application/nostr+json" { + s.handleRelayInfo(w, r) + return + } + if r.URL.Path == "/" && r.Header.Get("Upgrade") == "websocket" { + s.handleWebsocket(w, r) + return + } + log.I.F( + "http request: %s from %s", r.URL.String(), helpers.GetRemoteFromReq(r), + ) + s.mux.ServeHTTP(w, r) +} + +// Start up the relay. +func (s *Server) Start(host string, port int, started ...chan bool) error { + addr := net.JoinHostPort(host, strconv.Itoa(port)) + log.I.F("starting relay listener at %s", addr) + ln, err := net.Listen("tcp", addr) + if err != nil { + return err + } + // s.Addr = ln.Addr().String() // todo: this doesn't seem to do anything + s.httpServer = &http.Server{ + Handler: cors.Default().Handler(s), + Addr: addr, + ReadHeaderTimeout: 7 * time.Second, + IdleTimeout: 28 * time.Second, + } + for _, startedC := range started { + close(startedC) + } + if err = s.httpServer.Serve(ln); errors.Is(err, http.ErrServerClosed) { + } else if err != nil { + } + return nil +} + +// Shutdown the relay. +func (s *Server) Shutdown() { + log.I.Ln("shutting down relay") + s.Cancel() + log.W.Ln("closing event store") + chk.E(s.relay.Storage().Close()) + log.W.Ln("shutting down relay listener") + chk.E(s.httpServer.Shutdown(s.Ctx)) + if f, ok := s.relay.(relay.ShutdownAware); ok { + f.OnShutdown(s.Ctx) + } +} + +// Router returns the servemux that handles paths on the HTTP server of the relay. 
+func (s *Server) Router() *http.ServeMux { + return s.mux.ServeMux +} diff --git a/realy/server_test.go b/realy/server_test.go new file mode 100644 index 0000000..c6bddb9 --- /dev/null +++ b/realy/server_test.go @@ -0,0 +1,92 @@ +package realy + +//func TestServerStartShutdown(t *testing.T) { +// var ( +// inited bool +// storeInited bool +// shutdown bool +// ) +// c, cancel := context.Cancel(context.Bg()) +// rl := &testRelay{ +// c: c, +// Cancel: cancel, +// name: "test server start", +// init: func() error { +// inited = true +// return nil +// }, +// onShutdown: func(context.T) { shutdown = true }, +// storage: &testStorage{ +// init: func() error { storeInited = true; return nil }, +// }, +// } +// srv, _ := NewServer(&ServerParams{ +// Ctx: c, +// Cancel: cancel, +// Rl: rl, +// MaxLimit: ratel.DefaultMaxLimit, +// }) +// ready := make(chan bool) +// done := make(chan error) +// go func() { +// done <- srv.Start("127.0.0.1", 0, ready) +// close(done) +// }() +// <-ready +// +// // verify everything's initialized +// if !inited { +// t.Error("didn't call testRelay.init") +// } +// if !storeInited { +// t.Error("didn't call testStorage.init") +// } +// +// // check that http requests are served +// if _, err := http.Get("http://" + srv.Addr); chk.T(err) { +// t.Errorf("GET %s: %v", srv.Addr, err) +// } +// +// // verify server shuts down +// defer srv.Cancel() +// srv.Shutdown() +// if !shutdown { +// t.Error("didn't call testRelay.onShutdown") +// } +// select { +// case err := <-done: +// if err != nil { +// t.Errorf("srv.Start: %v", err) +// } +// case <-time.After(time.Second): +// t.Error("srv.Start too long to return") +// } +//} + +//func TestServerShutdownWebsocket(t *testing.T) { +// // set up a new relay server +// srv := startTestRelay(context.Bg(), t, &testRelay{storage: &testStorage{}}) +// +// // connect a client to it +// ctx1, cancel := context.Timeout(context.Bg(), 2*time.Second) +// defer cancel() +// client, err := ws.RelayConnect(ctx1, "ws://"+srv.Addr) +// if err != nil { +// t.Fatalf("nostr.RelayConnectContext: %v", err) +// } +// +// // now, shut down the server +// defer srv.Cancel() +// srv.Shutdown() +// +// // wait for the client to receive a "connection close" +// time.Sleep(1 * time.Second) +// err = client.ConnectionError +// if e := errors.Unwrap(err); e != nil { +// err = e +// } +// var closedError wsutil.ClosedError +// if !errors.As(err, &closedError) { +// t.Errorf("client.ConnectionError: %v (%T); want wsutil.ClosedError", err, err) +// } +//} diff --git a/realy/testrelay.go b/realy/testrelay.go new file mode 100644 index 0000000..4e62fa3 --- /dev/null +++ b/realy/testrelay.go @@ -0,0 +1,153 @@ +package realy + +import ( + "io" + "net/http" + "testing" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/eventid" + "orly.dev/filter" + "orly.dev/interfaces/store" + "orly.dev/units" +) + +func startTestRelay(c context.T, t *testing.T, tr *testRelay) *Server { + t.Helper() + srv, _ := NewServer( + &ServerParams{ + Ctx: c, + Cancel: func() {}, + Rl: tr, + MaxLimit: 500 * units.Kb, + }, + ) + started := make(chan bool) + go srv.Start("127.0.0.1", 0, started) + <-started + return srv +} + +type testRelay struct { + c context.T + Cancel context.F + name string + storage store.I + init func() error + onShutdown func(context.T) + acceptEvent func(*event.E) bool +} + +func (tr *testRelay) Name() string { return tr.name } +func (tr *testRelay) Storage() store.I { return tr.storage } +func (tr *testRelay) Origin() string { return "example.com" } +func (tr 
*testRelay) Init() error { + tr.c, tr.Cancel = context.Cancel(context.Bg()) + if fn := tr.init; fn != nil { + return fn() + } + return nil +} + +func (tr *testRelay) NoLimiter(pubKey []byte) bool { + return false +} + +func (tr *testRelay) Owners() [][]byte { return nil } + +func (tr *testRelay) OnShutdown(c context.T) { + if fn := tr.onShutdown; fn != nil { + fn(c) + } +} + +func (tr *testRelay) AcceptEvent( + c context.T, evt *event.E, hr *http.Request, origin string, + authedPubkey []byte, +) (ok bool, notice string, after func()) { + if fn := tr.acceptEvent; fn != nil { + return fn(evt), "", nil + } + return true, "", nil +} + +type testStorage struct { + init func() error + close func() + queryEvents func(context.T, *filter.F) ([]*event.E, error) + deleteEvent func(c context.T, eid *eventid.T, noTombstone ...bool) error + saveEvent func(context.T, *event.E) error + countEvents func(context.T, *filter.F) (int, bool, error) +} + +func (string *testStorage) Import(r io.Reader) { + // TODO implement me + panic("implement me") +} + +func (string *testStorage) Export(c context.T, w io.Writer, pubkeys ...[]byte) { + // TODO implement me + panic("implement me") +} + +func (string *testStorage) Sync() (err error) { + // TODO implement me + panic("implement me") +} + +func (string *testStorage) Nuke() (err error) { + // TODO implement me + panic("implement me") +} + +func (string *testStorage) Path() string { + // TODO implement me + panic("implement me") +} + +func (string *testStorage) Init(path string) error { + if fn := string.init; fn != nil { + return fn() + } + return nil +} + +func (string *testStorage) Close() (err error) { + if fn := string.close; fn != nil { + fn() + } + return +} + +func (string *testStorage) QueryEvents(c context.T, f *filter.F) ( + evs event.S, err error, +) { + if fn := string.queryEvents; fn != nil { + return fn(c, f) + } + return nil, nil +} + +func (string *testStorage) DeleteEvent(c context.T, ev *eventid.T) error { + if fn := string.deleteEvent; fn != nil { + return fn(c, ev) + } + return nil +} + +func (string *testStorage) SaveEvent(c context.T, e *event.E) error { + if fn := string.saveEvent; fn != nil { + return fn(c, e) + } + return nil +} + +func (string *testStorage) CountEvents(c context.T, f *filter.F) ( + int, bool, error, +) { + if fn := string.countEvents; fn != nil { + return fn(c, f) + } + return 0, false, nil +} diff --git a/reason/reason.go b/reason/reason.go deleted file mode 100644 index 94191e4..0000000 --- a/reason/reason.go +++ /dev/null @@ -1,54 +0,0 @@ -package reason - -import ( - "bytes" - "errors" - "fmt" -) - -// R is the machine-readable prefix before the colon in an OK or CLOSED envelope message. -// Below are the most common kinds that are mentioned in NIP-01. -type R []byte - -var ( - AuthRequired = R("auth-required") - PoW = R("pow") - Duplicate = R("duplicate") - Blocked = R("blocked") - RateLimited = R("rate-limited") - Invalid = R("invalid") - Error = R("error") - Unsupported = R("unsupported") - Restricted = R("restricted") -) - -// S returns the R as a string -func (r R) S() string { return string(r) } - -// B returns the R as a byte slice. -func (r R) B() []byte { return r } - -// IsPrefix returns whether a text contains the same R prefix. -func (r R) IsPrefix(reason []byte) bool { - return bytes.HasPrefix(reason, r.B()) -} - -// F allows creation of a full R text with a printf style format. -func (r R) F(format string, params ...any) []byte { - return Msg(r, format, params...) 
-} - -// Err allows creation of the error reason as an error -func (r R) Err(format string, params ...any) error { - b := Msg(r, format, params...) - return errors.New(string(b)) -} - -// Msg constructs a properly formatted message with a machine-readable prefix for OK and CLOSED -// envelopes. -func Msg(prefix R, format string, params ...any) []byte { - if len(prefix) < 1 { - prefix = Error - } - return []byte(fmt.Sprintf(prefix.S()+": "+format, params...)) -} diff --git a/relay/interface.go b/relay/interface.go new file mode 100644 index 0000000..81cd309 --- /dev/null +++ b/relay/interface.go @@ -0,0 +1,121 @@ +// Package relay contains a collection of interfaces for enabling the building +// of modular nostr relay implementations. +package relay + +import ( + "orly.dev/context" + "orly.dev/interfaces/store" + "orly.dev/relayinfo" +) + +// I is the main interface for implementing a nostr relay. +type I interface { + // Name is used as the "name" field in NIP-11 and as a prefix in default Server logging. + // For other NIP-11 fields, see [Informationer]. + Name() string + // Init is called at the very beginning by [Server.Start], allowing a realy + // to initialize its internal resources. + // Also see [eventstore.I.Init]. + Init() error + // Storage returns the realy storage implementation. + Storage() store.I + // // Owners returns the list of pubkeys designated as owners of the relay. + // Owners() [][]byte + // // AcceptEvent is called for every nostr event received by the server. + // // + // // If the returned value is true, the event is passed on to [Storage.SaveEvent]. + // // Otherwise, the server responds with a negative and "blocked" message as described + // // in NIP-20. + // // + // // Moderation via follow/mute lists of moderator npubs should deny events from + // // npubs listed in moderator mute lists. Events submitted by users not on the + // // moderator follow lists but submitting events containing p tags for direct + // // messages, that are not on the mute list, that do not yet have a reply, should accept + // // direct and group message events until there is three and thereafter will be restricted + // // until the user adds them to their follow list. + // AcceptEvent(c context.T, ev *event.E, hr *http.Request, origin string, + // authedPubkey []byte) (accept bool, notice string, afterSave func()) +} + +// // ReqAcceptor is the main interface for implementing a nostr +// type ReqAcceptor interface { +// // AcceptReq is called for every nostr request filters received by the +// // server. If the returned value is true, the filters is passed on to +// // [Storage.QueryEvent]. +// // +// // If moderation of access by follow/mute list of moderator npubs is enabled, +// // only users in the follow lists of mods are allowed read access (accepting +// // requests), all others should receive an OK,false,restricted response if +// // authed and if not authed CLOSED,restricted. +// // +// // If a user is not whitelisted by follow and not blacklisted by mute and the +// // request is for a message that contains their npub in a `p` tag that are +// // direct or group chat messages they also can be accepted, enabling full +// // support for in/outbox access. 
+// // +// // In order to support the ability to respond to +// AcceptReq(c context.T, hr *http.Request, id []byte, ff *filters.T, +// authedPubkey []byte) (allowed *filters.T, +// ok bool, modified bool) +// } +// +// type FilterAcceptor interface { +// // AcceptFilter is basically the same as AcceptReq except it is additional to +// // enable the simplified filter query type. +// AcceptFilter(c context.T, hr *http.Request, f *filter.S, +// authedPubkey []byte) (allowed *filter.S, ok bool, modified bool) +// } +// +// // Authenticator is the interface for implementing NIP-42. +// // ServiceURL() returns the URL used to verify the "AUTH" event from clients. +// type Authenticator interface { +// AuthRequired() bool +// ServiceUrl(r *http.Request) string +// } +// +// type Injector interface { +// InjectEvents() event.C +// } + +// Informationer is called to compose NIP-11 response to an HTTP request +// with application/nostr+json mime type. +// See also [I.Name]. +type Informationer interface { + GetNIP11InformationDocument() *relayinfo.T +} + +// // WebSocketHandler is passed nostr message types unrecognized by the server. The +// // server handles "EVENT", "REQ" and "CLOSE" messages, as described in NIP-01. +// type WebSocketHandler interface { +// HandleUnknownType(ws *ws.Listener, t string, request []byte) +// } + +// ShutdownAware is called during the server shutdown. +// See [Server.Shutdown] for details. +type ShutdownAware interface { + OnShutdown(context.T) +} + +// Logger is what [Server] uses to log messages. +type Logger interface { + Infof(format string, v ...any) + Warningf(format string, v ...any) + Errorf(format string, v ...any) +} + +// // AdvancedDeleter methods are called before and after [Storage.DeleteEvent]. +// type AdvancedDeleter interface { +// BeforeDelete(ctx context.T, id, pubkey []byte) +// AfterDelete(id, pubkey []byte) +// } + +// // AdvancedSaver methods are called before and after [Storage.SaveEvent]. +// type AdvancedSaver interface { +// BeforeSave(context.T, *event.E) +// AfterSave(*event.E) +// } + +// // EventCounter implements the NIP-45 count API. +// type EventCounter interface { +// CountEvents(c context.T, f *filter.T) (count int, approx bool, err error) +// } diff --git a/relayinfo/fetch.go b/relayinfo/fetch.go index ac850d5..ce50a51 100644 --- a/relayinfo/fetch.go +++ b/relayinfo/fetch.go @@ -4,11 +4,11 @@ import ( "encoding/json" "io" "net/http" + "orly.dev/chk" + "orly.dev/errorf" "time" - "orly.dev/chk" "orly.dev/context" - "orly.dev/errorf" "orly.dev/normalize" ) diff --git a/relayinfo/types.go b/relayinfo/types.go index c14ed85..7ca38cf 100644 --- a/relayinfo/types.go +++ b/relayinfo/types.go @@ -3,13 +3,13 @@ package relayinfo import ( "encoding/json" "errors" + "orly.dev/chk" + "orly.dev/log" "os" "sort" "sync" - "orly.dev/chk" "orly.dev/kinds" - "orly.dev/log" "orly.dev/number" "orly.dev/timestamp" ) @@ -66,7 +66,7 @@ var ( "Nostr Marketplace (for resilient marketplaces)", 15, } NIP15 = NostrMarketplace - EventTreatment = NIP{"Event Treatment", 16} + EventTreatment = NIP{"EVent Treatment", 16} NIP16 = EventTreatment Reposts = NIP{"Reposts", 18} NIP18 = Reposts diff --git a/reload.sh b/reload.sh new file mode 100755 index 0000000..9d7a710 --- /dev/null +++ b/reload.sh @@ -0,0 +1,7 @@ +#!/usr/bin/bash +until false; do + echo "Respawning.." >&2 + sleep 1 + reset + go run ./cmd/realy/. 
+done diff --git a/scripts/runtests.sh b/scripts/runtests.sh new file mode 100644 index 0000000..4a0e704 --- /dev/null +++ b/scripts/runtests.sh @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +go test -v ./... -bench=. -run=xxx -benchmem \ No newline at end of file diff --git a/servemux/serveMux.go b/servemux/serveMux.go deleted file mode 100644 index 451decc..0000000 --- a/servemux/serveMux.go +++ /dev/null @@ -1,26 +0,0 @@ -package servemux - -import ( - "net/http" -) - -type S struct { - *http.ServeMux -} - -func New() (c *S) { - c = &S{http.NewServeMux()} - return -} - -func (c *S) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE") - w.Header().Set( - "Access-Control-Allow-Headers", "Content-Type, Authorization,Upgrade", - ) - if r.Method == http.MethodOptions { - return - } - c.ServeMux.ServeHTTP(w, r) -} diff --git a/server/add-event.go b/server/add-event.go deleted file mode 100644 index f2444c2..0000000 --- a/server/add-event.go +++ /dev/null @@ -1,24 +0,0 @@ -package server - -import ( - "net/http" - "orly.dev/chk" - "orly.dev/context" - "orly.dev/event" - "orly.dev/log" -) - -func (s *S) AddEvent( - c context.T, ev *event.E, hr *http.Request, remote string, -) (accepted bool, message []byte) { - if !ev.Kind.IsEphemeral() { - if _, _, err := s.Store.SaveEvent(c, ev); chk.E(err) { - message = []byte(err.Error()) - return - } - } else { - log.I.F("ephemeral event %s", ev.Serialize()) - } - accepted = true - return -} diff --git a/server/handle-relayinfo.go b/server/handle-relayinfo.go deleted file mode 100644 index 2c9ab78..0000000 --- a/server/handle-relayinfo.go +++ /dev/null @@ -1,54 +0,0 @@ -package server - -import ( - "encoding/json" - "net/http" - "orly.dev/chk" - "orly.dev/helpers" - "orly.dev/log" - "orly.dev/relayinfo" - "orly.dev/version" - "sort" -) - -func (s *S) HandleRelayInfo(w http.ResponseWriter, r *http.Request) { - remote := helpers.GetRemoteFromReq(r) - log.T.F("handling relay info request from %s", remote) - r.Header.Set("Content-Type", "application/json") - var info *relayinfo.T - supportedNIPs := relayinfo.GetList( - relayinfo.BasicProtocol, - relayinfo.RelayInformationDocument, - relayinfo.GenericTagQueries, - relayinfo.EventTreatment, - relayinfo.ParameterizedReplaceableEvents, - // relayinfo.CommandResults, - // relayinfo.NostrMarketplace, - // relayinfo.EncryptedDirectMessage, - // relayinfo.EventDeletion, - // relayinfo.ExpirationTimestamp, - // relayinfo.ProtectedEvents, - // relayinfo.RelayListMetadata, - ) - // if s.ServiceURL(r) != "" { - // supportedNIPs = append(supportedNIPs, relayinfo.Authentication.N()) - // } - sort.Sort(supportedNIPs) - log.T.Ln("supported NIPs", supportedNIPs) - info = &relayinfo.T{ - Name: s.Cfg.AppName, - Description: version.Description, - Nips: supportedNIPs, - Software: version.URL, - Version: version.V, - Limitation: relayinfo.Limits{ - // MaxLimit: s.MaxLimit, - // AuthRequired: s.AuthRequired(), - // RestrictedWrites: !s.PublicReadable() || s.AuthRequired() || len(s.owners) > 0, - }, - Icon: "https://cdn.satellite.earth/ac9778868fbf23b63c47c769a74e163377e6ea94d3f0f31711931663d035c4f6.png", - } - if err := json.NewEncoder(w).Encode(info); chk.E(err) { - - } -} diff --git a/server/server.go b/server/server.go deleted file mode 100644 index 9a10910..0000000 --- a/server/server.go +++ /dev/null @@ -1,79 +0,0 @@ -package server - -import ( - "errors" - "github.com/danielgtaylor/huma/v2" - 
"github.com/rs/cors" - "net" - "net/http" - "orly.dev/chk" - "orly.dev/config" - "orly.dev/context" - "orly.dev/interfaces/store" - "orly.dev/log" - "orly.dev/servemux" - "sync" - "time" -) - -type S struct { - Ctx context.T - Cancel context.F - WG *sync.WaitGroup - Addr string - Cfg *config.C - Mux *servemux.S - HTTPServer *http.Server - Store store.I - huma.API -} - -func (s *S) Storage() store.I { return s.Store } - -func (s *S) Init() {} - -func (s *S) Start() (err error) { - s.WG.Add(1) - s.Init() - var listener net.Listener - if listener, err = net.Listen("tcp", s.Addr); chk.E(err) { - return - } - s.HTTPServer = &http.Server{ - Handler: cors.Default().Handler(s), - Addr: s.Addr, - ReadHeaderTimeout: 7 * time.Second, - IdleTimeout: 28 * time.Second, - } - if s.Cfg.DNS != "" { - log.I.F("listening on %s http://%s", s.Cfg.DNS, s.Addr) - } else { - log.I.F("listening on http://%s\n", s.Addr) - } - if err = s.HTTPServer.Serve(listener); errors.Is( - err, http.ErrServerClosed, - ) { - err = nil - return - } else if chk.E(err) { - return - } - return -} - -// ServeHTTP is the server http.Handler. -func (s *S) ServeHTTP(w http.ResponseWriter, r *http.Request) { - s.Mux.ServeHTTP(w, r) -} - -func (s *S) Shutdown() { - log.W.Ln("shutting down relay") - s.Cancel() - // log.W.Ln("closing event store") - // chk.E(s.Store.Close()) - log.W.Ln("shutting down relay listener") - chk.E(s.HTTPServer.Shutdown(s.Ctx)) - s.WG.Done() -} - -func (s *S) Context() context.T { return s.Ctx } diff --git a/sha256/LICENSE b/sha256/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/sha256/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/sha256/README.md b/sha256/README.md new file mode 100644 index 0000000..ac5c783 --- /dev/null +++ b/sha256/README.md @@ -0,0 +1,137 @@ +# sha256-simd + +Accelerate SHA256 computations in pure Go using AVX512, SHA Extensions for x86 and ARM64 for ARM. +On AVX512 it provides an up to 8x improvement (over 3 GB/s per core). +SHA Extensions give a performance boost of close to 4x over native. + +## Introduction + +This package is designed as a replacement for `crypto/sha256`. +For ARM CPUs with the Cryptography Extensions, advantage is taken of the SHA2 instructions resulting in a massive performance improvement. + +This package uses Golang assembly. +The AVX512 version is based on the Intel's "multi-buffer crypto library for IPSec" whereas the other Intel implementations are described in "Fast SHA-256 Implementations on Intel Architecture Processors" by J. Guilford et al. 
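Since the package mirrors the `crypto/sha256` API (see `New` and `Sum256` in `sha256.go` later in this patch), a minimal usage sketch might look like the following; the `orly.dev/sha256` import path is an assumption based on this repository's module layout.

```go
package main

import (
	"encoding/hex"
	"fmt"

	"orly.dev/sha256" // assumed path; adjust to your module layout
)

func main() {
	// Streaming interface, identical to crypto/sha256.
	h := sha256.New()
	h.Write([]byte("hello "))
	h.Write([]byte("world"))
	fmt.Println(hex.EncodeToString(h.Sum(nil)))

	// One-shot helper.
	sum := sha256.Sum256([]byte("hello world"))
	fmt.Println(hex.EncodeToString(sum[:]))
}
```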
+ +## Support for Intel SHA Extensions + +Support for the Intel SHA Extensions has been added by Kristofer Peterson (@svenski123), originally developed for spacemeshos [here](https://github.com/spacemeshos/POET/issues/23). On CPUs that support it (known thus far Intel Celeron J3455 and AMD Ryzen) it gives a significant boost in performance (with thanks to @AudriusButkevicius for reporting the results; full results [here](https://github.com/minio/sha256-simd/pull/37#issuecomment-451607827)). + +``` +$ benchcmp avx2.txt sha-ext.txt +benchmark AVX2 MB/s SHA Ext MB/s speedup +BenchmarkHash5M 514.40 1975.17 3.84x +``` + +Thanks to Kristofer Peterson, we also added additional performance changes such as optimized padding, +endian conversions which sped up all implementations i.e. Intel SHA alone while doubled performance for small sizes, +the other changes increased everything roughly 50%. + +## Support for AVX512 + +We have added support for AVX512 which results in an up to 8x performance improvement over AVX2 (3.0 GHz Xeon Platinum 8124M CPU): + +``` +$ benchcmp avx2.txt avx512.txt +benchmark AVX2 MB/s AVX512 MB/s speedup +BenchmarkHash5M 448.62 3498.20 7.80x +``` + +The original code was developed by Intel as part of the [multi-buffer crypto library](https://github.com/intel/intel-ipsec-mb) for IPSec or more specifically this [AVX512](https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm) implementation. The key idea behind it is to process a total of 16 checksums in parallel by “transposing” 16 (independent) messages of 64 bytes between a total of 16 ZMM registers (each 64 bytes wide). + +Transposing the input messages means that in order to take full advantage of the speedup you need to have a (server) workload where multiple threads are doing SHA256 calculations in parallel. Unfortunately for this algorithm it is not possible for two message blocks processed in parallel to be dependent on one another — because then the (interim) result of the first part of the message has to be an input into the processing of the second part of the message. + +Whereas the original Intel C implementation requires some sort of explicit scheduling of messages to be processed in parallel, for Golang it makes sense to take advantage of channels in order to group messages together and use channels as well for sending back the results (thereby effectively decoupling the calculations). We have implemented a fairly simple scheduling mechanism that seems to work well in practice. + +Due to this different way of scheduling, we decided to use an explicit method to instantiate the AVX512 version. Essentially one or more AVX512 processing servers ([`Avx512Server`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L294)) have to be created whereby each server can hash over 3 GB/s on a single core. An `hash.Hash` object ([`Avx512Digest`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L45)) is then instantiated using one of these servers and used in the regular fashion: + +```go +import "mleku.dev/pkg/sha256" + +func main() { + server := sha256.NewAvx512Server() + h512 := sha256.NewAvx512(server) + h512.Write(fileBlock) + digest := h512.Sum([]byte{}) +} +``` + +Note that, because of the scheduling overhead, for small messages (< 1 MB) you will be better off using the regular SHA256 hashing (but those are typically not performance critical anyway). 
Some other tips to get the best performance: +* Have many go routines doing SHA256 calculations in parallel. +* Try to Write() messages in multiples of 64 bytes. +* Try to keep the overall length of messages to a roughly similar size ie. 5 MB (this way all 16 ‘lanes’ in the AVX512 computations are contributing as much as possible). + +More detailed information can be found in this [blog](https://blog.minio.io/accelerate-sha256-up-to-8x-over-3-gb-s-per-core-with-avx512-a0b1d64f78f) post including scaling across cores. + +## Drop-In Replacement + +The following code snippet shows how you can use `github.com/minio/sha256-simd`. +This will automatically select the fastest method for the architecture on which it will be executed. + +```go +import "github.com/minio/sha256-simd" + +func main() { + ... + shaWriter := sha256.New() + io.Copy(shaWriter, file) + ... +} +``` + +## Performance + +Below is the speed in MB/s for a single core (ranked fast to slow) for blocks larger than 1 MB. + +| Processor | SIMD | Speed (MB/s) | +| --------------------------------- | ------- | ------------:| +| 3.0 GHz Intel Xeon Platinum 8124M | AVX512 | 3498 | +| 3.7 GHz AMD Ryzen 7 2700X | SHA Ext | 1979 | +| 1.2 GHz ARM Cortex-A53 | ARM64 | 638 | + +## asm2plan9s + +In order to be able to work more easily with AVX512/AVX2 instructions, a separate tool was developed to convert SIMD instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/minio/asm2plan9s) for more information. + +## Why and benefits + +One of the most performance sensitive parts of the [Minio](https://github.com/minio/minio) object storage server is related to SHA256 hash sums calculations. For instance during multi part uploads each part that is uploaded needs to be verified for data integrity by the server. + +Other applications that can benefit from enhanced SHA256 performance are deduplication in storage systems, intrusion detection, version control systems, integrity checking, etc. + +## ARM SHA Extensions + +The 64-bit ARMv8 core has introduced new instructions for SHA1 and SHA2 acceleration as part of the [Cryptography Extensions](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0501f/CHDFJBCJ.html). Below you can see a small excerpt highlighting one of the rounds as is done for the SHA256 calculation process (for full code see [sha256block_arm64.s](https://github.com/minio/sha256-simd/blob/master/sha256block_arm64.s)). + + ``` + sha256h q2, q3, v9.4s + sha256h2 q3, q4, v9.4s + sha256su0 v5.4s, v6.4s + rev32 v8.16b, v8.16b + add v9.4s, v7.4s, v18.4s + mov v4.16b, v2.16b + sha256h q2, q3, v10.4s + sha256h2 q3, q4, v10.4s + sha256su0 v6.4s, v7.4s + sha256su1 v5.4s, v7.4s, v8.4s + ``` + +### Detailed benchmarks + +Benchmarks generated on a 1.2 Ghz Quad-Core ARM Cortex A53 equipped [Pine64](https://www.pine64.com/). + +``` +minio@minio-arm:$ benchcmp golang.txt arm64.txt +benchmark golang arm64 speedup +BenchmarkHash8Bytes-4 0.68 MB/s 5.70 MB/s 8.38x +BenchmarkHash1K-4 5.65 MB/s 326.30 MB/s 57.75x +BenchmarkHash8K-4 6.00 MB/s 570.63 MB/s 95.11x +BenchmarkHash1M-4 6.05 MB/s 638.23 MB/s 105.49x +``` + +## License + +Released under the Apache License v2.0. You can find the complete text in the file LICENSE. + +## Contributing + +Contributions are welcome, please send PRs for any enhancements. 
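To get a rough idea of which code path the automatic selection picked on a given machine, a quick throughput measurement with the standard library's `testing.Benchmark` is enough. The `orly.dev/sha256` import path is again an assumption based on this repository's layout, not something this patch documents.

```go
package main

import (
	"fmt"
	"testing"

	"orly.dev/sha256" // assumed path; adjust to your module layout
)

func main() {
	buf := make([]byte, 1<<20) // 1 MiB blocks, large enough to exercise the SIMD paths
	res := testing.Benchmark(func(b *testing.B) {
		b.SetBytes(int64(len(buf)))
		for i := 0; i < b.N; i++ {
			sha256.Sum256(buf)
		}
	})
	mbps := float64(res.Bytes) * float64(res.N) / res.T.Seconds() / 1e6
	fmt.Printf("%v\t%.0f MB/s\n", res, mbps)
}
```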
diff --git a/sha256/cpuid_other.go b/sha256/cpuid_other.go new file mode 100644 index 0000000..de586c9 --- /dev/null +++ b/sha256/cpuid_other.go @@ -0,0 +1,52 @@ +// Minio Cloud Storage, (C) 2021 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package sha256 + +import ( + "bytes" + "io/ioutil" + "runtime" + + "github.com/klauspost/cpuid/v2" +) + +var ( + hasIntelSha = runtime.GOARCH == "amd64" && cpuid.CPU.Supports(cpuid.SHA, cpuid.SSSE3, + cpuid.SSE4) + hasAvx512 = cpuid.CPU.Supports(cpuid.AVX512F, cpuid.AVX512DQ, cpuid.AVX512BW, + cpuid.AVX512VL) +) + +func hasArmSha2() bool { + if cpuid.CPU.Has(cpuid.SHA2) { + return true + } + if runtime.GOARCH != "arm64" || runtime.GOOS != "linux" { + return false + } + + // Fall back to hacky cpuinfo parsing... + const procCPUInfo = "/proc/cpuinfo" + + // Feature to check for. + const sha256Feature = "sha2" + + cpuInfo, err := ioutil.ReadFile(procCPUInfo) + if err != nil { + return false + } + return bytes.Contains(cpuInfo, []byte(sha256Feature)) +} diff --git a/sha256/doc.go b/sha256/doc.go new file mode 100644 index 0000000..b00ce98 --- /dev/null +++ b/sha256/doc.go @@ -0,0 +1,6 @@ +// Package sha256 is taken from github.com/minio/sha256-simd, implementing, +// where available, an accelerated SIMD implementation of sha256. +// +// This package should be updated against the upstream version from time to +// time. +package sha256 diff --git a/sha256/sha256.go b/sha256/sha256.go new file mode 100644 index 0000000..f146bbd --- /dev/null +++ b/sha256/sha256.go @@ -0,0 +1,468 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +import ( + "crypto/sha256" + "encoding/binary" + "errors" + "hash" +) + +// Size - The size of a SHA256 checksum in bytes. +const Size = 32 + +// BlockSize - The blocksize of SHA256 in bytes. +const BlockSize = 64 + +const ( + chunk = BlockSize + init0 = 0x6A09E667 + init1 = 0xBB67AE85 + init2 = 0x3C6EF372 + init3 = 0xA54FF53A + init4 = 0x510E527F + init5 = 0x9B05688C + init6 = 0x1F83D9AB + init7 = 0x5BE0CD19 +) + +// digest represents the partial evaluation of a checksum. 
+type digest struct { + h [8]uint32 + x [chunk]byte + nx int + len uint64 +} + +// Reset digest back to default +func (d *digest) Reset() { + d.h[0] = init0 + d.h[1] = init1 + d.h[2] = init2 + d.h[3] = init3 + d.h[4] = init4 + d.h[5] = init5 + d.h[6] = init6 + d.h[7] = init7 + d.nx = 0 + d.len = 0 +} + +type blockfuncType int + +const ( + blockfuncStdlib blockfuncType = iota + blockfuncIntelSha + blockfuncArmSha2 + blockfuncForceGeneric = -1 +) + +var blockfunc blockfuncType + +func init() { + switch { + case hasIntelSha: + blockfunc = blockfuncIntelSha + case hasArmSha2(): + blockfunc = blockfuncArmSha2 + } +} + +// New returns a new hash.Hash computing the SHA256 checksum. +func New() hash.Hash { + if blockfunc == blockfuncStdlib { + // Fallback to the standard golang implementation + // if no features were found. + return sha256.New() + } + + d := new(digest) + d.Reset() + return d +} + +// Sum256 - single caller sha256 helper +func Sum256(data []byte) (result [Size]byte) { + var d digest + d.Reset() + d.Write(data) + result = d.checkSum() + return +} + +// Return size of checksum +func (d *digest) Size() int { return Size } + +// Return blocksize of checksum +func (d *digest) BlockSize() int { return BlockSize } + +// Write to digest +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + block(d, d.x[:]) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + block(d, p[:n]) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +// Return sha256 sum in bytes +func (d *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d0 := *d + hash := d0.checkSum() + return append(in, hash[:]...) 
+} + +// Intermediate checksum function +func (d *digest) checkSum() (digest [Size]byte) { + n := d.nx + + var k [64]byte + copy(k[:], d.x[:n]) + + k[n] = 0x80 + + if n >= 56 { + block(d, k[:]) + + // clear block buffer - go compiles this to optimal 1x xorps + 4x movups + // unfortunately expressing this more succinctly results in much worse code + k[0] = 0 + k[1] = 0 + k[2] = 0 + k[3] = 0 + k[4] = 0 + k[5] = 0 + k[6] = 0 + k[7] = 0 + k[8] = 0 + k[9] = 0 + k[10] = 0 + k[11] = 0 + k[12] = 0 + k[13] = 0 + k[14] = 0 + k[15] = 0 + k[16] = 0 + k[17] = 0 + k[18] = 0 + k[19] = 0 + k[20] = 0 + k[21] = 0 + k[22] = 0 + k[23] = 0 + k[24] = 0 + k[25] = 0 + k[26] = 0 + k[27] = 0 + k[28] = 0 + k[29] = 0 + k[30] = 0 + k[31] = 0 + k[32] = 0 + k[33] = 0 + k[34] = 0 + k[35] = 0 + k[36] = 0 + k[37] = 0 + k[38] = 0 + k[39] = 0 + k[40] = 0 + k[41] = 0 + k[42] = 0 + k[43] = 0 + k[44] = 0 + k[45] = 0 + k[46] = 0 + k[47] = 0 + k[48] = 0 + k[49] = 0 + k[50] = 0 + k[51] = 0 + k[52] = 0 + k[53] = 0 + k[54] = 0 + k[55] = 0 + k[56] = 0 + k[57] = 0 + k[58] = 0 + k[59] = 0 + k[60] = 0 + k[61] = 0 + k[62] = 0 + k[63] = 0 + } + binary.BigEndian.PutUint64(k[56:64], uint64(d.len)<<3) + block(d, k[:]) + + { + const i = 0 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 1 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 2 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 3 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 4 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 5 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 6 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 7 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + + return +} + +func block(dig *digest, p []byte) { + if blockfunc == blockfuncIntelSha { + blockIntelShaGo(dig, p) + } else if blockfunc == blockfuncArmSha2 { + blockArmSha2Go(dig, p) + } else { + blockGeneric(dig, p) + } +} + +func blockGeneric(dig *digest, p []byte) { + var w [64]uint32 + h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] + for len(p) >= chunk { + // Can interlace the computation of w with the + // rounds below if needed for speed. 
+ for i := 0; i < 16; i++ { + j := i * 4 + w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) + } + for i := 16; i < 64; i++ { + v1 := w[i-2] + t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10) + v2 := w[i-15] + t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3) + w[i] = t1 + w[i-7] + t2 + w[i-16] + } + + a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 + + for i := 0; i < 64; i++ { + t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] + + t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c)) + + h = g + g = f + f = e + e = d + t1 + d = c + c = b + b = a + a = t1 + t2 + } + + h0 += a + h1 += b + h2 += c + h3 += d + h4 += e + h5 += f + h6 += g + h7 += h + + p = p[chunk:] + } + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 +} + +var _K = []uint32{ + 0x428a2f98, + 0x71374491, + 0xb5c0fbcf, + 0xe9b5dba5, + 0x3956c25b, + 0x59f111f1, + 0x923f82a4, + 0xab1c5ed5, + 0xd807aa98, + 0x12835b01, + 0x243185be, + 0x550c7dc3, + 0x72be5d74, + 0x80deb1fe, + 0x9bdc06a7, + 0xc19bf174, + 0xe49b69c1, + 0xefbe4786, + 0x0fc19dc6, + 0x240ca1cc, + 0x2de92c6f, + 0x4a7484aa, + 0x5cb0a9dc, + 0x76f988da, + 0x983e5152, + 0xa831c66d, + 0xb00327c8, + 0xbf597fc7, + 0xc6e00bf3, + 0xd5a79147, + 0x06ca6351, + 0x14292967, + 0x27b70a85, + 0x2e1b2138, + 0x4d2c6dfc, + 0x53380d13, + 0x650a7354, + 0x766a0abb, + 0x81c2c92e, + 0x92722c85, + 0xa2bfe8a1, + 0xa81a664b, + 0xc24b8b70, + 0xc76c51a3, + 0xd192e819, + 0xd6990624, + 0xf40e3585, + 0x106aa070, + 0x19a4c116, + 0x1e376c08, + 0x2748774c, + 0x34b0bcb5, + 0x391c0cb3, + 0x4ed8aa4a, + 0x5b9cca4f, + 0x682e6ff3, + 0x748f82ee, + 0x78a5636f, + 0x84c87814, + 0x8cc70208, + 0x90befffa, + 0xa4506ceb, + 0xbef9a3f7, + 0xc67178f2, +} + +const ( + magic256 = "sha\x03" + marshaledSize = len(magic256) + 8*4 + chunk + 8 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic256...) + b = appendUint32(b, d.h[0]) + b = appendUint32(b, d.h[1]) + b = appendUint32(b, d.h[2]) + b = appendUint32(b, d.h[3]) + b = appendUint32(b, d.h[4]) + b = appendUint32(b, d.h[5]) + b = appendUint32(b, d.h[6]) + b = appendUint32(b, d.h[7]) + b = append(b, d.x[:d.nx]...) 
+ b = b[:len(b)+len(d.x)-d.nx] // already zero + b = appendUint64(b, d.len) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic256) || string(b[:len(magic256)]) != magic256 { + return errors.New("crypto/sha256: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/sha256: invalid hash state size") + } + b = b[len(magic256):] + b, d.h[0] = consumeUint32(b) + b, d.h[1] = consumeUint32(b) + b, d.h[2] = consumeUint32(b) + b, d.h[3] = consumeUint32(b) + b, d.h[4] = consumeUint32(b) + b, d.h[5] = consumeUint32(b) + b, d.h[6] = consumeUint32(b) + b, d.h[7] = consumeUint32(b) + b = b[copy(d.x[:], b):] + b, d.len = consumeUint64(b) + d.nx = int(d.len % chunk) + return nil +} + +func appendUint32(b []byte, v uint32) []byte { + return append(b, + byte(v>>24), + byte(v>>16), + byte(v>>8), + byte(v), + ) +} + +func appendUint64(b []byte, v uint64) []byte { + return append(b, + byte(v>>56), + byte(v>>48), + byte(v>>40), + byte(v>>32), + byte(v>>24), + byte(v>>16), + byte(v>>8), + byte(v), + ) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + _ = b[7] + x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + _ = b[3] + x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + return b[4:], x +} diff --git a/sha256/sha256_test.go b/sha256/sha256_test.go new file mode 100644 index 0000000..ce9a6ca --- /dev/null +++ b/sha256/sha256_test.go @@ -0,0 +1,2886 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Using this part of Minio codebase under the license +// Apache License Version 2.0 with modifications + +// SHA256 hash algorithm. See FIPS 180-2. + +package sha256 + +import ( + "bytes" + "encoding" + "encoding/hex" + "fmt" + "hash" + "io" + "orly.dev/chk" + "strings" + "testing" +) + +type sha256Test struct { + out [32]byte + in string +} + +var golden = []sha256Test{ + { + [32]byte{ + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 
8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + }, + "", + }, + { + [32]byte{ + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + }, + "a", + }, + { + [32]byte{ + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), 
+ (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + }, + "ab", + }, + { + [32]byte{ + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 
128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + }, + "abc", + }, + { + [32]byte{ + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) 
+ (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + }, + "abcd", + }, + { + [32]byte{ + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + }, + "abcde", + }, + { + [32]byte{ + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 
4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + }, + "abcdef", + }, + { + [32]byte{ + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 
2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + }, + "abcdefg", + }, + { + [32]byte{ + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 
1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + }, + "abcdefgh", + }, + { + [32]byte{ + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 
128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + }, + "abcdefghi", + }, + { + [32]byte{ + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + }, + "abcdefghij", + }, + { + [32]byte{ + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 
* 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + "Discard medicine more than two years old.", + }, + { + [32]byte{ + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 
* 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + "He who has a shady past knows that nice guys finish last.", + }, + { + [32]byte{ + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 
1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + }, + "I wouldn't marry him with a ten foot pole.", + }, + { + [32]byte{ + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 
16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + }, + "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave", + }, + { + [32]byte{ + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + }, + "The days of the digital watch are numbered. 
-Tom Stoppard", + }, + { + [32]byte{ + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + "Nepal premier won't resign.", + }, + { + [32]byte{ + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 
2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + }, + "For every action there is an equal and opposite government program.", + }, + { + [32]byte{ + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + 
(0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + }, + "His money is twice tainted: 'taint yours and 'taint mine.", + }, + { + [32]byte{ + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 
32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977", + }, + { + [32]byte{ + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + "It's a tiny change to the code and not 
completely disgusting. - Bob Manchek", + }, + { + [32]byte{ + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + }, + "size: a.out: bad magic", + }, + { + [32]byte{ + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), 
+ (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + }, + "The major problem is with sendmail. 
-Mark Horton", + }, + { + [32]byte{ + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + "Give me a rock, paper and scissors and I will move the world. 
CCFestoon", + }, + { + [32]byte{ + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + "If the enemy is within range, then so are you.", + }, + { + [32]byte{ + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + 
(0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + }, + "It's well we cannot hear the screams/That we create in others' dreams.", + }, + { + [32]byte{ + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 
* 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + }, + "You remind me of a TV show, but that's all right: I watch it anyway.", + }, + { + [32]byte{ + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) 
+ (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + "C is as portable as Stonehedge!!", + }, + { + [32]byte{ + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + }, + "Even if I could be Shakespeare, I think I should still choose to be 
Faraday. - A. Huxley", + }, + { + [32]byte{ + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + }, + "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. 
Lewis-Randall Rule", + }, + { + [32]byte{ + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + }, + "How can you write a big system without C++? 
-Paul Glick", + }, + // $ echo -n "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123" | sha256sum + // 13d8b6bf5cc79c03c07c719c48597bd33b79677e65098589b1580fca7f22bb22 + { + [32]byte{ + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + }, + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123", + }, + // $ echo -n "BCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234" | sha256sum + // 624ddef3009879c6874da2dd771d54f7330781b60e1955ceff5f9dce8bf4ea43 + { + [32]byte{ + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 
4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + }, + "BCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234", + }, + // $ echo -n "CDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz12345" | sha256sum + // cc031589b70dd4b24dc6def2121835ef1aa8074ff6952cdd3f81b5099a93c58d + { + [32]byte{ + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 
64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + }, + "CDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz12345", + }, + // $ echo -n "DEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123456" | sha256sum + // d354abb6d538402db3d73daf95537a255ebaf3a943c80205be163e044fc46a70 + { + [32]byte{ + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 
4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + }, + "DEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123456", + }, + // $ echo -n "EFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567" | sha256sum + // f78410b90a20b521afb28f41d6388482afab7265ff8884aa6290cc9f9ada30d3 + { + [32]byte{ + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 
64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + "EFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567", + }, + // $ echo -n "FGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz12345678" | sha256sum + // c93a8cb7ed80166b15b79c8617410ca69e46fa1e3c1d14876699d3ce6090384f + { + [32]byte{ + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 
4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + }, + "FGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz12345678", + }, + // $ echo -n "GHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123456789" | sha256sum + // 6cb808e9a7fb53fa680824f08554b660d29a4afc9a101f990b4bae3a12b7fbd8 + { + [32]byte{ + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + "GHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz123456789", + }, + // $ echo -n "HIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890" | sha256sum + // 84e8dd1afa78db222860ed40b6fcfc7a269469365f81f5712fb589555bdb01fe + { + [32]byte{ + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + 
(1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + }, + "HIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890", + }, + // $ echo -n "IJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890A" | sha256sum + // accab8e85b6bd178e975aaaa354aed8258bcd6af3e61bd4f12267635856cab0b + { + [32]byte{ + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 
16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + }, + "IJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890A", + }, + // $ echo -n "JKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890AB" | sha256sum + // 107f5ad8bc5d427246fc5f9c581134b61d8ba447e877df56cddad2bf53789172 + { + [32]byte{ + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 
* 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + }, + "JKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890AB", + }, + // $ echo -n "KLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABC" | sha256sum + // 7666f65b234f78aa537c8d098b181091ce8b7866a0285b52e6bf31b6f21ca9bb + { + [32]byte{ + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 
16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + }, + "KLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABC", + }, + // $ echo -n "LMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCD" | sha256sum + // 4eba948ccee7289ab1f01628a1ab756dee39a6894aed217edc9a91a8b35e50ca + { + [32]byte{ + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 
* 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + "LMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCD", + }, + // $ echo -n "MNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDE" | sha256sum + // 5011218873e7ca84871668d26461e449e7033b7959d69cfb5c2fee773c3d432d + { + [32]byte{ + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + }, + "MNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDE", + }, + // $ echo -n "NOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDEF" | sha256sum + // 
6932b4ddaf3696e5d5270739bdbe6ab120bb8034b877bd3a8e5a5d5ca263e1c5 + { + [32]byte{ + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + "NOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDEF", + }, + // $ echo -n "OPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDEFG" | sha256sum + // 91bb1bcbfcb4c093aab255a0b8c8b5b93605e2f51dd6b0898b70b9f3c10fc1f9 + { + [32]byte{ + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 
16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + }, + "OPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDEFG", + }, + // $ echo -n "PQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDEFGH" | sha256sum + // 0d1fa5355388e361c4591bd49c004e3d99044be274db43e91036611365aead02 + { + [32]byte{ + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 
* 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + }, + "PQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890ABCDEFGH", + }, + // $ echo -n "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" | sha256sum + // b6ac3cc10386331c765f04f041c147d0f278f2aed8eaa021e2d0057fc6f6ff9e + { + [32]byte{ + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) 
+ (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + }, + strings.Repeat("A", 128), + }, + // $ echo -n "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB" | sha256sum + // 7abaa701a6f4bb8d9ea3872a315597eb6f2ccfd03392d8d10560837f6136d06a + { + [32]byte{ + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 
* 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + }, + strings.Repeat("B", 128), + }, + // $ echo -n "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC" | sha256sum + // 6e8b9325f779dba60c4c148dee5ded43b19ed20d25d66e338abec53b99174fe8 + { + [32]byte{ + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 
64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + }, + strings.Repeat("C", 128), + }, + // $ echo -n "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD" | sha256sum + // 7aa020c91ac4d32e17efd9b64648b92e375987e0eae7d0a58544ca1e4fc32c3c + { + [32]byte{ + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + }, + strings.Repeat("D", 128), + }, + // $ echo -n "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE" | sha256sum + // 997f6a2fc44f1400e9f64d7eac11fe99e21f4b7a3fc2ff3ec95c2ef016abb9e5 + { + [32]byte{ + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 
16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + }, + strings.Repeat("E", 128), + }, + // $ echo -n "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" | sha256sum + // 5c6cdeb9ccaa1d9c57662605ab738ec4ecf0467f576d4c2d7fae48710215582a + { + [32]byte{ + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) 
+ (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + }, + strings.Repeat("F", 128), + }, + // $ echo -n "GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG" | sha256sum + // 394394b5f0e91a21d1e932f9ed55e098c8b05f3668f77134eeee843fef1d1758 + { + [32]byte{ + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 
* 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + }, + strings.Repeat("G", 128), + }, + // $ echo -n "HHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH" | sha256sum + // cab546612de68eaa849487342baadbac2561df6380ddac66137ef649e0cdfd0a + { + [32]byte{ + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 
32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + }, + strings.Repeat("H", 128), + }, + // $ echo -n "IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII" | sha256sum + // 2be96cc28445876429be3005db465d1b9c8ed1432e3ac6f1514b6e9eee725ad8 + { + [32]byte{ + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) 
+ (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + strings.Repeat("I", 128), + }, + // $ echo -n "JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ" | sha256sum + // 238e5f81d54f2af58049b944c4a1b9516a36c2ef1e20887450b3482045714444 + { + [32]byte{ + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 
* 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + }, + strings.Repeat("J", 128), + }, + // $ echo -n "KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" | sha256sum + // f3a5b826c64951661ce22dc67f0f79d13f633f0601aca2f5e1cf1a9f17dffd4f + { + [32]byte{ + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + }, + strings.Repeat("K", 128), + }, + // $ echo -n "LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL" | sha256sum + // 1e90c05bedd24dc3e297d5b8fb215b95d8b7f4a040ee912069614c7a3382725d + { + [32]byte{ + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 
16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + }, + strings.Repeat("L", 128), + }, + // $ echo -n "MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM" | sha256sum + // 96239ac6fb99822797308f18d8455778fb5885103aa5ff59afe2219df657df99 + { + [32]byte{ + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) 
+ (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + }, + strings.Repeat("M", 128), + }, + // $ echo -n "NNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNN" | sha256sum + // 11e7f5a6f15a4addba9b6b21bc4f8ecbdd969e179335269fc68d3a05f0f3da4a + { + [32]byte{ + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 
* 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + }, + strings.Repeat("N", 128), + }, + // $ echo -n "OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO" | sha256sum + // ae843b7e4e00afeb972bf948a345b319cca8bd0bcaa1428c1c67c88ea663c1e0 + { + [32]byte{ + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 
32) + (0 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + }, + strings.Repeat("O", 128), + }, + // $ echo -n "PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP" | sha256sum + // f16ef3e254ffb74b7e3c97d99486ef8c549e4c80bc6dfed7fe8c5e7e76f4fbcd + { + [32]byte{ + (1 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (0 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (0 * 4) + (0 * 8) + (0 * 16) + (0 * 32) + (0 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (0 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) 
+ (0 * 64) + (1 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (0 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (1 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (0 * 128), + (0 * 1) + (0 * 2) + (1 * 4) + (0 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (1 * 2) + (0 * 4) + (1 * 8) + (1 * 16) + (1 * 32) + (1 * 64) + (1 * 128), + (1 * 1) + (0 * 2) + (1 * 4) + (1 * 8) + (0 * 16) + (0 * 32) + (1 * 64) + (1 * 128), + }, + strings.Repeat("P", 128), + }, +} + +func TestGolden(t *testing.T) { + blockfuncSaved := blockfunc + + defer func() { + blockfunc = blockfuncSaved + }() + + if true { + blockfunc = blockfuncForceGeneric + for _, g := range golden { + s := fmt.Sprintf("%x", Sum256([]byte(g.in))) + if Sum256([]byte(g.in)) != g.out { + t.Fatalf( + "Generic: Sum256 function: sha256(%s) = %s want %s", g.in, + s, + hex.EncodeToString(g.out[:]), + ) + } + } + } + + if hasIntelSha { + blockfunc = blockfuncIntelSha + for _, g := range golden { + s := fmt.Sprintf("%x", Sum256([]byte(g.in))) + if Sum256([]byte(g.in)) != g.out { + t.Fatalf( + "SHA: Sum256 function: sha256(%s) = %s want %s", g.in, s, + hex.EncodeToString(g.out[:]), + ) + } + } + } + + if hasArmSha2() { + blockfunc = blockfuncArmSha2 + for _, g := range golden { + s := fmt.Sprintf("%x", Sum256([]byte(g.in))) + if Sum256([]byte(g.in)) != g.out { + t.Fatalf( + "ARM: Sum256 function: sha256(%s) = %s want %s", g.in, s, + hex.EncodeToString(g.out[:]), + ) + } + } + } +} + +func TestSize(t *testing.T) { + c := New() + if got := c.Size(); got != Size { + t.Errorf("Size = %d; want %d", got, Size) + } +} + +func TestBlockSize(t *testing.T) { + c := New() + if got := c.BlockSize(); got != BlockSize { + t.Errorf("BlockSize = %d want %d", got, BlockSize) + } +} + +func benchmarkSize(b *testing.B, size int) { + var bench = New() + var buf = make([]byte, size) + b.SetBytes(int64(size)) + sum := make([]byte, bench.Size()) + b.ResetTimer() + for i := 0; i < b.N; i++ { + bench.Reset() + bench.Write(buf) + bench.Sum(sum[:0]) + } +} + +func BenchmarkHash(b *testing.B) { + type alg struct { + n string + t blockfuncType + } + algos := make([]alg, 0, 2) + + algos = append(algos, alg{"Generic", blockfuncForceGeneric}) + if hasIntelSha { + algos = append(algos, alg{"IntelSHA", blockfuncIntelSha}) + } + if hasArmSha2() { + algos = append(algos, alg{"ArmSha2", blockfuncArmSha2}) + } + algos = append(algos, alg{"GoStdlib", blockfuncStdlib}) + + sizes := []struct { + n string + f func(*testing.B, int) + s int + }{ + {"8Bytes", benchmarkSize, 1 << 3}, + {"64Bytes", benchmarkSize, 1 << 6}, + {"1K", benchmarkSize, 1 << 10}, + {"8K", benchmarkSize, 1 << 13}, + {"1M", benchmarkSize, 1 << 20}, + {"5M", benchmarkSize, 5 << 20}, + {"10M", benchmarkSize, 5 << 21}, + } + + for _, a := range algos { + func() { + orig := blockfunc + defer func() { blockfunc = orig }() + + blockfunc = a.t + for _, y := range sizes { + s := a.n + "/" + y.n + b.Run(s, func(b *testing.B) { y.f(b, y.s) }) + } + }() + } +} + +type sha256TestGo struct { + out string + in string + halfState string // marshaled hash state after first half of in written, used by TestGoldenMarshal +} + +var golden256 = []sha256TestGo{ + { + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "", + "sha\x03j\t\xe6g\xbbg\xae\x85 0 { + t.Errorf("allocs = %d, want 0", n) + } +} diff --git a/sha256/sha256blockAvx512_amd64.asm b/sha256/sha256blockAvx512_amd64.asm new file mode 100644 index 
0000000..c959b1a --- /dev/null +++ b/sha256/sha256blockAvx512_amd64.asm @@ -0,0 +1,686 @@ + +// 16x Parallel implementation of SHA256 for AVX512 + +// +// Minio Cloud Storage, (C) 2017 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +// This code is based on the Intel Multi-Buffer Crypto for IPSec library +// and more specifically the following implementation: +// https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm +// +// For Golang it has been converted into Plan 9 assembly with the help of +// github.com/minio/asm2plan9s to assemble the AVX512 instructions +// + +// Copyright (c) 2017, Intel Corporation +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of Intel Corporation nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
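+
+// Note on the shared digest layout (added commentary, inferred from
+// SHA256_DIGEST_ROW_SIZE below and the Go-side getDigests() helper): the
+// digest buffer passed to this routine holds the 8 SHA-256 state words as
+// 8 rows of 64 bytes, one 32-bit word per lane across the 16 parallel
+// lanes, i.e. state word k of lane i lives at byte offset (i + k*16)*4.
+// This transposed, row-per-word form is what lets each A..H zmm register
+// be loaded and stored for all 16 lanes with a single vmovdqu32.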
+ +#define SHA256_DIGEST_ROW_SIZE 64 + +// arg1 +#define STATE rdi +#define STATE_P9 DI +// arg2 +#define INP_SIZE rsi +#define INP_SIZE_P9 SI + +#define IDX rcx +#define TBL rdx +#define TBL_P9 DX + +#define INPUT rax +#define INPUT_P9 AX + +#define inp0 r9 +#define SCRATCH_P9 R12 +#define SCRATCH r12 +#define maskp r13 +#define MASKP_P9 R13 +#define mask r14 +#define MASK_P9 R14 + +#define A zmm0 +#define B zmm1 +#define C zmm2 +#define D zmm3 +#define E zmm4 +#define F zmm5 +#define G zmm6 +#define H zmm7 +#define T1 zmm8 +#define TMP0 zmm9 +#define TMP1 zmm10 +#define TMP2 zmm11 +#define TMP3 zmm12 +#define TMP4 zmm13 +#define TMP5 zmm14 +#define TMP6 zmm15 + +#define W0 zmm16 +#define W1 zmm17 +#define W2 zmm18 +#define W3 zmm19 +#define W4 zmm20 +#define W5 zmm21 +#define W6 zmm22 +#define W7 zmm23 +#define W8 zmm24 +#define W9 zmm25 +#define W10 zmm26 +#define W11 zmm27 +#define W12 zmm28 +#define W13 zmm29 +#define W14 zmm30 +#define W15 zmm31 + + +#define TRANSPOSE16(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _r10, _r11, _r12, _r13, _r14, _r15, _t0, _t1) \ + \ + \ // input r0 = {a15 a14 a13 a12 a11 a10 a9 a8 a7 a6 a5 a4 a3 a2 a1 a0} + \ // r1 = {b15 b14 b13 b12 b11 b10 b9 b8 b7 b6 b5 b4 b3 b2 b1 b0} + \ // r2 = {c15 c14 c13 c12 c11 c10 c9 c8 c7 c6 c5 c4 c3 c2 c1 c0} + \ // r3 = {d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0} + \ // r4 = {e15 e14 e13 e12 e11 e10 e9 e8 e7 e6 e5 e4 e3 e2 e1 e0} + \ // r5 = {f15 f14 f13 f12 f11 f10 f9 f8 f7 f6 f5 f4 f3 f2 f1 f0} + \ // r6 = {g15 g14 g13 g12 g11 g10 g9 g8 g7 g6 g5 g4 g3 g2 g1 g0} + \ // r7 = {h15 h14 h13 h12 h11 h10 h9 h8 h7 h6 h5 h4 h3 h2 h1 h0} + \ // r8 = {i15 i14 i13 i12 i11 i10 i9 i8 i7 i6 i5 i4 i3 i2 i1 i0} + \ // r9 = {j15 j14 j13 j12 j11 j10 j9 j8 j7 j6 j5 j4 j3 j2 j1 j0} + \ // r10 = {k15 k14 k13 k12 k11 k10 k9 k8 k7 k6 k5 k4 k3 k2 k1 k0} + \ // r11 = {l15 l14 l13 l12 l11 l10 l9 l8 l7 l6 l5 l4 l3 l2 l1 l0} + \ // r12 = {m15 m14 m13 m12 m11 m10 m9 m8 m7 m6 m5 m4 m3 m2 m1 m0} + \ // r13 = {n15 n14 n13 n12 n11 n10 n9 n8 n7 n6 n5 n4 n3 n2 n1 n0} + \ // r14 = {o15 o14 o13 o12 o11 o10 o9 o8 o7 o6 o5 o4 o3 o2 o1 o0} + \ // r15 = {p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0} + \ + \ // output r0 = { p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0} + \ // r1 = { p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1} + \ // r2 = { p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} + \ // r3 = { p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3} + \ // r4 = { p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4} + \ // r5 = { p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5} + \ // r6 = { p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} + \ // r7 = { p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7} + \ // r8 = { p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8} + \ // r9 = { p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9} + \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10} + \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11} + \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12} + \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13} + \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14} + \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15} + \ + \ // process top half + vshufps _t0, _r0, _r1, 0x44 \ // t0 = {b13 b12 a13 a12 b9 b8 a9 a8 b5 b4 a5 a4 b1 b0 a1 a0} + vshufps _r0, _r0, _r1, 0xEE \ // r0 = {b15 b14 a15 a14 b11 b10 a11 a10 b7 b6 a7 a6 b3 b2 a3 a2} + vshufps _t1, _r2, _r3, 
0x44 \ // t1 = {d13 d12 c13 c12 d9 d8 c9 c8 d5 d4 c5 c4 d1 d0 c1 c0} + vshufps _r2, _r2, _r3, 0xEE \ // r2 = {d15 d14 c15 c14 d11 d10 c11 c10 d7 d6 c7 c6 d3 d2 c3 c2} + \ + vshufps _r3, _t0, _t1, 0xDD \ // r3 = {d13 c13 b13 a13 d9 c9 b9 a9 d5 c5 b5 a5 d1 c1 b1 a1} + vshufps _r1, _r0, _r2, 0x88 \ // r1 = {d14 c14 b14 a14 d10 c10 b10 a10 d6 c6 b6 a6 d2 c2 b2 a2} + vshufps _r0, _r0, _r2, 0xDD \ // r0 = {d15 c15 b15 a15 d11 c11 b11 a11 d7 c7 b7 a7 d3 c3 b3 a3} + vshufps _t0, _t0, _t1, 0x88 \ // t0 = {d12 c12 b12 a12 d8 c8 b8 a8 d4 c4 b4 a4 d0 c0 b0 a0} + \ + \ // use r2 in place of t0 + vshufps _r2, _r4, _r5, 0x44 \ // r2 = {f13 f12 e13 e12 f9 f8 e9 e8 f5 f4 e5 e4 f1 f0 e1 e0} + vshufps _r4, _r4, _r5, 0xEE \ // r4 = {f15 f14 e15 e14 f11 f10 e11 e10 f7 f6 e7 e6 f3 f2 e3 e2} + vshufps _t1, _r6, _r7, 0x44 \ // t1 = {h13 h12 g13 g12 h9 h8 g9 g8 h5 h4 g5 g4 h1 h0 g1 g0} + vshufps _r6, _r6, _r7, 0xEE \ // r6 = {h15 h14 g15 g14 h11 h10 g11 g10 h7 h6 g7 g6 h3 h2 g3 g2} + \ + vshufps _r7, _r2, _t1, 0xDD \ // r7 = {h13 g13 f13 e13 h9 g9 f9 e9 h5 g5 f5 e5 h1 g1 f1 e1} + vshufps _r5, _r4, _r6, 0x88 \ // r5 = {h14 g14 f14 e14 h10 g10 f10 e10 h6 g6 f6 e6 h2 g2 f2 e2} + vshufps _r4, _r4, _r6, 0xDD \ // r4 = {h15 g15 f15 e15 h11 g11 f11 e11 h7 g7 f7 e7 h3 g3 f3 e3} + vshufps _r2, _r2, _t1, 0x88 \ // r2 = {h12 g12 f12 e12 h8 g8 f8 e8 h4 g4 f4 e4 h0 g0 f0 e0} + \ + \ // use r6 in place of t0 + vshufps _r6, _r8, _r9, 0x44 \ // r6 = {j13 j12 i13 i12 j9 j8 i9 i8 j5 j4 i5 i4 j1 j0 i1 i0} + vshufps _r8, _r8, _r9, 0xEE \ // r8 = {j15 j14 i15 i14 j11 j10 i11 i10 j7 j6 i7 i6 j3 j2 i3 i2} + vshufps _t1, _r10, _r11, 0x44 \ // t1 = {l13 l12 k13 k12 l9 l8 k9 k8 l5 l4 k5 k4 l1 l0 k1 k0} + vshufps _r10, _r10, _r11, 0xEE \ // r10 = {l15 l14 k15 k14 l11 l10 k11 k10 l7 l6 k7 k6 l3 l2 k3 k2} + \ + vshufps _r11, _r6, _t1, 0xDD \ // r11 = {l13 k13 j13 113 l9 k9 j9 i9 l5 k5 j5 i5 l1 k1 j1 i1} + vshufps _r9, _r8, _r10, 0x88 \ // r9 = {l14 k14 j14 114 l10 k10 j10 i10 l6 k6 j6 i6 l2 k2 j2 i2} + vshufps _r8, _r8, _r10, 0xDD \ // r8 = {l15 k15 j15 115 l11 k11 j11 i11 l7 k7 j7 i7 l3 k3 j3 i3} + vshufps _r6, _r6, _t1, 0x88 \ // r6 = {l12 k12 j12 112 l8 k8 j8 i8 l4 k4 j4 i4 l0 k0 j0 i0} + \ + \ // use r10 in place of t0 + vshufps _r10, _r12, _r13, 0x44 \ // r10 = {n13 n12 m13 m12 n9 n8 m9 m8 n5 n4 m5 m4 n1 n0 a1 m0} + vshufps _r12, _r12, _r13, 0xEE \ // r12 = {n15 n14 m15 m14 n11 n10 m11 m10 n7 n6 m7 m6 n3 n2 a3 m2} + vshufps _t1, _r14, _r15, 0x44 \ // t1 = {p13 p12 013 012 p9 p8 09 08 p5 p4 05 04 p1 p0 01 00} + vshufps _r14, _r14, _r15, 0xEE \ // r14 = {p15 p14 015 014 p11 p10 011 010 p7 p6 07 06 p3 p2 03 02} + \ + vshufps _r15, _r10, _t1, 0xDD \ // r15 = {p13 013 n13 m13 p9 09 n9 m9 p5 05 n5 m5 p1 01 n1 m1} + vshufps _r13, _r12, _r14, 0x88 \ // r13 = {p14 014 n14 m14 p10 010 n10 m10 p6 06 n6 m6 p2 02 n2 m2} + vshufps _r12, _r12, _r14, 0xDD \ // r12 = {p15 015 n15 m15 p11 011 n11 m11 p7 07 n7 m7 p3 03 n3 m3} + vshufps _r10, _r10, _t1, 0x88 \ // r10 = {p12 012 n12 m12 p8 08 n8 m8 p4 04 n4 m4 p0 00 n0 m0} + \ + \ // At this point, the registers that contain interesting data are: + \ // t0, r3, r1, r0, r2, r7, r5, r4, r6, r11, r9, r8, r10, r15, r13, r12 + \ // Can use t1 and r14 as scratch registers + LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX \ + LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 \ + \ + vmovdqu32 _r14, [rbx] \ + vpermi2q _r14, _t0, _r2 \ // r14 = {h8 g8 f8 e8 d8 c8 b8 a8 h0 g0 f0 e0 d0 c0 b0 a0} + vmovdqu32 _t1, [r8] \ + vpermi2q _t1, _t0, _r2 \ // t1 = {h12 g12 f12 e12 d12 c12 b12 a12 h4 g4 f4 e4 d4 c4 b4 a4} + \ + vmovdqu32 _r2, 
[rbx] \ + vpermi2q _r2, _r3, _r7 \ // r2 = {h9 g9 f9 e9 d9 c9 b9 a9 h1 g1 f1 e1 d1 c1 b1 a1} + vmovdqu32 _t0, [r8] \ + vpermi2q _t0, _r3, _r7 \ // t0 = {h13 g13 f13 e13 d13 c13 b13 a13 h5 g5 f5 e5 d5 c5 b5 a5} + \ + vmovdqu32 _r3, [rbx] \ + vpermi2q _r3, _r1, _r5 \ // r3 = {h10 g10 f10 e10 d10 c10 b10 a10 h2 g2 f2 e2 d2 c2 b2 a2} + vmovdqu32 _r7, [r8] \ + vpermi2q _r7, _r1, _r5 \ // r7 = {h14 g14 f14 e14 d14 c14 b14 a14 h6 g6 f6 e6 d6 c6 b6 a6} + \ + vmovdqu32 _r1, [rbx] \ + vpermi2q _r1, _r0, _r4 \ // r1 = {h11 g11 f11 e11 d11 c11 b11 a11 h3 g3 f3 e3 d3 c3 b3 a3} + vmovdqu32 _r5, [r8] \ + vpermi2q _r5, _r0, _r4 \ // r5 = {h15 g15 f15 e15 d15 c15 b15 a15 h7 g7 f7 e7 d7 c7 b7 a7} + \ + vmovdqu32 _r0, [rbx] \ + vpermi2q _r0, _r6, _r10 \ // r0 = {p8 o8 n8 m8 l8 k8 j8 i8 p0 o0 n0 m0 l0 k0 j0 i0} + vmovdqu32 _r4, [r8] \ + vpermi2q _r4, _r6, _r10 \ // r4 = {p12 o12 n12 m12 l12 k12 j12 i12 p4 o4 n4 m4 l4 k4 j4 i4} + \ + vmovdqu32 _r6, [rbx] \ + vpermi2q _r6, _r11, _r15 \ // r6 = {p9 o9 n9 m9 l9 k9 j9 i9 p1 o1 n1 m1 l1 k1 j1 i1} + vmovdqu32 _r10, [r8] \ + vpermi2q _r10, _r11, _r15 \ // r10 = {p13 o13 n13 m13 l13 k13 j13 i13 p5 o5 n5 m5 l5 k5 j5 i5} + \ + vmovdqu32 _r11, [rbx] \ + vpermi2q _r11, _r9, _r13 \ // r11 = {p10 o10 n10 m10 l10 k10 j10 i10 p2 o2 n2 m2 l2 k2 j2 i2} + vmovdqu32 _r15, [r8] \ + vpermi2q _r15, _r9, _r13 \ // r15 = {p14 o14 n14 m14 l14 k14 j14 i14 p6 o6 n6 m6 l6 k6 j6 i6} + \ + vmovdqu32 _r9, [rbx] \ + vpermi2q _r9, _r8, _r12 \ // r9 = {p11 o11 n11 m11 l11 k11 j11 i11 p3 o3 n3 m3 l3 k3 j3 i3} + vmovdqu32 _r13, [r8] \ + vpermi2q _r13, _r8, _r12 \ // r13 = {p15 o15 n15 m15 l15 k15 j15 i15 p7 o7 n7 m7 l7 k7 j7 i7} + \ + \ // At this point r8 and r12 can be used as scratch registers + vshuff64x2 _r8, _r14, _r0, 0xEE \ // r8 = {p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8} + vshuff64x2 _r0, _r14, _r0, 0x44 \ // r0 = {p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0} + \ + vshuff64x2 _r12, _t1, _r4, 0xEE \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12} + vshuff64x2 _r4, _t1, _r4, 0x44 \ // r4 = {p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4} + \ + vshuff64x2 _r14, _r7, _r15, 0xEE \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14} + vshuff64x2 _t1, _r7, _r15, 0x44 \ // t1 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} + \ + vshuff64x2 _r15, _r5, _r13, 0xEE \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15} + vshuff64x2 _r7, _r5, _r13, 0x44 \ // r7 = {p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7} + \ + vshuff64x2 _r13, _t0, _r10, 0xEE \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13} + vshuff64x2 _r5, _t0, _r10, 0x44 \ // r5 = {p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5} + \ + vshuff64x2 _r10, _r3, _r11, 0xEE \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10} + vshuff64x2 _t0, _r3, _r11, 0x44 \ // t0 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} + \ + vshuff64x2 _r11, _r1, _r9, 0xEE \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11} + vshuff64x2 _r3, _r1, _r9, 0x44 \ // r3 = {p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3} + \ + vshuff64x2 _r9, _r2, _r6, 0xEE \ // r9 = {p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9} + vshuff64x2 _r1, _r2, _r6, 0x44 \ // r1 = {p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1} + \ + vmovdqu32 _r2, _t0 \ // r2 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} + vmovdqu32 _r6, _t1 \ // r6 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} + + +// CH(A, B, C) = (A&B) ^ 
(~A&C) +// MAJ(E, F, G) = (E&F) ^ (E&G) ^ (F&G) +// SIGMA0 = ROR_2 ^ ROR_13 ^ ROR_22 +// SIGMA1 = ROR_6 ^ ROR_11 ^ ROR_25 +// sigma0 = ROR_7 ^ ROR_18 ^ SHR_3 +// sigma1 = ROR_17 ^ ROR_19 ^ SHR_10 + +// Main processing loop per round +#define PROCESS_LOOP(_WT, _ROUND, _A, _B, _C, _D, _E, _F, _G, _H) \ + \ // T1 = H + SIGMA1(E) + CH(E, F, G) + Kt + Wt + \ // T2 = SIGMA0(A) + MAJ(A, B, C) + \ // H=G, G=F, F=E, E=D+T1, D=C, C=B, B=A, A=T1+T2 + \ + \ // H becomes T2, then add T1 for A + \ // D becomes D + T1 for E + \ + vpaddd T1, _H, TMP3 \ // T1 = H + Kt + vmovdqu32 TMP0, _E \ + vprord TMP1, _E, 6 \ // ROR_6(E) + vprord TMP2, _E, 11 \ // ROR_11(E) + vprord TMP3, _E, 25 \ // ROR_25(E) + vpternlogd TMP0, _F, _G, 0xCA \ // TMP0 = CH(E,F,G) + vpaddd T1, T1, _WT \ // T1 = T1 + Wt + vpternlogd TMP1, TMP2, TMP3, 0x96 \ // TMP1 = SIGMA1(E) + vpaddd T1, T1, TMP0 \ // T1 = T1 + CH(E,F,G) + vpaddd T1, T1, TMP1 \ // T1 = T1 + SIGMA1(E) + vpaddd _D, _D, T1 \ // D = D + T1 + \ + vprord _H, _A, 2 \ // ROR_2(A) + vprord TMP2, _A, 13 \ // ROR_13(A) + vprord TMP3, _A, 22 \ // ROR_22(A) + vmovdqu32 TMP0, _A \ + vpternlogd TMP0, _B, _C, 0xE8 \ // TMP0 = MAJ(A,B,C) + vpternlogd _H, TMP2, TMP3, 0x96 \ // H(T2) = SIGMA0(A) + vpaddd _H, _H, TMP0 \ // H(T2) = SIGMA0(A) + MAJ(A,B,C) + vpaddd _H, _H, T1 \ // H(A) = H(T2) + T1 + \ + vmovdqu32 TMP3, [TBL + ((_ROUND+1)*64)] \ // Next Kt + + +#define MSG_SCHED_ROUND_16_63(_WT, _WTp1, _WTp9, _WTp14) \ + vprord TMP4, _WTp14, 17 \ // ROR_17(Wt-2) + vprord TMP5, _WTp14, 19 \ // ROR_19(Wt-2) + vpsrld TMP6, _WTp14, 10 \ // SHR_10(Wt-2) + vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma1(Wt-2) + \ + vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) + vpaddd _WT, _WT, _WTp9 \ // Wt = Wt-16 + sigma1(Wt-2) + Wt-7 + \ + vprord TMP4, _WTp1, 7 \ // ROR_7(Wt-15) + vprord TMP5, _WTp1, 18 \ // ROR_18(Wt-15) + vpsrld TMP6, _WTp1, 3 \ // SHR_3(Wt-15) + vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma0(Wt-15) + \ + vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) + + \ // Wt-7 + sigma0(Wt-15) + + + +// Note this is reading in a block of data for one lane +// When all 16 are read, the data must be transposed to build msg schedule +#define MSG_SCHED_ROUND_00_15(_WT, OFFSET, LABEL) \ + TESTQ $(1<(SB), TBL_P9 + vmovdqu32 TMP2, [TBL] + + // Get first K from table + MOVQ table+16(FP), TBL_P9 + vmovdqu32 TMP3, [TBL] + + // Save digests for later addition + vmovdqu32 [SCRATCH + 64*0], A + vmovdqu32 [SCRATCH + 64*1], B + vmovdqu32 [SCRATCH + 64*2], C + vmovdqu32 [SCRATCH + 64*3], D + vmovdqu32 [SCRATCH + 64*4], E + vmovdqu32 [SCRATCH + 64*5], F + vmovdqu32 [SCRATCH + 64*6], G + vmovdqu32 [SCRATCH + 64*7], H + + add IDX, 64 + + // Transpose input data + TRANSPOSE16(W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, TMP0, TMP1) + + vpshufb W0, W0, TMP2 + vpshufb W1, W1, TMP2 + vpshufb W2, W2, TMP2 + vpshufb W3, W3, TMP2 + vpshufb W4, W4, TMP2 + vpshufb W5, W5, TMP2 + vpshufb W6, W6, TMP2 + vpshufb W7, W7, TMP2 + vpshufb W8, W8, TMP2 + vpshufb W9, W9, TMP2 + vpshufb W10, W10, TMP2 + vpshufb W11, W11, TMP2 + vpshufb W12, W12, TMP2 + vpshufb W13, W13, TMP2 + vpshufb W14, W14, TMP2 + vpshufb W15, W15, TMP2 + + // MSG Schedule for W0-W15 is now complete in registers + // Process first 48 rounds + // Calculate next Wt+16 after processing is complete and Wt is unneeded + + PROCESS_LOOP( W0, 0, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) + PROCESS_LOOP( W1, 1, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) + PROCESS_LOOP( W2, 2, G, H, A, 
B, C, D, E, F) + MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) + PROCESS_LOOP( W3, 3, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) + PROCESS_LOOP( W4, 4, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) + PROCESS_LOOP( W5, 5, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) + PROCESS_LOOP( W6, 6, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) + PROCESS_LOOP( W7, 7, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) + PROCESS_LOOP( W8, 8, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) + PROCESS_LOOP( W9, 9, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) + PROCESS_LOOP(W10, 10, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) + PROCESS_LOOP(W11, 11, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) + PROCESS_LOOP(W12, 12, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) + PROCESS_LOOP(W13, 13, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) + PROCESS_LOOP(W14, 14, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) + PROCESS_LOOP(W15, 15, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) + PROCESS_LOOP( W0, 16, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) + PROCESS_LOOP( W1, 17, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) + PROCESS_LOOP( W2, 18, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) + PROCESS_LOOP( W3, 19, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) + PROCESS_LOOP( W4, 20, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) + PROCESS_LOOP( W5, 21, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) + PROCESS_LOOP( W6, 22, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) + PROCESS_LOOP( W7, 23, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) + PROCESS_LOOP( W8, 24, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) + PROCESS_LOOP( W9, 25, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) + PROCESS_LOOP(W10, 26, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) + PROCESS_LOOP(W11, 27, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) + PROCESS_LOOP(W12, 28, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) + PROCESS_LOOP(W13, 29, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) + PROCESS_LOOP(W14, 30, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) + PROCESS_LOOP(W15, 31, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) + PROCESS_LOOP( W0, 32, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) + PROCESS_LOOP( W1, 33, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) + PROCESS_LOOP( W2, 34, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) + PROCESS_LOOP( W3, 35, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) + PROCESS_LOOP( W4, 36, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) + PROCESS_LOOP( W5, 37, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) + PROCESS_LOOP( W6, 38, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) + PROCESS_LOOP( W7, 39, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) + PROCESS_LOOP( W8, 40, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) + PROCESS_LOOP( W9, 41, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W9, W10, 
W2, W7) + PROCESS_LOOP(W10, 42, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) + PROCESS_LOOP(W11, 43, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) + PROCESS_LOOP(W12, 44, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) + PROCESS_LOOP(W13, 45, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) + PROCESS_LOOP(W14, 46, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) + PROCESS_LOOP(W15, 47, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) + + // Check if this is the last block + sub INP_SIZE, 1 + JE lastLoop + + // Load next mask for inputs + ADDQ $8, MASKP_P9 + MOVQ (MASKP_P9), MASK_P9 + + // Process last 16 rounds + // Read in next block msg data for use in first 16 words of msg sched + + PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_00_15( W0, 0, skipNext0) + PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_00_15( W1, 1, skipNext1) + PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_00_15( W2, 2, skipNext2) + PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_00_15( W3, 3, skipNext3) + PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_00_15( W4, 4, skipNext4) + PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_00_15( W5, 5, skipNext5) + PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_00_15( W6, 6, skipNext6) + PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_00_15( W7, 7, skipNext7) + PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_00_15( W8, 8, skipNext8) + PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_00_15( W9, 9, skipNext9) + PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_00_15(W10, 10, skipNext10) + PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_00_15(W11, 11, skipNext11) + PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_00_15(W12, 12, skipNext12) + PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_00_15(W13, 13, skipNext13) + PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_00_15(W14, 14, skipNext14) + PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_00_15(W15, 15, skipNext15) + + // Add old digest + vmovdqu32 TMP2, A + vmovdqu32 A, [SCRATCH + 64*0] + vpaddd A{k1}, A, TMP2 + vmovdqu32 TMP2, B + vmovdqu32 B, [SCRATCH + 64*1] + vpaddd B{k1}, B, TMP2 + vmovdqu32 TMP2, C + vmovdqu32 C, [SCRATCH + 64*2] + vpaddd C{k1}, C, TMP2 + vmovdqu32 TMP2, D + vmovdqu32 D, [SCRATCH + 64*3] + vpaddd D{k1}, D, TMP2 + vmovdqu32 TMP2, E + vmovdqu32 E, [SCRATCH + 64*4] + vpaddd E{k1}, E, TMP2 + vmovdqu32 TMP2, F + vmovdqu32 F, [SCRATCH + 64*5] + vpaddd F{k1}, F, TMP2 + vmovdqu32 TMP2, G + vmovdqu32 G, [SCRATCH + 64*6] + vpaddd G{k1}, G, TMP2 + vmovdqu32 TMP2, H + vmovdqu32 H, [SCRATCH + 64*7] + vpaddd H{k1}, H, TMP2 + + kmovq k1, mask + JMP lloop + +lastLoop: + // Process last 16 rounds + PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H) + PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G) + PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F) + PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E) + PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D) + PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C) + PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B) + PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A) + PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H) + PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G) + PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F) + PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E) + 
PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D) + PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C) + PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B) + PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A) + + // Add old digest + vmovdqu32 TMP2, A + vmovdqu32 A, [SCRATCH + 64*0] + vpaddd A{k1}, A, TMP2 + vmovdqu32 TMP2, B + vmovdqu32 B, [SCRATCH + 64*1] + vpaddd B{k1}, B, TMP2 + vmovdqu32 TMP2, C + vmovdqu32 C, [SCRATCH + 64*2] + vpaddd C{k1}, C, TMP2 + vmovdqu32 TMP2, D + vmovdqu32 D, [SCRATCH + 64*3] + vpaddd D{k1}, D, TMP2 + vmovdqu32 TMP2, E + vmovdqu32 E, [SCRATCH + 64*4] + vpaddd E{k1}, E, TMP2 + vmovdqu32 TMP2, F + vmovdqu32 F, [SCRATCH + 64*5] + vpaddd F{k1}, F, TMP2 + vmovdqu32 TMP2, G + vmovdqu32 G, [SCRATCH + 64*6] + vpaddd G{k1}, G, TMP2 + vmovdqu32 TMP2, H + vmovdqu32 H, [SCRATCH + 64*7] + vpaddd H{k1}, H, TMP2 + + // Write out digest + vmovdqu32 [STATE + 0*SHA256_DIGEST_ROW_SIZE], A + vmovdqu32 [STATE + 1*SHA256_DIGEST_ROW_SIZE], B + vmovdqu32 [STATE + 2*SHA256_DIGEST_ROW_SIZE], C + vmovdqu32 [STATE + 3*SHA256_DIGEST_ROW_SIZE], D + vmovdqu32 [STATE + 4*SHA256_DIGEST_ROW_SIZE], E + vmovdqu32 [STATE + 5*SHA256_DIGEST_ROW_SIZE], F + vmovdqu32 [STATE + 6*SHA256_DIGEST_ROW_SIZE], G + vmovdqu32 [STATE + 7*SHA256_DIGEST_ROW_SIZE], H + + VZEROUPPER + RET + +// +// Tables +// + +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b +GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 + +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D +GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64 + +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F +GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64 diff --git a/sha256/sha256blockAvx512_amd64.go b/sha256/sha256blockAvx512_amd64.go new file mode 100644 index 0000000..ff6e805 --- /dev/null +++ b/sha256/sha256blockAvx512_amd64.go @@ -0,0 +1,508 @@ +//go:build !noasm && !appengine && gc +// +build !noasm,!appengine,gc + +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +import ( + "encoding/binary" + "errors" + "hash" + "sort" + "sync/atomic" + "time" +) + +//go:noescape +func sha256X16Avx512(digests *[512]byte, scratch *[512]byte, table *[512]uint64, mask []uint64, + inputs [16][]byte) + +// Avx512ServerUID - Do not start at 0 but next multiple of 16 so as to be able to +// differentiate with default initialiation value of 0 +const Avx512ServerUID = 16 + +var uidCounter uint64 + +// NewAvx512 - initialize sha256 Avx512 implementation. +func NewAvx512(a512srv *Avx512Server) hash.Hash { + uid := atomic.AddUint64(&uidCounter, 1) + return &Avx512Digest{uid: uid, a512srv: a512srv} +} + +// Avx512Digest - Type for computing SHA256 using Avx512 +type Avx512Digest struct { + uid uint64 + a512srv *Avx512Server + x [chunk]byte + nx int + len uint64 + final bool + result [Size]byte +} + +// Size - Return size of checksum +func (d *Avx512Digest) Size() int { return Size } + +// BlockSize - Return blocksize of checksum +func (d Avx512Digest) BlockSize() int { return BlockSize } + +// Reset - reset sha digest to its initial values +func (d *Avx512Digest) Reset() { + d.a512srv.blocksCh <- blockInput{uid: d.uid, reset: true} + d.nx = 0 + d.len = 0 + d.final = false +} + +// Write to digest +func (d *Avx512Digest) Write(p []byte) (nn int, err error) { + + if d.final { + return 0, errors.New("Avx512Digest already finalized. Reset first before writing again") + } + + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: d.x[:]} + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: p[:n]} + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +// Sum - Return sha256 sum in bytes +func (d *Avx512Digest) Sum(in []byte) (result []byte) { + + if d.final { + return append(in, d.result[:]...) + } + + trail := make([]byte, 0, 128) + trail = append(trail, d.x[:d.nx]...) + + len := d.len + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + var tmp [64]byte + tmp[0] = 0x80 + if len%64 < 56 { + trail = append(trail, tmp[0:56-len%64]...) + } else { + trail = append(trail, tmp[0:64+56-len%64]...) + } + d.nx = 0 + + // Length in bits. + len <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(len >> (56 - 8*i)) + } + trail = append(trail, tmp[0:8]...) + + sumCh := make(chan [Size]byte) + d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: trail, final: true, sumCh: sumCh} + d.result = <-sumCh + d.final = true + return append(in, d.result[:]...) 
+} + +var table = [512]uint64{ + 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, + 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, + 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, + 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, + 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, + 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, + 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, + 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, + 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, + 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, + 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, + 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, + 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, + 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, + 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, + 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, + 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, + 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, + 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, + 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, + 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, + 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, + 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, + 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, + 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, + 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, + 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, + 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, + 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, + 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, + 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, + 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, + 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, + 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, + 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, + 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, + 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, + 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, + 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, + 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, + 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, + 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, + 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 
0x4a7484aa4a7484aa, + 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, + 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, + 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, + 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, + 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, + 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, + 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, + 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, + 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, + 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, + 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, + 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, + 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, + 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, + 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, + 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, + 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, + 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, + 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, + 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, + 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, + 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, + 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, + 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, + 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, + 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, + 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, + 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, + 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, + 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, + 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, + 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, + 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, + 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, + 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, + 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, + 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, + 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, + 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, + 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, + 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, + 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, + 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, + 
0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, + 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, + 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, + 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, + 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, + 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, + 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, + 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, + 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, + 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, + 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, + 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, + 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, + 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, + 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, + 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, + 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, + 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, + 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, + 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, + 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, + 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, + 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, + 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, + 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, + 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, + 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, + 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, + 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, + 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, + 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, + 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, + 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, + 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, + 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, + 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, + 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, + 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, + 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, + 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, + 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, + 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2} + +// Interface function to assembly ode +func blockAvx512(digests *[512]byte, input [16][]byte, mask []uint64) 
[16][Size]byte { + + scratch := [512]byte{} + sha256X16Avx512(digests, &scratch, &table, mask, input) + + output := [16][Size]byte{} + for i := 0; i < 16; i++ { + output[i] = getDigest(i, digests[:]) + } + + return output +} + +func getDigest(index int, state []byte) (sum [Size]byte) { + for j := 0; j < 16; j += 2 { + for i := index*4 + j*Size; i < index*4+(j+1)*Size; i += Size { + binary.BigEndian.PutUint32(sum[j*2:], binary.LittleEndian.Uint32(state[i:i+4])) + } + } + return +} + +// Message to send across input channel +type blockInput struct { + uid uint64 + msg []byte + reset bool + final bool + sumCh chan [Size]byte +} + +// Avx512Server - Type to implement 16x parallel handling of SHA256 invocations +type Avx512Server struct { + blocksCh chan blockInput // Input channel + totalIn int // Total number of inputs waiting to be processed + lanes [16]Avx512LaneInfo // Array with info per lane (out of 16) + digests map[uint64][Size]byte // Map of uids to (interim) digest results +} + +// Avx512LaneInfo - Info for each lane +type Avx512LaneInfo struct { + uid uint64 // unique identification for this SHA processing + block []byte // input block to be processed + outputCh chan [Size]byte // channel for output result +} + +// NewAvx512Server - Create new object for parallel processing handling +func NewAvx512Server() *Avx512Server { + a512srv := &Avx512Server{} + a512srv.digests = make(map[uint64][Size]byte) + a512srv.blocksCh = make(chan blockInput) + + // Start a single thread for reading from the input channel + go a512srv.Process() + return a512srv +} + +// Process - Sole handler for reading from the input channel +func (a512srv *Avx512Server) Process() { + for { + select { + case block := <-a512srv.blocksCh: + if block.reset { + a512srv.reset(block.uid) + continue + } + index := block.uid & 0xf + // fmt.Println("Adding message:", block.uid, index) + + if a512srv.lanes[index].block != nil { // If slot is already filled, process all inputs + // fmt.Println("Invoking Blocks()") + a512srv.blocks() + } + a512srv.totalIn++ + a512srv.lanes[index] = Avx512LaneInfo{uid: block.uid, block: block.msg} + if block.final { + a512srv.lanes[index].outputCh = block.sumCh + } + if a512srv.totalIn == len(a512srv.lanes) { + // fmt.Println("Invoking Blocks() while FULL: ") + a512srv.blocks() + } + + // TODO: test with larger timeout + case <-time.After(1 * time.Microsecond): + for _, lane := range a512srv.lanes { + if lane.block != nil { // check if there is any input to process + // fmt.Println("Invoking Blocks() on TIMEOUT: ") + a512srv.blocks() + break // we are done + } + } + } + } +} + +// Do a reset for this calculation +func (a512srv *Avx512Server) reset(uid uint64) { + + // Check if there is a message still waiting to be processed (and remove if so) + for i, lane := range a512srv.lanes { + if lane.uid == uid { + if lane.block != nil { + a512srv.lanes[i] = Avx512LaneInfo{} // clear message + a512srv.totalIn-- + } + } + } + + // Delete entry from hash map + delete(a512srv.digests, uid) +} + +// Invoke assembly and send results back +func (a512srv *Avx512Server) blocks() { + + inputs := [16][]byte{} + for i := range inputs { + inputs[i] = a512srv.lanes[i].block + } + + mask := expandMask(genMask(inputs)) + outputs := blockAvx512(a512srv.getDigests(), inputs, mask) + + a512srv.totalIn = 0 + for i := 0; i < len(outputs); i++ { + uid, outputCh := a512srv.lanes[i].uid, a512srv.lanes[i].outputCh + a512srv.digests[uid] = outputs[i] + a512srv.lanes[i] = Avx512LaneInfo{} + + if outputCh != nil { + // Send back 
result + outputCh <- outputs[i] + delete(a512srv.digests, uid) // Delete entry from hashmap + } + } +} + +func (a512srv *Avx512Server) Write(uid uint64, p []byte) (nn int, err error) { + a512srv.blocksCh <- blockInput{uid: uid, msg: p} + return len(p), nil +} + +// Sum - return sha256 sum in bytes for a given sum id. +func (a512srv *Avx512Server) Sum(uid uint64, p []byte) [32]byte { + sumCh := make(chan [32]byte) + a512srv.blocksCh <- blockInput{uid: uid, msg: p, final: true, sumCh: sumCh} + return <-sumCh +} + +func (a512srv *Avx512Server) getDigests() *[512]byte { + digests := [512]byte{} + for i, lane := range a512srv.lanes { + a, ok := a512srv.digests[lane.uid] + if ok { + binary.BigEndian.PutUint32(digests[(i+0*16)*4:], binary.LittleEndian.Uint32(a[0:4])) + binary.BigEndian.PutUint32(digests[(i+1*16)*4:], binary.LittleEndian.Uint32(a[4:8])) + binary.BigEndian.PutUint32(digests[(i+2*16)*4:], + binary.LittleEndian.Uint32(a[8:12])) + binary.BigEndian.PutUint32(digests[(i+3*16)*4:], + binary.LittleEndian.Uint32(a[12:16])) + binary.BigEndian.PutUint32(digests[(i+4*16)*4:], + binary.LittleEndian.Uint32(a[16:20])) + binary.BigEndian.PutUint32(digests[(i+5*16)*4:], + binary.LittleEndian.Uint32(a[20:24])) + binary.BigEndian.PutUint32(digests[(i+6*16)*4:], + binary.LittleEndian.Uint32(a[24:28])) + binary.BigEndian.PutUint32(digests[(i+7*16)*4:], + binary.LittleEndian.Uint32(a[28:32])) + } else { + binary.LittleEndian.PutUint32(digests[(i+0*16)*4:], init0) + binary.LittleEndian.PutUint32(digests[(i+1*16)*4:], init1) + binary.LittleEndian.PutUint32(digests[(i+2*16)*4:], init2) + binary.LittleEndian.PutUint32(digests[(i+3*16)*4:], init3) + binary.LittleEndian.PutUint32(digests[(i+4*16)*4:], init4) + binary.LittleEndian.PutUint32(digests[(i+5*16)*4:], init5) + binary.LittleEndian.PutUint32(digests[(i+6*16)*4:], init6) + binary.LittleEndian.PutUint32(digests[(i+7*16)*4:], init7) + } + } + return &digests +} + +// Helper struct for sorting blocks based on length +type lane struct { + len uint + pos uint +} + +type lanes []lane + +func (lns lanes) Len() int { return len(lns) } +func (lns lanes) Swap(i, j int) { lns[i], lns[j] = lns[j], lns[i] } +func (lns lanes) Less(i, j int) bool { return lns[i].len < lns[j].len } + +// Helper struct for +type maskRounds struct { + mask uint64 + rounds uint64 +} + +func genMask(input [16][]byte) [16]maskRounds { + + // Sort on blocks length small to large + var sorted [16]lane + for c, inpt := range input { + sorted[c] = lane{uint(len(inpt)), uint(c)} + } + sort.Sort(lanes(sorted[:])) + + // Create mask array including 'rounds' between masks + m, round, index := uint64(0xffff), uint64(0), 0 + var mr [16]maskRounds + for _, s := range sorted { + if s.len > 0 { + if uint64(s.len)>>6 > round { + mr[index] = maskRounds{m, (uint64(s.len) >> 6) - round} + index++ + } + round = uint64(s.len) >> 6 + } + m = m & ^(1 << uint(s.pos)) + } + + return mr +} + +// TODO: remove function +func expandMask(mr [16]maskRounds) []uint64 { + size := uint64(0) + for _, r := range mr { + size += r.rounds + } + result, index := make([]uint64, size), 0 + for _, r := range mr { + for j := uint64(0); j < r.rounds; j++ { + result[index] = r.mask + index++ + } + } + return result +} diff --git a/sha256/sha256blockAvx512_amd64.s b/sha256/sha256blockAvx512_amd64.s new file mode 100644 index 0000000..cca534e --- /dev/null +++ b/sha256/sha256blockAvx512_amd64.s @@ -0,0 +1,267 @@ +//+build !noasm,!appengine,gc + +TEXT ·sha256X16Avx512(SB), 7, $0 + MOVQ digests+0(FP), DI + MOVQ scratch+8(FP), R12 + 
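+ // Next: load the mask slice length and base pointer, fetch the first per-round lane mask, and take the address of the 16 input slices before entering the block loop.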
MOVQ mask_len+32(FP), SI + MOVQ mask_base+24(FP), R13 + MOVQ (R13), R14 + LONG $0x92fbc1c4; BYTE $0xce + LEAQ inputs+48(FP), AX + QUAD $0xf162076f487ef162; QUAD $0x7ef162014f6f487e; QUAD $0x487ef16202576f48; QUAD $0x6f487ef162035f6f; QUAD $0x6f6f487ef1620467; QUAD $0x06776f487ef16205; LONG $0x487ef162; WORD $0x7f6f; BYTE $0x07 + MOVQ table+16(FP), DX + WORD $0x3148; BYTE $0xc9 + TESTQ $(1<<0), R14 + JE skipInput0 + MOVQ 0*24(AX), R9 + LONG $0x487cc162; WORD $0x0410; BYTE $0x09 + +skipInput0: + TESTQ $(1<<1), R14 + JE skipInput1 + MOVQ 1*24(AX), R9 + LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 + +skipInput1: + TESTQ $(1<<2), R14 + JE skipInput2 + MOVQ 2*24(AX), R9 + LONG $0x487cc162; WORD $0x1410; BYTE $0x09 + +skipInput2: + TESTQ $(1<<3), R14 + JE skipInput3 + MOVQ 3*24(AX), R9 + LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 + +skipInput3: + TESTQ $(1<<4), R14 + JE skipInput4 + MOVQ 4*24(AX), R9 + LONG $0x487cc162; WORD $0x2410; BYTE $0x09 + +skipInput4: + TESTQ $(1<<5), R14 + JE skipInput5 + MOVQ 5*24(AX), R9 + LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 + +skipInput5: + TESTQ $(1<<6), R14 + JE skipInput6 + MOVQ 6*24(AX), R9 + LONG $0x487cc162; WORD $0x3410; BYTE $0x09 + +skipInput6: + TESTQ $(1<<7), R14 + JE skipInput7 + MOVQ 7*24(AX), R9 + LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 + +skipInput7: + TESTQ $(1<<8), R14 + JE skipInput8 + MOVQ 8*24(AX), R9 + LONG $0x487c4162; WORD $0x0410; BYTE $0x09 + +skipInput8: + TESTQ $(1<<9), R14 + JE skipInput9 + MOVQ 9*24(AX), R9 + LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 + +skipInput9: + TESTQ $(1<<10), R14 + JE skipInput10 + MOVQ 10*24(AX), R9 + LONG $0x487c4162; WORD $0x1410; BYTE $0x09 + +skipInput10: + TESTQ $(1<<11), R14 + JE skipInput11 + MOVQ 11*24(AX), R9 + LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 + +skipInput11: + TESTQ $(1<<12), R14 + JE skipInput12 + MOVQ 12*24(AX), R9 + LONG $0x487c4162; WORD $0x2410; BYTE $0x09 + +skipInput12: + TESTQ $(1<<13), R14 + JE skipInput13 + MOVQ 13*24(AX), R9 + LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 + +skipInput13: + TESTQ $(1<<14), R14 + JE skipInput14 + MOVQ 14*24(AX), R9 + LONG $0x487c4162; WORD $0x3410; BYTE $0x09 + +skipInput14: + TESTQ $(1<<15), R14 + JE skipInput15 + MOVQ 15*24(AX), R9 + LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 + +skipInput15: +lloop: + LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), DX + LONG $0x487e7162; WORD $0x1a6f + MOVQ table+16(FP), DX + QUAD $0xd162226f487e7162; QUAD $0x7ed16224047f487e; QUAD $0x7ed16201244c7f48; QUAD $0x7ed1620224547f48; QUAD $0x7ed16203245c7f48; QUAD $0x7ed1620424647f48; QUAD $0x7ed16205246c7f48; QUAD $0x7ed1620624747f48; QUAD $0xc1834807247c7f48; QUAD $0x44c9c6407c316240; QUAD $0x62eec1c6407ca162; QUAD $0xa16244d3c6406c31; QUAD $0x34c162eed3c6406c; QUAD $0x407ca162dddac648; QUAD $0xc6407ca16288cac6; QUAD $0xcac648345162ddc2; QUAD $0x44d5c6405ca16288; QUAD $0x62eee5c6405ca162; QUAD $0xa16244d7c6404c31; QUAD $0x6cc162eef7c6404c; QUAD $0x405ca162ddfac640; QUAD $0xc6405ca16288eec6; QUAD $0xd2c6406cc162dde6; QUAD $0x44f1c6403c816288; QUAD $0x62eec1c6403c0162; QUAD $0x016244d3c6402c11; QUAD $0x4c4162eed3c6402c; QUAD $0x403c0162dddac640; QUAD $0xc6403c016288cac6; QUAD $0xf2c6404cc162ddc2; QUAD $0x44d5c6401c016288; QUAD $0x62eee5c6401c0162; QUAD $0x016244d7c6400c11; QUAD $0x2c4162eef7c6400c; QUAD $0x401c0162ddfac640; QUAD $0xc6401c016288eec6; QUAD $0xd2c6402c4162dde6; BYTE $0x88 + LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX + LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 + QUAD $0x2262336f487e6162; QUAD $0x487e5162f27648b5; QUAD $0xd27648b53262106f; QUAD $0xa262136f487ee162; 
QUAD $0x487e5162d77640e5; QUAD $0xcf7640e53262086f; QUAD $0xa2621b6f487ee162; QUAD $0x487ec162dd7640f5; QUAD $0xfd7640f5a262386f; QUAD $0xa2620b6f487ee162; QUAD $0x487ec162cc7640fd; QUAD $0xec7640fda262286f; QUAD $0x8262036f487ee162; QUAD $0x487ec162c27640cd; QUAD $0xe27640cd8262206f; QUAD $0x8262336f487ee162; QUAD $0x487e4162f77640a5; QUAD $0xd77640a50262106f; QUAD $0x02621b6f487e6162; QUAD $0x487e4162dd7640b5; QUAD $0xfd7640b50262386f; QUAD $0x02620b6f487e6162; QUAD $0x487e4162cc7640bd; QUAD $0xec7640bd0262286f; QUAD $0x62eec023408d2362; QUAD $0x236244c023408da3; QUAD $0xada362eee42348ad; QUAD $0x40c5036244e42348; QUAD $0x2340c51362eef723; QUAD $0xfd2340d5036244d7; QUAD $0x44fd2340d58362ee; QUAD $0x62eeea2348b50362; QUAD $0x036244ea2348b583; QUAD $0xe51362eed32340e5; QUAD $0x40f5036244cb2340; QUAD $0x2340f58362eed923; QUAD $0xce2340ed236244d9; QUAD $0x44ce2340eda362ee; QUAD $0xc162d16f487ec162; QUAD $0x407dc262f26f487e; QUAD $0xcb004075c262c300; QUAD $0xc262d300406dc262; QUAD $0x405dc262db004065; QUAD $0xeb004055c262e300; QUAD $0xc262f300404dc262; QUAD $0x403d4262fb004045; QUAD $0xcb0040354262c300; QUAD $0x4262d300402d4262; QUAD $0x401d4262db004025; QUAD $0xeb0040154262e300; QUAD $0x4262f300400d4262; QUAD $0x48455162fb004005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6201626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916202626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16203; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD 
$0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16204626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16205626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x06626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16207626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1620862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6209626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD 
$0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1620a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591620b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91620c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591620d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x0e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD 
$0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591620f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591621062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6211626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916212626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16213; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD 
$0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16214626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16215626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x16626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16217626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1621862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6219626f487e7162; QUAD $0xb16211c6724815b1; QUAD 
$0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1621a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591621b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91621c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591621d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x1e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD 
$0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591621f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591622062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6221626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916222626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16223; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD 
$0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16224626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16225626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x26626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16227626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1622862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD 
$0x6229626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1622a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591622b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91622c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591622d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x2e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD 
$0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591622f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591623062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x01ee8348fdfe4005 + JE lastLoop + ADDQ $8, R13 + MOVQ (R13), R14 + QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x31 + TESTQ $(1<<0), R14 + JE skipNext0 + MOVQ 0*24(AX), R9 + LONG $0x487cc162; WORD $0x0410; BYTE $0x09 + +skipNext0: + QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x32 + TESTQ $(1<<1), R14 + JE skipNext1 + MOVQ 1*24(AX), R9 + LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 + +skipNext1: + QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x33 + TESTQ $(1<<2), R14 + JE skipNext2 + MOVQ 2*24(AX), R9 + LONG $0x487cc162; WORD $0x1410; BYTE $0x09 + +skipNext2: + QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; 
BYTE $0x34 + TESTQ $(1<<3), R14 + JE skipNext3 + MOVQ 3*24(AX), R9 + LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 + +skipNext3: + QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x35 + TESTQ $(1<<4), R14 + JE skipNext4 + MOVQ 4*24(AX), R9 + LONG $0x487cc162; WORD $0x2410; BYTE $0x09 + +skipNext4: + QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x36 + TESTQ $(1<<5), R14 + JE skipNext5 + MOVQ 5*24(AX), R9 + LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 + +skipNext5: + QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x37 + TESTQ $(1<<6), R14 + JE skipNext6 + MOVQ 6*24(AX), R9 + LONG $0x487cc162; WORD $0x3410; BYTE $0x09 + +skipNext6: + QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x38 + TESTQ $(1<<7), R14 + JE skipNext7 + MOVQ 7*24(AX), R9 + LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 + +skipNext7: + QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x39 + TESTQ $(1<<8), R14 + JE skipNext8 + MOVQ 8*24(AX), R9 + LONG $0x487c4162; WORD $0x0410; BYTE $0x09 + +skipNext8: + QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x3a + TESTQ $(1<<9), R14 + JE skipNext9 + MOVQ 9*24(AX), R9 + LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 + +skipNext9: + QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; 
QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x3b + TESTQ $(1<<10), R14 + JE skipNext10 + MOVQ 10*24(AX), R9 + LONG $0x487c4162; WORD $0x1410; BYTE $0x09 + +skipNext10: + QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x3c + TESTQ $(1<<11), R14 + JE skipNext11 + MOVQ 11*24(AX), R9 + LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 + +skipNext11: + QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x3d + TESTQ $(1<<12), R14 + JE skipNext12 + MOVQ 12*24(AX), R9 + LONG $0x487c4162; WORD $0x2410; BYTE $0x09 + +skipNext12: + QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x3e + TESTQ $(1<<13), R14 + JE skipNext13 + MOVQ 13*24(AX), R9 + LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 + +skipNext13: + QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x3f + TESTQ $(1<<14), R14 + JE skipNext14 + MOVQ 14*24(AX), R9 + LONG $0x487c4162; WORD $0x3410; BYTE $0x09 + +skipNext14: + QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x40 + TESTQ $(1<<15), R14 + JE skipNext15 + MOVQ 15*24(AX), R9 + LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 + +skipNext15: + QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; 
QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0xc4fbfe4945d16207; LONG $0xce92fbc1 + JMP lloop + +lastLoop: + QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516231626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d3162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x516232626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d516233; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x4865516234626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d3162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x6235626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623662; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d516237626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d3162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; 
QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x38626f487e7162c0; QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516239626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d1162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x51623a626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d51623b; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x486551623c626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d1162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x623d626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623e62; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d51623f626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d1162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD 
$0x40626f487e7162c0; QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0x62fbfe4945d16207; QUAD $0x7ef162077f487ef1; QUAD $0x487ef162014f7f48; QUAD $0x7f487ef16202577f; QUAD $0x677f487ef162035f; QUAD $0x056f7f487ef16204; QUAD $0x6206777f487ef162; LONG $0x7f487ef1; WORD $0x077f + VZEROUPPER + RET + +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b +GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D +GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F +GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64 diff --git a/sha256/sha256blockAvx512_amd64_test.go b/sha256/sha256blockAvx512_amd64_test.go new file mode 100644 index 0000000..ee4da23 --- /dev/null +++ b/sha256/sha256blockAvx512_amd64_test.go @@ -0,0 +1,443 @@ +//go:build !noasm && !appengine && gc +// +build !noasm,!appengine,gc + +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sha256 + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "fmt" + "hash" + "reflect" + "sync" + "testing" +) + +func TestGoldenAVX512(t *testing.T) { + + if !hasAvx512 { + // t.SkipNow() + return + } + + server := NewAvx512Server() + h512 := NewAvx512(server) + + for _, g := range golden { + h512.Reset() + h512.Write([]byte(g.in)) + digest := h512.Sum([]byte{}) + s := fmt.Sprintf("%x", digest) + if !reflect.DeepEqual(digest, g.out[:]) { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", g.in, s, + hex.EncodeToString(g.out[:])) + } + } +} + +func createInputs(size int) [16][]byte { + input := [16][]byte{} + for i := 0; i < 16; i++ { + input[i] = make([]byte, size) + } + return input +} + +func initDigests() *[512]byte { + digests := [512]byte{} + for i := 0; i < 16; i++ { + binary.LittleEndian.PutUint32(digests[(i+0*16)*4:], init0) + binary.LittleEndian.PutUint32(digests[(i+1*16)*4:], init1) + binary.LittleEndian.PutUint32(digests[(i+2*16)*4:], init2) + binary.LittleEndian.PutUint32(digests[(i+3*16)*4:], init3) + binary.LittleEndian.PutUint32(digests[(i+4*16)*4:], init4) + binary.LittleEndian.PutUint32(digests[(i+5*16)*4:], init5) + binary.LittleEndian.PutUint32(digests[(i+6*16)*4:], init6) + binary.LittleEndian.PutUint32(digests[(i+7*16)*4:], init7) + } + return &digests +} + +func testSha256Avx512(t *testing.T, offset, padding int) [16][]byte { + + if !hasAvx512 { + // t.SkipNow() + return [16][]byte{} + } + + l := uint(len(golden[offset].in)) + extraBlock := uint(0) + if padding == 0 { + extraBlock += 9 + } else { + extraBlock += 64 + } + input := createInputs(int(l + extraBlock)) + for i := 0; i < 16; i++ { + copy(input[i], golden[offset+i].in) + input[i][l] = 0x80 + copy(input[i][l+1:], bytes.Repeat([]byte{0}, padding)) + + // Length in bits. 
+ len := uint64(l) + len <<= 3 + for ii := uint(0); ii < 8; ii++ { + input[i][l+1+uint(padding)+ii] = byte(len >> (56 - 8*ii)) + } + } + mask := make([]uint64, len(input[0])>>6) + for m := range mask { + mask[m] = 0xffff + } + output := blockAvx512(initDigests(), input, mask) + for i := 0; i < 16; i++ { + if bytes.Compare(output[i][:], golden[offset+i].out[:]) != 0 { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", golden[offset+i].in, + hex.EncodeToString(output[i][:]), hex.EncodeToString(golden[offset+i].out[:])) + } + } + return input +} + +func TestAvx512_1Block(t *testing.T) { testSha256Avx512(t, 31, 0) } +func TestAvx512_3Blocks(t *testing.T) { testSha256Avx512(t, 47, 55) } + +func TestAvx512_MixedBlocks(t *testing.T) { + + if !hasAvx512 { + // t.SkipNow() + return + } + + inputSingleBlock := testSha256Avx512(t, 31, 0) + inputMultiBlock := testSha256Avx512(t, 47, 55) + + input := [16][]byte{} + + for i := range input { + if i%2 == 0 { + input[i] = inputMultiBlock[i] + } else { + input[i] = inputSingleBlock[i] + } + } + + mask := [3]uint64{0xffff, 0x5555, 0x5555} + output := blockAvx512(initDigests(), input, mask[:]) + var offset int + for i := 0; i < len(output); i++ { + if i%2 == 0 { + offset = 47 + } else { + offset = 31 + } + if bytes.Compare(output[i][:], golden[offset+i].out[:]) != 0 { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", golden[offset+i].in, + hex.EncodeToString(output[i][:]), hex.EncodeToString(golden[offset+i].out[:])) + } + } +} + +func TestAvx512_MixedWithNilBlocks(t *testing.T) { + + if !hasAvx512 { + // t.SkipNow() + return + } + + inputSingleBlock := testSha256Avx512(t, 31, 0) + inputMultiBlock := testSha256Avx512(t, 47, 55) + + input := [16][]byte{} + + for i := range input { + if i%3 == 0 { + input[i] = inputMultiBlock[i] + } else if i%3 == 1 { + input[i] = inputSingleBlock[i] + } else { + input[i] = nil + } + } + + mask := [3]uint64{0xb6db, 0x9249, 0x9249} + output := blockAvx512(initDigests(), input, mask[:]) + var offset int + for i := 0; i < len(output); i++ { + if i%3 == 2 { // for nil inputs + initvec := [32]byte{0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae, 0x85, + 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, + 0x51, 0x0e, 0x52, 0x7f, 0x9b, 0x05, 0x68, 0x8c, + 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19} + if bytes.Compare(output[i][:], initvec[:]) != 0 { + t.Fatalf("Sum256 function: sha256 for nil vector = %s want %s", + hex.EncodeToString(output[i][:]), hex.EncodeToString(initvec[:])) + } + continue + } + if i%3 == 0 { + offset = 47 + } else { + offset = 31 + } + if bytes.Compare(output[i][:], golden[offset+i].out[:]) != 0 { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", golden[offset+i].in, + hex.EncodeToString(output[i][:]), hex.EncodeToString(golden[offset+i].out[:])) + } + } +} + +func TestAvx512Server(t *testing.T) { + + if !hasAvx512 { + // t.SkipNow() + return + } + + const offset = 31 + 16 + server := NewAvx512Server() + + // First block of 64 bytes + for i := 0; i < 16; i++ { + input := make([]byte, 64) + copy(input, golden[offset+i].in) + server.Write(uint64(Avx512ServerUID+i), input) + } + + // Second block of 64 bytes + for i := 0; i < 16; i++ { + input := make([]byte, 64) + copy(input, golden[offset+i].in[64:]) + server.Write(uint64(Avx512ServerUID+i), input) + } + + wg := sync.WaitGroup{} + wg.Add(16) + + // Third and final block + for i := 0; i < 16; i++ { + input := make([]byte, 64) + input[0] = 0x80 + copy(input[1:], bytes.Repeat([]byte{0}, 63-8)) + + // Length in bits. 
+ len := uint64(128) + len <<= 3 + for ii := uint(0); ii < 8; ii++ { + input[63-8+1+ii] = byte(len >> (56 - 8*ii)) + } + go func(i int, uid uint64, input []byte) { + output := server.Sum(uid, input) + if bytes.Compare(output[:], golden[offset+i].out[:]) != 0 { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", golden[offset+i].in, + hex.EncodeToString(output[:]), hex.EncodeToString(golden[offset+i].out[:])) + } + wg.Done() + }(i, uint64(Avx512ServerUID+i), input) + } + + wg.Wait() +} + +func TestAvx512Digest(t *testing.T) { + + if !hasAvx512 { + // t.SkipNow() + return + } + + server := NewAvx512Server() + + const tests = 16 + h512 := [16]hash.Hash{} + for i := 0; i < tests; i++ { + h512[i] = NewAvx512(server) + } + + const offset = 31 + 16 + for i := 0; i < tests; i++ { + input := make([]byte, 64) + copy(input, golden[offset+i].in) + h512[i].Write(input) + } + for i := 0; i < tests; i++ { + input := make([]byte, 64) + copy(input, golden[offset+i].in[64:]) + h512[i].Write(input) + } + for i := 0; i < tests; i++ { + output := h512[i].Sum([]byte{}) + if bytes.Compare(output[:], golden[offset+i].out[:]) != 0 { + t.Fatalf("Sum256 function: sha256(%s) = %s want %s", golden[offset+i].in, + hex.EncodeToString(output[:]), hex.EncodeToString(golden[offset+i].out[:])) + } + } +} + +func benchmarkAvx512SingleCore(h512 []hash.Hash, body []byte) { + + for i := 0; i < len(h512); i++ { + h512[i].Write(body) + } + for i := 0; i < len(h512); i++ { + _ = h512[i].Sum([]byte{}) + } +} + +func benchmarkAvx512(b *testing.B, size int) { + + if !hasAvx512 { + b.SkipNow() + return + } + + server := NewAvx512Server() + + const tests = 16 + body := make([]byte, size) + + b.SetBytes(int64(len(body) * tests)) + b.ResetTimer() + + for i := 0; i < b.N; i++ { + h512 := make([]hash.Hash, tests) + for i := 0; i < tests; i++ { + h512[i] = NewAvx512(server) + } + + benchmarkAvx512SingleCore(h512, body) + } +} + +func BenchmarkAvx512_05M(b *testing.B) { benchmarkAvx512(b, 512*1024) } +func BenchmarkAvx512_1M(b *testing.B) { benchmarkAvx512(b, 1*1024*1024) } +func BenchmarkAvx512_5M(b *testing.B) { benchmarkAvx512(b, 5*1024*1024) } +func BenchmarkAvx512_10M(b *testing.B) { benchmarkAvx512(b, 10*1024*1024) } + +func benchmarkAvx512MultiCore(b *testing.B, size, cores int) { + + if !hasAvx512 { + b.SkipNow() + return + } + + servers := make([]*Avx512Server, cores) + for c := 0; c < cores; c++ { + servers[c] = NewAvx512Server() + } + + const tests = 16 + + body := make([]byte, size) + + h512 := make([]hash.Hash, tests*cores) + for i := 0; i < tests*cores; i++ { + h512[i] = NewAvx512(servers[i>>4]) + } + + b.SetBytes(int64(size * 16 * cores)) + b.ResetTimer() + + var wg sync.WaitGroup + + for i := 0; i < b.N; i++ { + wg.Add(cores) + for c := 0; c < cores; c++ { + go func(c int) { + benchmarkAvx512SingleCore(h512[c*tests:(c+1)*tests], + body) + wg.Done() + }(c) + } + wg.Wait() + } +} + +func BenchmarkAvx512_5M_2Cores(b *testing.B) { benchmarkAvx512MultiCore(b, 5*1024*1024, 2) } +func BenchmarkAvx512_5M_4Cores(b *testing.B) { benchmarkAvx512MultiCore(b, 5*1024*1024, 4) } +func BenchmarkAvx512_5M_6Cores(b *testing.B) { benchmarkAvx512MultiCore(b, 5*1024*1024, 6) } + +type maskTest struct { + in [16]int + out [16]maskRounds +} + +var goldenMask = []maskTest{ + {[16]int{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [16]maskRounds{}}, + {[16]int{64, 0, 64, 0, 64, 0, 64, 0, 64, 0, 64, 0, 64, 0, 64, 0}, + [16]maskRounds{{0x5555, 1}}}, + {[16]int{0, 64, 0, 64, 0, 64, 0, 64, 0, 64, 0, 64, 0, 64, 0, 64}, + [16]maskRounds{{0xaaaa, 1}}}, 
+ {[16]int{64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64}, + [16]maskRounds{{0xffff, 1}}}, + {[16]int{128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, + [16]maskRounds{{0xffff, 2}}}, + {[16]int{64, 128, 64, 128, 64, 128, 64, 128, 64, 128, 64, 128, 64, 128, 64, 128}, + [16]maskRounds{{0xffff, 1}, {0xaaaa, 1}}}, + {[16]int{128, 64, 128, 64, 128, 64, 128, 64, 128, 64, 128, 64, 128, 64, 128, 64}, + [16]maskRounds{{0xffff, 1}, {0x5555, 1}}}, + {[16]int{64, 192, 64, 192, 64, 192, 64, 192, 64, 192, 64, 192, 64, 192, 64, 192}, + [16]maskRounds{{0xffff, 1}, {0xaaaa, 2}}}, + // + // >= 64 0110=6 1011=b 1101=d 0110=6 + // >=128 0100=4 0010=2 1001=9 0100=4 + {[16]int{0, 64, 128, 0, 64, 128, 0, 64, 128, 0, 64, 128, 0, 64, 128, 0}, + [16]maskRounds{{0x6db6, 1}, {0x4924, 1}}}, + {[16]int{1 * 64, 2 * 64, 3 * 64, 4 * 64, 5 * 64, 6 * 64, 7 * 64, 8 * 64, 9 * 64, 10 * 64, + 11 * 64, 12 * 64, 13 * 64, 14 * 64, 15 * 64, 16 * 64}, + [16]maskRounds{{0xffff, 1}, {0xfffe, 1}, {0xfffc, 1}, {0xfff8, 1}, {0xfff0, 1}, + {0xffe0, 1}, {0xffc0, 1}, {0xff80, 1}, + {0xff00, 1}, {0xfe00, 1}, {0xfc00, 1}, {0xf800, 1}, {0xf000, 1}, {0xe000, 1}, + {0xc000, 1}, {0x8000, 1}}}, + {[16]int{2 * 64, 1 * 64, 3 * 64, 4 * 64, 5 * 64, 6 * 64, 7 * 64, 8 * 64, 9 * 64, 10 * 64, + 11 * 64, 12 * 64, 13 * 64, 14 * 64, 15 * 64, 16 * 64}, + [16]maskRounds{{0xffff, 1}, {0xfffd, 1}, {0xfffc, 1}, {0xfff8, 1}, {0xfff0, 1}, + {0xffe0, 1}, {0xffc0, 1}, {0xff80, 1}, + {0xff00, 1}, {0xfe00, 1}, {0xfc00, 1}, {0xf800, 1}, {0xf000, 1}, {0xe000, 1}, + {0xc000, 1}, {0x8000, 1}}}, + {[16]int{10 * 64, 20 * 64, 30 * 64, 40 * 64, 50 * 64, 60 * 64, 70 * 64, 80 * 64, 90 * 64, + 100 * 64, 110 * 64, 120 * 64, 130 * 64, 140 * 64, 150 * 64, 160 * 64}, + [16]maskRounds{{0xffff, 10}, {0xfffe, 10}, {0xfffc, 10}, {0xfff8, 10}, {0xfff0, 10}, + {0xffe0, 10}, {0xffc0, 10}, {0xff80, 10}, + {0xff00, 10}, {0xfe00, 10}, {0xfc00, 10}, {0xf800, 10}, {0xf000, 10}, {0xe000, 10}, + {0xc000, 10}, {0x8000, 10}}}, + {[16]int{10 * 64, 19 * 64, 27 * 64, 34 * 64, 40 * 64, 45 * 64, 49 * 64, 52 * 64, 54 * 64, + 55 * 64, 57 * 64, 60 * 64, 64 * 64, 69 * 64, 75 * 64, 82 * 64}, + [16]maskRounds{{0xffff, 10}, {0xfffe, 9}, {0xfffc, 8}, {0xfff8, 7}, {0xfff0, 6}, + {0xffe0, 5}, {0xffc0, 4}, {0xff80, 3}, + {0xff00, 2}, {0xfe00, 1}, {0xfc00, 2}, {0xf800, 3}, {0xf000, 4}, {0xe000, 5}, + {0xc000, 6}, {0x8000, 7}}}, +} + +func TestMaskGen(t *testing.T) { + input := [16][]byte{} + for gcase, g := range goldenMask { + for i, l := range g.in { + buf := make([]byte, l) + input[i] = buf[:] + } + + mr := genMask(input) + + if !reflect.DeepEqual(mr, g.out) { + t.Fatalf("case %d: got %04x\n want %04x", gcase, mr, g.out) + } + } +} diff --git a/sha256/sha256block_amd64.go b/sha256/sha256block_amd64.go new file mode 100644 index 0000000..e536f54 --- /dev/null +++ b/sha256/sha256block_amd64.go @@ -0,0 +1,31 @@ +//go:build !noasm && !appengine && gc +// +build !noasm,!appengine,gc + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +func blockArmSha2Go(dig *digest, p []byte) { + panic("blockArmSha2Go called unexpectedly") +} + +//go:noescape +func blockIntelSha(h *[8]uint32, message []uint8) + +func blockIntelShaGo(dig *digest, p []byte) { + blockIntelSha(&dig.h, p) +} diff --git a/sha256/sha256block_amd64.s b/sha256/sha256block_amd64.s new file mode 100644 index 0000000..c98a1d8 --- /dev/null +++ b/sha256/sha256block_amd64.s @@ -0,0 +1,266 @@ +//+build !noasm,!appengine,gc + +// SHA intrinsic version of SHA256 + +// Kristofer Peterson, (C) 2018. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "textflag.h" + +DATA K<>+0x00(SB)/4, $0x428a2f98 +DATA K<>+0x04(SB)/4, $0x71374491 +DATA K<>+0x08(SB)/4, $0xb5c0fbcf +DATA K<>+0x0c(SB)/4, $0xe9b5dba5 +DATA K<>+0x10(SB)/4, $0x3956c25b +DATA K<>+0x14(SB)/4, $0x59f111f1 +DATA K<>+0x18(SB)/4, $0x923f82a4 +DATA K<>+0x1c(SB)/4, $0xab1c5ed5 +DATA K<>+0x20(SB)/4, $0xd807aa98 +DATA K<>+0x24(SB)/4, $0x12835b01 +DATA K<>+0x28(SB)/4, $0x243185be +DATA K<>+0x2c(SB)/4, $0x550c7dc3 +DATA K<>+0x30(SB)/4, $0x72be5d74 +DATA K<>+0x34(SB)/4, $0x80deb1fe +DATA K<>+0x38(SB)/4, $0x9bdc06a7 +DATA K<>+0x3c(SB)/4, $0xc19bf174 +DATA K<>+0x40(SB)/4, $0xe49b69c1 +DATA K<>+0x44(SB)/4, $0xefbe4786 +DATA K<>+0x48(SB)/4, $0x0fc19dc6 +DATA K<>+0x4c(SB)/4, $0x240ca1cc +DATA K<>+0x50(SB)/4, $0x2de92c6f +DATA K<>+0x54(SB)/4, $0x4a7484aa +DATA K<>+0x58(SB)/4, $0x5cb0a9dc +DATA K<>+0x5c(SB)/4, $0x76f988da +DATA K<>+0x60(SB)/4, $0x983e5152 +DATA K<>+0x64(SB)/4, $0xa831c66d +DATA K<>+0x68(SB)/4, $0xb00327c8 +DATA K<>+0x6c(SB)/4, $0xbf597fc7 +DATA K<>+0x70(SB)/4, $0xc6e00bf3 +DATA K<>+0x74(SB)/4, $0xd5a79147 +DATA K<>+0x78(SB)/4, $0x06ca6351 +DATA K<>+0x7c(SB)/4, $0x14292967 +DATA K<>+0x80(SB)/4, $0x27b70a85 +DATA K<>+0x84(SB)/4, $0x2e1b2138 +DATA K<>+0x88(SB)/4, $0x4d2c6dfc +DATA K<>+0x8c(SB)/4, $0x53380d13 +DATA K<>+0x90(SB)/4, $0x650a7354 +DATA K<>+0x94(SB)/4, $0x766a0abb +DATA K<>+0x98(SB)/4, $0x81c2c92e +DATA K<>+0x9c(SB)/4, $0x92722c85 +DATA K<>+0xa0(SB)/4, $0xa2bfe8a1 +DATA K<>+0xa4(SB)/4, $0xa81a664b +DATA K<>+0xa8(SB)/4, $0xc24b8b70 +DATA K<>+0xac(SB)/4, $0xc76c51a3 +DATA K<>+0xb0(SB)/4, $0xd192e819 +DATA K<>+0xb4(SB)/4, $0xd6990624 +DATA K<>+0xb8(SB)/4, $0xf40e3585 +DATA K<>+0xbc(SB)/4, $0x106aa070 +DATA K<>+0xc0(SB)/4, $0x19a4c116 +DATA K<>+0xc4(SB)/4, $0x1e376c08 +DATA K<>+0xc8(SB)/4, $0x2748774c +DATA K<>+0xcc(SB)/4, $0x34b0bcb5 +DATA K<>+0xd0(SB)/4, $0x391c0cb3 +DATA K<>+0xd4(SB)/4, $0x4ed8aa4a +DATA K<>+0xd8(SB)/4, $0x5b9cca4f +DATA K<>+0xdc(SB)/4, $0x682e6ff3 +DATA K<>+0xe0(SB)/4, $0x748f82ee +DATA K<>+0xe4(SB)/4, $0x78a5636f +DATA K<>+0xe8(SB)/4, $0x84c87814 +DATA K<>+0xec(SB)/4, $0x8cc70208 +DATA K<>+0xf0(SB)/4, $0x90befffa +DATA K<>+0xf4(SB)/4, $0xa4506ceb +DATA K<>+0xf8(SB)/4, $0xbef9a3f7 +DATA K<>+0xfc(SB)/4, $0xc67178f2 +GLOBL K<>(SB), RODATA|NOPTR, $256 + +DATA SHUF_MASK<>+0x00(SB)/8, $0x0405060700010203 +DATA SHUF_MASK<>+0x08(SB)/8, 
$0x0c0d0e0f08090a0b +GLOBL SHUF_MASK<>(SB), RODATA|NOPTR, $16 + +// Register Usage +// BX base address of constant table (constant) +// DX hash_state (constant) +// SI hash_data.data +// DI hash_data.data + hash_data.length - 64 (constant) +// X0 scratch +// X1 scratch +// X2 working hash state // ABEF +// X3 working hash state // CDGH +// X4 first 16 bytes of block +// X5 second 16 bytes of block +// X6 third 16 bytes of block +// X7 fourth 16 bytes of block +// X12 saved hash state // ABEF +// X13 saved hash state // CDGH +// X15 data shuffle mask (constant) + +TEXT ·blockIntelSha(SB), NOSPLIT, $0-32 + MOVQ h+0(FP), DX + MOVQ message_base+8(FP), SI + MOVQ message_len+16(FP), DI + LEAQ -64(SI)(DI*1), DI + MOVOU (DX), X2 + MOVOU 16(DX), X1 + MOVO X2, X3 + PUNPCKLLQ X1, X2 + PUNPCKHLQ X1, X3 + PSHUFD $0x27, X2, X2 + PSHUFD $0x27, X3, X3 + MOVO SHUF_MASK<>(SB), X15 + LEAQ K<>(SB), BX + + JMP TEST + +LOOP: + MOVO X2, X12 + MOVO X3, X13 + + // load block and shuffle + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOU 32(SI), X6 + MOVOU 48(SI), X7 + PSHUFB X15, X4 + PSHUFB X15, X5 + PSHUFB X15, X6 + PSHUFB X15, X7 + +#define ROUND456 \ + PADDL X5, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X5, X1 \ + LONG $0x0f3a0f66; WORD $0x04cc \ // PALIGNR XMM1, XMM4, 4 + PADDL X1, X6 \ + LONG $0xf5cd380f \ // SHA256MSG2 XMM6, XMM5 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 + +#define ROUND567 \ + PADDL X6, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X6, X1 \ + LONG $0x0f3a0f66; WORD $0x04cd \ // PALIGNR XMM1, XMM5, 4 + PADDL X1, X7 \ + LONG $0xfecd380f \ // SHA256MSG2 XMM7, XMM6 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 + +#define ROUND674 \ + PADDL X7, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X7, X1 \ + LONG $0x0f3a0f66; WORD $0x04ce \ // PALIGNR XMM1, XMM6, 4 + PADDL X1, X4 \ + LONG $0xe7cd380f \ // SHA256MSG2 XMM4, XMM7 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xf7cc380f // SHA256MSG1 XMM6, XMM7 + +#define ROUND745 \ + PADDL X4, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X4, X1 \ + LONG $0x0f3a0f66; WORD $0x04cf \ // PALIGNR XMM1, XMM7, 4 + PADDL X1, X5 \ + LONG $0xeccd380f \ // SHA256MSG2 XMM5, XMM4 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xfccc380f // SHA256MSG1 XMM7, XMM4 + + // rounds 0-3 + MOVO (BX), X0 + PADDL X4, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + // rounds 4-7 + MOVO 1*16(BX), X0 + PADDL X5, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 + + // rounds 8-11 + MOVO 2*16(BX), X0 + PADDL X6, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 + + MOVO 3*16(BX), X0; ROUND674 // rounds 12-15 + MOVO 4*16(BX), X0; ROUND745 // rounds 16-19 + MOVO 5*16(BX), X0; ROUND456 // rounds 20-23 + MOVO 6*16(BX), X0; ROUND567 // rounds 24-27 + MOVO 7*16(BX), X0; ROUND674 // rounds 28-31 + MOVO 8*16(BX), X0; ROUND745 // rounds 32-35 + MOVO 9*16(BX), X0; ROUND456 // rounds 36-39 + MOVO 10*16(BX), X0; ROUND567 // rounds 40-43 + MOVO 11*16(BX), X0; ROUND674 // rounds 44-47 + MOVO 12*16(BX), X0; ROUND745 // rounds 48-51 + + // rounds 52-55 + 
MOVO 13*16(BX), X0 + PADDL X5, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + MOVO X5, X1 + LONG $0x0f3a0f66; WORD $0x04cc // PALIGNR XMM1, XMM4, 4 + PADDL X1, X6 + LONG $0xf5cd380f // SHA256MSG2 XMM6, XMM5 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + // rounds 56-59 + MOVO 14*16(BX), X0 + PADDL X6, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + MOVO X6, X1 + LONG $0x0f3a0f66; WORD $0x04cd // PALIGNR XMM1, XMM5, 4 + PADDL X1, X7 + LONG $0xfecd380f // SHA256MSG2 XMM7, XMM6 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + // rounds 60-63 + MOVO 15*16(BX), X0 + PADDL X7, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + PADDL X12, X2 + PADDL X13, X3 + + ADDQ $64, SI + +TEST: + CMPQ SI, DI + JBE LOOP + + PSHUFD $0x4e, X3, X0 + LONG $0x0e3a0f66; WORD $0xf0c2 // PBLENDW XMM0, XMM2, 0xf0 + PSHUFD $0x4e, X2, X1 + LONG $0x0e3a0f66; WORD $0x0fcb // PBLENDW XMM1, XMM3, 0x0f + PSHUFD $0x1b, X0, X0 + PSHUFD $0x1b, X1, X1 + + MOVOU X0, (DX) + MOVOU X1, 16(DX) + + RET diff --git a/sha256/sha256block_amd64_test.go b/sha256/sha256block_amd64_test.go new file mode 100644 index 0000000..da3d60c --- /dev/null +++ b/sha256/sha256block_amd64_test.go @@ -0,0 +1,78 @@ +//go:build !noasm && !appengine && gc +// +build !noasm,!appengine,gc + +package sha256 + +import ( + "crypto/sha256" + "encoding/binary" + "testing" +) + +func sha256hash(m []byte) (r [32]byte) { + var h [8]uint32 + + h[0] = 0x6a09e667 + h[1] = 0xbb67ae85 + h[2] = 0x3c6ef372 + h[3] = 0xa54ff53a + h[4] = 0x510e527f + h[5] = 0x9b05688c + h[6] = 0x1f83d9ab + h[7] = 0x5be0cd19 + + blockIntelSha(&h, m) + l0 := len(m) + l := l0 & (BlockSize - 1) + m = m[l0-l:] + + var k [64]byte + copy(k[:], m) + + k[l] = 0x80 + + if l >= 56 { + blockIntelSha(&h, k[:]) + binary.LittleEndian.PutUint64(k[0:8], 0) + binary.LittleEndian.PutUint64(k[8:16], 0) + binary.LittleEndian.PutUint64(k[16:24], 0) + binary.LittleEndian.PutUint64(k[24:32], 0) + binary.LittleEndian.PutUint64(k[32:40], 0) + binary.LittleEndian.PutUint64(k[40:48], 0) + binary.LittleEndian.PutUint64(k[48:56], 0) + } + binary.BigEndian.PutUint64(k[56:64], uint64(l0)<<3) + blockIntelSha(&h, k[:]) + + binary.BigEndian.PutUint32(r[0:4], h[0]) + binary.BigEndian.PutUint32(r[4:8], h[1]) + binary.BigEndian.PutUint32(r[8:12], h[2]) + binary.BigEndian.PutUint32(r[12:16], h[3]) + binary.BigEndian.PutUint32(r[16:20], h[4]) + binary.BigEndian.PutUint32(r[20:24], h[5]) + binary.BigEndian.PutUint32(r[24:28], h[6]) + binary.BigEndian.PutUint32(r[28:32], h[7]) + + return +} + +func runTestSha(hashfunc func([]byte) [32]byte) bool { + var m = []byte("This is a message. This is a message. This is a message. This is a message.") + + ar := hashfunc(m) + br := sha256.Sum256(m) + + return ar == br +} + +func TestSha0(t *testing.T) { + if !runTestSha(Sum256) { + t.Errorf("FAILED") + } +} + +func TestSha1(t *testing.T) { + if hasIntelSha && !runTestSha(sha256hash) { + t.Errorf("FAILED") + } +} diff --git a/sha256/sha256block_arm64.go b/sha256/sha256block_arm64.go new file mode 100644 index 0000000..9ef29f2 --- /dev/null +++ b/sha256/sha256block_arm64.go @@ -0,0 +1,38 @@ +//go:build !noasm && !appengine && gc +// +build !noasm,!appengine,gc + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +func blockIntelShaGo(dig *digest, p []byte) { + panic("blockIntelShaGo called unexpectedly") +} + +//go:noescape +func blockArmSha2(h []uint32, message []uint8) + +func blockArmSha2Go(dig *digest, p []byte) { + + h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], + dig.h[7]} + + blockArmSha2(h[:], p[:]) + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], + h[5], h[6], h[7] +} diff --git a/sha256/sha256block_arm64.s b/sha256/sha256block_arm64.s new file mode 100644 index 0000000..7ab88b1 --- /dev/null +++ b/sha256/sha256block_arm64.s @@ -0,0 +1,192 @@ +//+build !noasm,!appengine,gc + +// ARM64 version of SHA256 + +// +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// +// Based on implementation as found in https://github.com/jocover/sha256-armv8 +// +// Use github.com/minio/asm2plan9s on this file to assemble ARM instructions to +// their Plan9 equivalents +// + +TEXT ·blockArmSha2(SB), 7, $0 + MOVD h+0(FP), R0 + MOVD message+24(FP), R1 + MOVD message_len+32(FP), R2 // length of message + SUBS $64, R2 + BMI complete + + // Load constants table pointer + MOVD $·constants(SB), R3 + + // Cache constants table in registers v16 - v31 + WORD $0x4cdf2870 // ld1 {v16.4s-v19.4s}, [x3], #64 + WORD $0x4cdf7800 // ld1 {v0.4s}, [x0], #16 + WORD $0x4cdf2874 // ld1 {v20.4s-v23.4s}, [x3], #64 + + WORD $0x4c407801 // ld1 {v1.4s}, [x0] + WORD $0x4cdf2878 // ld1 {v24.4s-v27.4s}, [x3], #64 + WORD $0xd1004000 // sub x0, x0, #0x10 + WORD $0x4cdf287c // ld1 {v28.4s-v31.4s}, [x3], #64 + +loop: + // Main loop + WORD $0x4cdf2025 // ld1 {v5.16b-v8.16b}, [x1], #64 + WORD $0x4ea01c02 // mov v2.16b, v0.16b + WORD $0x4ea11c23 // mov v3.16b, v1.16b + WORD $0x6e2008a5 // rev32 v5.16b, v5.16b + WORD $0x6e2008c6 // rev32 v6.16b, v6.16b + WORD $0x4eb084a9 // add v9.4s, v5.4s, v16.4s + WORD $0x6e2008e7 // rev32 v7.16b, v7.16b + WORD $0x4eb184ca // add v10.4s, v6.4s, v17.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s + WORD $0x6e200908 // rev32 v8.16b, v8.16b + WORD $0x4eb284e9 // add v9.4s, v7.4s, v18.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s + WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s + WORD $0x4eb3850a // add v10.4s, v8.4s, v19.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e282907 // sha256su0 v7.4s, v8.4s + WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s + WORD $0x4eb484a9 // add v9.4s, v5.4s, v20.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s + WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s + WORD $0x4eb584ca // add v10.4s, v6.4s, v21.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s + WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s + WORD $0x4eb684e9 // add v9.4s, v7.4s, v22.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s + WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s + WORD $0x4eb7850a // add v10.4s, v8.4s, v23.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e282907 // sha256su0 v7.4s, v8.4s + WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s + WORD $0x4eb884a9 // add v9.4s, v5.4s, v24.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s + WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s + WORD $0x4eb984ca // add v10.4s, v6.4s, v25.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e2828c5 // 
sha256su0 v5.4s, v6.4s + WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s + WORD $0x4eba84e9 // add v9.4s, v7.4s, v26.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s + WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s + WORD $0x4ebb850a // add v10.4s, v8.4s, v27.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e282907 // sha256su0 v7.4s, v8.4s + WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s + WORD $0x4ebc84a9 // add v9.4s, v5.4s, v28.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s + WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s + WORD $0x4ebd84ca // add v10.4s, v6.4s, v29.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s + WORD $0x4ebe84e9 // add v9.4s, v7.4s, v30.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x4ebf850a // add v10.4s, v8.4s, v31.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x4ea38421 // add v1.4s, v1.4s, v3.4s + WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s + + SUBS $64, R2 + BPL loop + + // Store result + WORD $0x4c00a800 // st1 {v0.4s, v1.4s}, [x0] + +complete: + RET + +// Constants table +DATA ·constants+0x0(SB)/8, $0x71374491428a2f98 +DATA ·constants+0x8(SB)/8, $0xe9b5dba5b5c0fbcf +DATA ·constants+0x10(SB)/8, $0x59f111f13956c25b +DATA ·constants+0x18(SB)/8, $0xab1c5ed5923f82a4 +DATA ·constants+0x20(SB)/8, $0x12835b01d807aa98 +DATA ·constants+0x28(SB)/8, $0x550c7dc3243185be +DATA ·constants+0x30(SB)/8, $0x80deb1fe72be5d74 +DATA ·constants+0x38(SB)/8, $0xc19bf1749bdc06a7 +DATA ·constants+0x40(SB)/8, $0xefbe4786e49b69c1 +DATA ·constants+0x48(SB)/8, $0x240ca1cc0fc19dc6 +DATA ·constants+0x50(SB)/8, $0x4a7484aa2de92c6f +DATA ·constants+0x58(SB)/8, $0x76f988da5cb0a9dc +DATA ·constants+0x60(SB)/8, $0xa831c66d983e5152 +DATA ·constants+0x68(SB)/8, $0xbf597fc7b00327c8 +DATA ·constants+0x70(SB)/8, $0xd5a79147c6e00bf3 +DATA ·constants+0x78(SB)/8, $0x1429296706ca6351 +DATA ·constants+0x80(SB)/8, $0x2e1b213827b70a85 +DATA ·constants+0x88(SB)/8, $0x53380d134d2c6dfc +DATA ·constants+0x90(SB)/8, $0x766a0abb650a7354 +DATA ·constants+0x98(SB)/8, $0x92722c8581c2c92e +DATA ·constants+0xa0(SB)/8, $0xa81a664ba2bfe8a1 +DATA ·constants+0xa8(SB)/8, $0xc76c51a3c24b8b70 +DATA ·constants+0xb0(SB)/8, $0xd6990624d192e819 +DATA ·constants+0xb8(SB)/8, $0x106aa070f40e3585 +DATA ·constants+0xc0(SB)/8, $0x1e376c0819a4c116 +DATA ·constants+0xc8(SB)/8, $0x34b0bcb52748774c +DATA ·constants+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 +DATA ·constants+0xd8(SB)/8, $0x682e6ff35b9cca4f +DATA ·constants+0xe0(SB)/8, $0x78a5636f748f82ee +DATA ·constants+0xe8(SB)/8, $0x8cc7020884c87814 +DATA ·constants+0xf0(SB)/8, $0xa4506ceb90befffa +DATA ·constants+0xf8(SB)/8, $0xc67178f2bef9a3f7 + +GLOBL ·constants(SB), 8, $256 + diff --git a/sha256/sha256block_other.go b/sha256/sha256block_other.go new file mode 100644 index 
0000000..94d7eb0 --- /dev/null +++ b/sha256/sha256block_other.go @@ -0,0 +1,29 @@ +//go:build appengine || noasm || (!amd64 && !arm64) || !gc +// +build appengine noasm !amd64,!arm64 !gc + +/* + * Minio Cloud Storage, (C) 2019 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +func blockIntelShaGo(dig *digest, p []byte) { + panic("blockIntelShaGo called unexpectedly") + +} + +func blockArmSha2Go(dig *digest, p []byte) { + panic("blockArmSha2Go called unexpectedly") +} diff --git a/sha256/test-architectures.sh b/sha256/test-architectures.sh new file mode 100644 index 0000000..50150ea --- /dev/null +++ b/sha256/test-architectures.sh @@ -0,0 +1,15 @@ +#!/bin/sh + +set -e + +go tool dist list | while IFS=/ read os arch; do + echo "Checking $os/$arch..." + echo " normal" + GOARCH=$arch GOOS=$os go build -o /dev/null ./... + echo " noasm" + GOARCH=$arch GOOS=$os go build -tags noasm -o /dev/null ./... + echo " appengine" + GOARCH=$arch GOOS=$os go build -tags appengine -o /dev/null ./... + echo " noasm,appengine" + GOARCH=$arch GOOS=$os go build -tags 'appengine noasm' -o /dev/null ./... +done diff --git a/signer/signer.go b/signer/signer.go index 91e49ee..002331a 100644 --- a/signer/signer.go +++ b/signer/signer.go @@ -2,17 +2,15 @@ // abstract the signature algorithm from the usage. package signer -// I am an interface for a key pair for signing, created to abstract between a -// CGO fast BIP-340 signature library and the slower btcec library. +// I is an interface for a key pair for signing, created to abstract between a CGO fast BIP-340 +// signature library and the slower btcec library. type I interface { - // Generate creates a fresh new key pair from system entropy + // Generate creates a fresh new key pair from system entropy, and ensures it is even (so + // ECDH works). Generate() (err error) - // GenerateForECDH creates a fresh new key pair from system entropy, and - // ensures it can do ECDH. - GenerateForECDH() (err error) // InitSec initialises the secret (signing) key from the raw bytes, and also // derives the public key because it can. - InitSec(sec []byte, nobtcec ...bool) (err error) + InitSec(sec []byte) (err error) // InitPub initializes the public (verification) key from raw bytes, this is // expected to be an x-only 32 byte pubkey. InitPub(pub []byte) (err error) @@ -26,22 +24,19 @@ type I interface { Verify(msg, sig []byte) (valid bool, err error) // Zero wipes the secret key to prevent memory leaks. Zero() - // InitECDH initialises (if necessary) the ECDH key generation - InitECDH() - // ECDH returns a shared secret derived using Elliptic Curve Diffie-Hellman - // on the secret and provided pubkey. + // ECDH returns a shared secret derived using Elliptic Curve Diffie-Hellman on + // the I secret and provided pubkey. ECDH(pub []byte) (secret []byte, err error) } // Gen is an interface for nostr BIP-340 key generation. 
type Gen interface { - // Generate gathers entropy and derives pubkey bytes for matching, this - // returns the 33 byte compressed form for checking the oddness of the Y - // coordinate. + // Generate gathers entropy and derives pubkey bytes for matching, this returns the 33 byte + // compressed form for checking the oddness of the Y coordinate. Generate() (pubBytes []byte, err error) // Negate flips the public key Y coordinate between odd and even. Negate() - // KeyPairBytes returns the raw bytes of the secret and public key, this - // returns the 32 byte X-only pubkey. + // KeyPairBytes returns the raw bytes of the secret and public key, this returns the 32 byte + // X-only pubkey. KeyPairBytes() (secBytes, cmprPubBytes []byte) } diff --git a/socketapi/challenge.go b/socketapi/challenge.go new file mode 100644 index 0000000..c2737cb --- /dev/null +++ b/socketapi/challenge.go @@ -0,0 +1,39 @@ +package socketapi + +import ( + "crypto/rand" + "net/http" + "orly.dev/chk" + + "github.com/fasthttp/websocket" + + "orly.dev/bech32encoding" + "orly.dev/ec/bech32" + "orly.dev/ws" +) + +const ( + DefaultChallengeHRP = "nchal" + DefaultChallengeLength = 16 +) + +// GetListener generates a new ws.Listener with a new challenge for a subscriber. +func GetListener(conn *websocket.Conn, req *http.Request) (w *ws.Listener) { + var err error + cb := make([]byte, DefaultChallengeLength) + if _, err = rand.Read(cb); chk.E(err) { + panic(err) + } + var b5 []byte + if b5, err = bech32encoding.ConvertForBech32(cb); chk.E(err) { + return + } + var encoded []byte + if encoded, err = bech32.Encode( + []byte(DefaultChallengeHRP), b5, + ); chk.E(err) { + return + } + w = ws.NewListener(conn, req, encoded) + return +} diff --git a/socketapi/handleAuth.go b/socketapi/handleAuth.go new file mode 100644 index 0000000..9a5d071 --- /dev/null +++ b/socketapi/handleAuth.go @@ -0,0 +1,52 @@ +package socketapi + +import ( + "orly.dev/realy/interfaces" +) + +func (a *A) HandleAuth( + req []byte, + srv interfaces.Server, +) (msg []byte) { + + // if auther, ok := srv.Relay().(relay.Authenticator); ok && auther.AuthRequired() { + // svcUrl := auther.ServiceUrl(a.Req()) + // if svcUrl == "" { + // return + // } + // log.T.F("received auth response,%s", req) + // var err error + // var rem []byte + // env := authenvelope.NewResponse() + // if rem, err = env.Unmarshal(req); chk.E(err) { + // return + // } + // if len(rem) > 0 { + // log.I.F("extra '%s'", rem) + // } + // var valid bool + // if valid, err = auth.Validate(env.Event, []byte(a.Challenge()), + // svcUrl); chk.E(err) { + // e := err.Error() + // if err = okenvelope.NewFrom(env.Event.Id, false, + // normalize.Error.F(err.Error())).Write(a.Listener); chk.E(err) { + // return []byte(err.Error()) + // } + // return normalize.Error.F(e) + // } else if !valid { + // if err = okenvelope.NewFrom(env.Event.Id, false, + // normalize.Error.F("failed to authenticate")).Write(a.Listener); chk.E(err) { + // return []byte(err.Error()) + // } + // return normalize.Restricted.F("auth response does not validate") + // } else { + // if err = okenvelope.NewFrom(env.Event.Id, true, + // []byte{}).Write(a.Listener); chk.E(err) { + // return + // } + // log.D.F("%s authed to pubkey,%0x", a.RealRemote(), env.Event.Pubkey) + // a.SetAuthed(string(env.Event.Pubkey)) + // } + // } + return +} diff --git a/socketapi/handleClose.go b/socketapi/handleClose.go index 9832705..2633227 100644 --- a/socketapi/handleClose.go +++ b/socketapi/handleClose.go @@ -3,12 +3,14 @@ package socketapi import ( 
"orly.dev/chk" "orly.dev/envelopes/closeenvelope" - "orly.dev/interfaces/server" "orly.dev/log" - "orly.dev/publish" + "orly.dev/realy/interfaces" ) -func (a *A) HandleClose(req []byte, srv server.I) (note []byte) { +func (a *A) HandleClose( + req []byte, + srv interfaces.Server, +) (note []byte) { var err error var rem []byte env := closeenvelope.New() @@ -21,12 +23,13 @@ func (a *A) HandleClose(req []byte, srv server.I) (note []byte) { if env.ID.String() == "" { return []byte("CLOSE has no ") } - publish.P.Receive( + srv.Publisher().Receive( &W{ - Cancel: true, - I: a.Listener, - Id: env.ID.String(), + Cancel: true, + Listener: a.Listener, + Id: env.ID.String(), }, ) + // srv.Publisher().removeSubscriberId(a.Listener, env.ID.String()) return } diff --git a/socketapi/handleEvent.go b/socketapi/handleEvent.go index 1721917..af8ab34 100644 --- a/socketapi/handleEvent.go +++ b/socketapi/handleEvent.go @@ -2,7 +2,6 @@ package socketapi import ( "bytes" - "github.com/minio/sha256-simd" "orly.dev/chk" "orly.dev/context" "orly.dev/envelopes/eventenvelope" @@ -10,249 +9,300 @@ import ( "orly.dev/event" "orly.dev/filter" "orly.dev/hex" - "orly.dev/interfaces/server" - "orly.dev/interfaces/store" "orly.dev/ints" "orly.dev/kind" "orly.dev/log" - "orly.dev/publish" + "orly.dev/normalize" + "orly.dev/realy/interfaces" + "orly.dev/sha256" "orly.dev/tag" ) -func (a *A) HandleEvent(r []byte, s server.I, remote string) (msg []byte) { +func (a *A) HandleEvent( + c context.T, req []byte, srv interfaces.Server, +) (msg []byte) { - log.T.F("%s handleEvent %s %d bytes", remote, r, len(r)-1) + log.T.F("handleEvent %s %s", a.RealRemote(), req) var err error var ok bool var rem []byte - sto := s.Storage() + sto := srv.Storage() if sto == nil { panic("no event store has been set to store event") } + // var auther relay.Authenticator + // if auther, ok = srv.Relay().(relay.Authenticator); ok { + // } + rl := srv.Relay() + // advancedDeleter, _ := sto.(relay.AdvancedDeleter) env := eventenvelope.NewSubmission() - if rem, err = env.Unmarshal(r); chk.E(err) { + if rem, err = env.Unmarshal(req); chk.E(err) { return } if len(rem) > 0 { - log.T.F("%s extra '%s'", remote, rem) + log.I.F("extra '%s'", rem) } - if err = a.VerifyEvent(env); chk.E(err) { + // accept, notice, after := rl.AcceptEvent(c, env.F, a.Req(), + // a.RealRemote(), nil, + // //a.AuthedBytes(), + // ) + // if !accept { + // if strings.Contains(notice, "mute") { + // if err = okenvelope.NewFrom(env.Id, false, + // normalize.Blocked.F(notice)).Write(a.Listener); chk.F(err) { + // } + // } else { + // //if auther != nil && auther.AuthRequired() { + // // if !a.AuthRequested() { + // // a.RequestAuth() + // // log.I.F("requesting auth from client %s", a.RealRemote()) + // // if err = authenvelope.NewChallengeWith(a.Challenge()).Write(a.Listener); chk.F(err) { + // // return + // // } + // // if err = okenvelope.NewFrom(env.Id, false, + // // normalize.AuthRequired.F("auth required for storing events")).Write(a.Listener); chk.F(err) { + // // } + // // return + // // } else { + // // log.I.F("requesting auth again from client %s", a.RealRemote()) + // // if err = authenvelope.NewChallengeWith(a.Challenge()).Write(a.Listener); chk.F(err) { + // // return + // // } + // // if err = okenvelope.NewFrom(env.Id, false, + // // normalize.AuthRequired.F("auth required for storing events")).Write(a.Listener); chk.F(err) { + // // } + // // return + // // } + // //} else { + // // log.W.F("didn't find authentication method") + // //} + // } + // if err = 
okenvelope.NewFrom(env.Id, false, + // normalize.Invalid.F(notice)).Write(a.Listener); chk.F(err) { + // } + // return + // } + if !bytes.Equal(env.GetIDBytes(), env.Id) { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Invalid.F("event id is computed incorrectly"), + ).Write(a.Listener); chk.E(err) { + return + } + return + } + if ok, err = env.Verify(); chk.T(err) { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Error.F("failed to verify signature"), + ).Write(a.Listener); chk.E(err) { + return + } + } else if !ok { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Error.F("signature is invalid"), + ).Write(a.Listener); chk.E(err) { + return + } return } if env.E.Kind.K == kind.Deletion.K { - if err = a.CheckDelete(a.Context(), env, sto); chk.E(err) { + log.I.F("delete event\n%s", env.E.Serialize()) + for _, t := range env.Tags.ToSliceOfTags() { + var res []*event.E + if t.Len() >= 2 { + switch { + case bytes.Equal(t.Key(), []byte("e")): + evId := make([]byte, sha256.Size) + if _, err = hex.DecBytes(evId, t.Value()); chk.E(err) { + continue + } + res, err = sto.QueryEvents(c, &filter.F{Ids: tag.New(evId)}) + if err != nil { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Error.F("failed to query for target event"), + ).Write(a.Listener); chk.E(err) { + return + } + return + } + for i := range res { + if res[i].Kind.Equal(kind.Deletion) { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Blocked.F("not processing or storing delete event containing delete event references"), + ).Write(a.Listener); chk.E(err) { + return + } + return + } + if !bytes.Equal(res[i].Pubkey, env.E.Pubkey) { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Blocked.F("cannot delete other users' events (delete by e tag)"), + ).Write(a.Listener); chk.E(err) { + return + } + return + } + } + case bytes.Equal(t.Key(), []byte("a")): + split := bytes.Split(t.Value(), []byte{':'}) + if len(split) != 3 { + continue + } + var pk []byte + if pk, err = hex.DecAppend(nil, split[1]); chk.E(err) { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Invalid.F( + "delete event a tag pubkey value invalid: %s", + t.Value(), + ), + ).Write(a.Listener); chk.E(err) { + return + } + return + } + kin := ints.New(uint16(0)) + if _, err = kin.Unmarshal(split[0]); chk.E(err) { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Invalid.F( + "delete event a tag kind value invalid: %s", + t.Value(), + ), + ).Write(a.Listener); chk.E(err) { + return + } + return + } + kk := kind.New(kin.Uint16()) + if kk.Equal(kind.Deletion) { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Blocked.F("delete event kind may not be deleted"), + ).Write(a.Listener); chk.E(err) { + return + } + return + } + if !kk.IsParameterizedReplaceable() { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Error.F("delete tags with a tags containing non-parameterized-replaceable events cannot be processed"), + ).Write(a.Listener); chk.E(err) { + return + } + return + } + if !bytes.Equal(pk, env.E.Pubkey) { + log.I.S(pk, env.E.Pubkey, env.E) + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Blocked.F("cannot delete other users' events (delete by a tag)"), + ).Write(a.Listener); chk.E(err) { + return + } + return + } + f := filter.New() + f.Kinds.K = []*kind.T{kk} + // aut := make(by, 0, len(pk)/2) + // if aut, err = hex.DecAppend(aut, pk); chk.E(err) { + // return + // } + f.Authors.Append(pk) + f.Tags.AppendTags(tag.New([]byte{'#', 'd'}, split[2])) + res, err = 
sto.QueryEvents(c, f) + if err != nil { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Error.F("failed to query for target event"), + ).Write(a.Listener); chk.E(err) { + return + } + return + } + } + } + if len(res) < 1 { + continue + } + var resTmp []*event.E + for _, v := range res { + if env.E.CreatedAt.U64() >= v.CreatedAt.U64() { + resTmp = append(resTmp, v) + } + } + res = resTmp + for _, target := range res { + if target.Kind.K == kind.Deletion.K { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Error.F( + "cannot delete delete event %s", + env.Id, + ), + ).Write(a.Listener); chk.E(err) { + return + } + } + if target.CreatedAt.Int() > env.E.CreatedAt.Int() { + log.I.F( + "not deleting\n%d%\nbecause delete event is older\n%d", + target.CreatedAt.Int(), env.E.CreatedAt.Int(), + ) + continue + } + if !bytes.Equal(target.Pubkey, env.Pubkey) { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Error.F("only author can delete event"), + ).Write(a.Listener); chk.E(err) { + return + } + return + } + // if advancedDeleter != nil { + // advancedDeleter.BeforeDelete(c, t.Value(), env.Pubkey) + // } + if err = sto.DeleteEvent(c, target.EventId()); chk.T(err) { + if err = okenvelope.NewFrom( + env.Id, false, + normalize.Error.F(err.Error()), + ).Write(a.Listener); chk.E(err) { + return + } + return + } + // if advancedDeleter != nil { + // advancedDeleter.AfterDelete(t.Value(), env.Pubkey) + // } + } + res = nil + } + if err = okenvelope.NewFrom( + env.Id, true, + ).Write(a.Listener); chk.E(err) { return } } var reason []byte - ok, reason = s.AddEvent( - a.Context(), env.E, a.Listener.Req(), remote, - ) - if ok { - go publish.P.Deliver(env.E) - } + ok, reason = srv.AddEvent( + c, rl, env.E, a.Req(), a.RealRemote(), nil, + ) // a.AuthedBytes(), + + log.I.F("event added %v, %s", ok, reason) if err = okenvelope.NewFrom( - env.Id(), ok, reason, + env.Id, ok, reason, ).Write(a.Listener); chk.E(err) { return } - return -} - -func (a *A) VerifyEvent(env *eventenvelope.Submission) (err error) { - if !bytes.Equal(env.GetIDBytes(), env.Id()) { - if err = Ok.Invalid( - a, env, "event id is computed incorrectly", - ); chk.E(err) { - return - } - return - } - var ok bool - if ok, err = env.Verify(); chk.T(err) { - if err = Ok.Error( - a, env, "failed to verify signature", err, - ); chk.T(err) { - return - } - return - } else if !ok { - if err = Ok.Error(a, env, "signature is invalid", err); chk.T(err) { - return - } - return - } - return -} - -func (a *A) CheckDelete( - c context.T, env *eventenvelope.Submission, sto store.I, -) (err error) { - log.I.F("delete event\n%s", env.E.Serialize()) - for _, t := range env.Tags.ToSliceOfTags() { - var res []*event.E - if t.Len() >= 2 { - switch { - case bytes.Equal(t.Key(), []byte("e")): - evId := make([]byte, sha256.Size) - if _, err = hex.DecBytes(evId, t.Value()); chk.E(err) { - continue - } - res, err = sto.QueryEvents(c, &filter.F{Ids: tag.New(evId)}) - if err != nil { - if err = Ok.Error( - a, env, "failed to query for target event", - ); chk.T(err) { - return - } - return - } - for i := range res { - if res[i].Kind.Equal(kind.Deletion) { - if err = Ok.Blocked( - a, env, - "not processing or storing delete event containing delete event references", - ); chk.E(err) { - return - } - return - } - if !bytes.Equal(res[i].Pubkey, env.E.Pubkey) { - if err = Ok.Blocked( - a, env, - "cannot delete other users' events (delete by e tag)", - ); chk.E(err) { - return - } - return - } - } - case bytes.Equal(t.Key(), []byte("a")): - split := 
bytes.Split(t.Value(), []byte{':'}) - if len(split) != 3 { - continue - } - var pk []byte - if pk, err = hex.DecAppend(nil, split[1]); chk.E(err) { - if err = Ok.Invalid( - a, env, - "delete event a tag pubkey value invalid: %s", - t.Value(), - ); chk.T(err) { - } - return - } - kin := ints.New(uint16(0)) - if _, err = kin.Unmarshal(split[0]); chk.E(err) { - if err = Ok.Invalid( - a, env, - "delete event a tag kind value invalid: %s", t.Value(), - ); chk.T(err) { - return - } - return - } - kk := kind.New(kin.Uint16()) - if kk.Equal(kind.Deletion) { - if err = Ok.Blocked( - a, env, "delete event kind may not be deleted", - ); chk.E(err) { - return - } - return - } - if !kk.IsParameterizedReplaceable() { - if err = Ok.Error( - a, env, - "delete tags with a tags containing non-parameterized-replaceable events cannot be processed", - ); chk.E(err) { - return - } - return - } - if !bytes.Equal(pk, env.E.Pubkey) { - log.I.S(pk, env.E.Pubkey, env.E) - if err = Ok.Blocked( - a, env, - "cannot delete other users' events (delete by a tag)", - ); chk.E(err) { - return - } - return - } - f := filter.New() - f.Kinds.K = []*kind.T{kk} - f.Authors.Append(pk) - f.Tags.AppendTags(tag.New([]byte{'#', 'd'}, split[2])) - if res, err = sto.QueryEvents(c, f); err != nil { - if err = Ok.Error( - a, env, - "failed to query for target event", - ); chk.T(err) { - return - } - return - } - } - } - if len(res) < 1 { - continue - } - var resTmp event.S - for _, v := range res { - if env.E.CreatedAt.U64() >= v.CreatedAt.U64() { - resTmp = append(resTmp, v) - } - } - res = resTmp - for _, target := range res { - var skip bool - if skip, err = a.ProcessDelete(c, target, env, sto); skip { - continue - } else if err != nil { - return - } - } - res = nil - } - if err = okenvelope.NewFrom(env.Id(), true).Write(a.Listener); chk.E(err) { - return - } - return -} - -func (a *A) ProcessDelete( - c context.T, target *event.E, env *eventenvelope.Submission, - sto store.I, -) (skip bool, err error) { - if target.Kind.K == kind.Deletion.K { - if err = Ok.Error( - a, env, "cannot delete delete event %s", env.Id, - ); chk.E(err) { - return - } - } - if target.CreatedAt.Int() > env.E.CreatedAt.Int() { - if err = Ok.Error( - a, env, - "not deleting\n%d%\nbecause delete event is older\n%d", - target.CreatedAt.Int(), env.E.CreatedAt.Int(), - ); chk.E(err) { - return - } - skip = true - } - if !bytes.Equal(target.Pubkey, env.Pubkey) { - if err = Ok.Error(a, env, "only author can delete event"); chk.E(err) { - return - } - return - } - if err = sto.DeleteEvent(c, target.EventId()); chk.T(err) { - if err = Ok.Error(a, env, err.Error()); chk.T(err) { - return - } - return - } + // if after != nil { + // after() + // } return } diff --git a/socketapi/handleMessage.go b/socketapi/handleMessage.go index 6baa713..01b8937 100644 --- a/socketapi/handleMessage.go +++ b/socketapi/handleMessage.go @@ -2,43 +2,51 @@ package socketapi import ( "fmt" - "orly.dev/chk" + "orly.dev/log" + "orly.dev/envelopes" + "orly.dev/envelopes/authenvelope" "orly.dev/envelopes/closeenvelope" "orly.dev/envelopes/eventenvelope" "orly.dev/envelopes/noticeenvelope" "orly.dev/envelopes/reqenvelope" - "orly.dev/log" ) -func (a *A) HandleMessage(msg []byte, remote string) { - log.T.F("received message from %s\n%s", remote, msg) +func (a *A) HandleMessage(msg []byte) { var notice []byte var err error var t string var rem []byte - if t, rem = envelopes.Identify(msg); chk.E(err) { + if t, rem, err = envelopes.Identify(msg); chk.E(err) { notice = []byte(err.Error()) } + // rl := 
a.Relay() switch t { case eventenvelope.L: - notice = a.HandleEvent(rem, a.I, remote) + notice = a.HandleEvent(a.Context(), rem, a.Server) case reqenvelope.L: notice = a.HandleReq( - rem, a.I, remote, + a.Context(), rem, + // a.Options().SkipEventFunc, + a.Server, ) case closeenvelope.L: - notice = a.HandleClose(rem, a.I) - // case authenvelope.L: - // notice = a.HandleAuth(rem, a.Server) + notice = a.HandleClose(rem, a.Server) + case authenvelope.L: + notice = a.HandleAuth(rem, a.Server) default: + // if wsh, ok := rl.(relay.WebSocketHandler); ok { + // wsh.HandleUnknownType(a.Listener, t, rem) + // } else { notice = []byte(fmt.Sprintf("unknown envelope type %s\n%s", t, rem)) + // } } if len(notice) > 0 { - log.D.F("notice->%s %s", remote, notice) + log.D.F("notice->%s %s", a.RealRemote(), notice) if err = noticeenvelope.NewFrom(notice).Write(a.Listener); err != nil { return } } + } diff --git a/socketapi/handleReq.go b/socketapi/handleReq.go index 6d51051..d9fb829 100644 --- a/socketapi/handleReq.go +++ b/socketapi/handleReq.go @@ -1,105 +1,107 @@ package socketapi import ( - "fmt" + "errors" "orly.dev/chk" "orly.dev/envelopes/closedenvelope" + "orly.dev/log" + + "github.com/dgraph-io/badger/v4" + + "orly.dev/context" "orly.dev/envelopes/eoseenvelope" "orly.dev/envelopes/eventenvelope" "orly.dev/envelopes/reqenvelope" "orly.dev/event" - "orly.dev/interfaces/server" - "orly.dev/log" - "orly.dev/publish" - "sort" + "orly.dev/normalize" + "orly.dev/realy/interfaces" + "orly.dev/realy/pointers" ) func (a *A) HandleReq( - rem []byte, s server.I, remote string, -) (notice []byte) { - log.T.F("received request from %s", remote) + c context.T, req []byte, srv interfaces.Server, +) (r []byte) { + log.I.F("REQ:\n%s", req) + sto := srv.Storage() var err error - sto := s.Storage() - if sto == nil { - panic("no event store has been set to fetch events") - } + var rem []byte env := reqenvelope.New() - if rem, err = env.Unmarshal(rem); chk.E(err) { - notice = []byte(err.Error()) - return + if rem, err = env.Unmarshal(req); chk.E(err) { + return normalize.Error.F(err.Error()) } - log.I.S(env) - var evs event.S - // if the number of events on a filter matches the limit, mark the filter - // complete to prevent opening a subscription. - completed := make([]bool, len(env.Filters.F)) - for i, f := range env.Filters.F { - var e event.S - if e, err = sto.QueryEvents(a.Context(), f); chk.E(err) { - // this one failed, maybe try another - err = nil - if f.Ids.Len() > 0 { - completed[i] = true + if len(rem) > 0 { + log.I.F("extra '%s'", rem) + } + allowed := env.Filters + var events event.S + for _, f := range allowed.F { + // var i uint + if pointers.Present(f.Limit) { + if *f.Limit == 0 { + continue + } + } + log.D.F( + "query from %s %0x,%s", a.RealRemote(), nil, + f.Serialize(), + ) + if events, err = sto.QueryEvents(c, f); err != nil { + log.E.F("eventstore: %v", err) + if errors.Is(err, badger.ErrDBClosed) { + return } continue } - evs = append(evs, e...) 
- if (f.Limit != nil && int(*f.Limit) <= len(evs) && *f.Limit > 0) || f.Ids.Len() > 0 { - completed[i] = true - } - } - sort.Slice( - evs, func(i, j int) bool { - return evs[i].CreatedAt.I64() > evs[j].CreatedAt.I64() - }, - ) - for _, ev := range evs { - log.I.F("sending event\n%s", ev.Serialize()) - var res *eventenvelope.Result - if res, err = eventenvelope.NewResultWith( - env.Subscription.String(), ev, - ); chk.E(err) { - continue - } - if err = res.Write(a.Listener); chk.E(err) { - continue + // write out the events to the socket + for _, ev := range events { + var res *eventenvelope.Result + if res, err = eventenvelope.NewResultWith( + env.Subscription.T, + ev, + ); chk.E(err) { + return + } + if err = res.Write(a.Listener); chk.E(err) { + return + } } } if err = eoseenvelope.NewFrom(env.Subscription).Write(a.Listener); chk.E(err) { return } - // if all filters are complete, return instead of opening a subscription - complete := true - for _, c := range completed { - if !c { - complete = false + receiver := make(event.C, 32) + cancel := true + // if the query was for just Ids we know there cannot be any more results, so cancel the subscription. + for _, f := range allowed.F { + if f.Ids.Len() < 1 { + cancel = false + break + } + // also, if we received the limit amount of events, subscription ded + if pointers.Present(f.Limit) { + if len(events) < int(*f.Limit) { + cancel = false + } + } + if !cancel { break } } - if complete { - log.I.F("all filters complete, returning") + if !cancel { + srv.Publisher().Receive( + &W{ + Listener: a.Listener, + Id: env.Subscription.String(), + Receiver: receiver, + Filters: env.Filters, + }, + ) + } else { if err = closedenvelope.NewFrom( - env.Subscription, []byte(fmt.Sprintf( - "subscription %s complete", env.Subscription.String(), - )), + env.Subscription, nil, ).Write(a.Listener); chk.E(err) { return } - return } - for _, f := range env.Filters.F { - log.I.F( - "opening subscription for %s %s", env.Subscription, f.Marshal(nil), - ) - } - receiver := make(event.C, 32) - publish.P.Receive( - &W{ - I: a.Listener, - Id: env.Subscription.String(), - Receiver: receiver, - Filters: env.Filters, - }, - ) return } diff --git a/socketapi/ok.go b/socketapi/ok.go deleted file mode 100644 index b40b90e..0000000 --- a/socketapi/ok.go +++ /dev/null @@ -1,110 +0,0 @@ -package socketapi - -import ( - "orly.dev/envelopes/eid" - "orly.dev/envelopes/okenvelope" - "orly.dev/reason" -) - -type OK func(a *A, env eid.Ider, format string, params ...any) (err error) - -type OKs struct { - AuthRequired OK - PoW OK - Duplicate OK - Blocked OK - RateLimited OK - Invalid OK - Error OK - Unsupported OK - Restricted OK -} - -var Ok = OKs{ - AuthRequired: func( - a *A, env eid.Ider, format string, params ...any, - ) (err error) { - rr := reason.AuthRequired.F(format, params...) - r := okenvelope.NewFrom( - env.Id(), false, rr, - ) - r.Write(a.Listener) - return reason.AuthRequired.Err(format, params...) - }, - PoW: func(a *A, env eid.Ider, format string, params ...any) (err error) { - rr := reason.PoW.F(format, params...) - r := okenvelope.NewFrom( - env.Id(), false, rr, - ) - r.Write(a.Listener) - return reason.PoW.Err(format, params...) - }, - Duplicate: func( - a *A, env eid.Ider, format string, params ...any, - ) (err error) { - rr := reason.Duplicate.F(format, params...) - r := okenvelope.NewFrom( - env.Id(), false, rr, - ) - r.Write(a.Listener) - return reason.Duplicate.Err(format, params...) 
- }, - Blocked: func( - a *A, env eid.Ider, format string, params ...any, - ) (err error) { - rr := reason.Blocked.F(format, params...) - r := okenvelope.NewFrom( - env.Id(), false, rr, - ) - r.Write(a.Listener) - return reason.Blocked.Err(format, params...) - }, - RateLimited: func( - a *A, env eid.Ider, format string, params ...any, - ) (err error) { - rr := reason.RateLimited.F(format, params...) - r := okenvelope.NewFrom( - env.Id(), false, rr, - ) - r.Write(a.Listener) - return reason.RateLimited.Err(format, params...) - }, - Invalid: func( - a *A, env eid.Ider, format string, params ...any, - ) (err error) { - rr := reason.Invalid.F(format, params...) - r := okenvelope.NewFrom( - env.Id(), false, rr, - ) - r.Write(a.Listener) - return reason.Invalid.Err(format, params...) - }, - Error: func(a *A, env eid.Ider, format string, params ...any) (err error) { - rr := reason.Error.F(format, params...) - r := okenvelope.NewFrom( - env.Id(), false, rr, - ) - r.Write(a.Listener) - return reason.Error.Err(format, params...) - }, - Unsupported: func( - a *A, env eid.Ider, format string, params ...any, - ) (err error) { - rr := reason.Unsupported.F(format, params...) - r := okenvelope.NewFrom( - env.Id(), false, rr, - ) - r.Write(a.Listener) - return reason.Unsupported.Err(format, params...) - }, - Restricted: func( - a *A, env eid.Ider, format string, params ...any, - ) (err error) { - rr := reason.Restricted.F(format, params...) - r := okenvelope.NewFrom( - env.Id(), false, rr, - ) - r.Write(a.Listener) - return reason.Restricted.Err(format, params...) - }, -} diff --git a/socketapi/pinger.go b/socketapi/pinger.go new file mode 100644 index 0000000..f077859 --- /dev/null +++ b/socketapi/pinger.go @@ -0,0 +1,38 @@ +package socketapi + +import ( + "orly.dev/log" + "time" + + "github.com/fasthttp/websocket" + + "orly.dev/context" + "orly.dev/realy/interfaces" +) + +func (a *A) Pinger( + ctx context.T, ticker *time.Ticker, cancel context.F, s interfaces.Server, +) { + defer func() { + cancel() + ticker.Stop() + _ = a.Listener.Conn.Close() + }() + var err error + for { + select { + case <-ticker.C: + err = a.Listener.Conn.WriteControl( + websocket.PingMessage, nil, + time.Now().Add(DefaultPingWait), + ) + if err != nil { + log.E.F("error writing ping: %v; closing websocket", err) + return + } + a.Listener.RealRemote() + case <-ctx.Done(): + return + } + } +} diff --git a/socketapi/publisher.go b/socketapi/publisher.go index b56eb3e..ed2f87f 100644 --- a/socketapi/publisher.go +++ b/socketapi/publisher.go @@ -1,18 +1,16 @@ package socketapi import ( - "orly.dev/interfaces/listener" - "orly.dev/interfaces/typer" + "orly.dev/chk" "orly.dev/log" "regexp" "sync" - "orly.dev/chk" "orly.dev/envelopes/eventenvelope" "orly.dev/event" "orly.dev/filters" - "orly.dev/interfaces/publisher" - "orly.dev/publish" + "orly.dev/realy/publish/publisher" + "orly.dev/ws" ) const Type = "socketapi" @@ -22,14 +20,14 @@ var ( ) // Map is a map of filters associated with a collection of ws.Listener connections. -type Map map[listener.I]map[string]*filters.T +type Map map[*ws.Listener]map[string]*filters.T type W struct { - listener.I + *ws.Listener // If Cancel is true, this is a close command. Cancel bool - // Id is the subscription Id. If Cancel is true, cancel the named - // subscription, otherwise, cancel the publisher for the socket. + // Id is the subscription Id. If Cancel is true, cancel the named subscription, otherwise, + // cancel the publisher for the socket. 
Id string Receiver event.C Filters *filters.T @@ -38,7 +36,7 @@ type W struct { func (w *W) Type() string { return Type } type Close struct { - listener.I + *ws.Listener Id string } @@ -51,58 +49,77 @@ type S struct { var _ publisher.I = &S{} -func init() { - publish.P.Register(NewPublisher()) -} - -func NewPublisher() *S { return &S{Map: make(Map)} } +func New() *S { return &S{Map: make(Map)} } func (p *S) Type() string { return Type } -func (p *S) Receive(msg typer.T) { +func (p *S) Receive(msg publisher.Message) { if m, ok := msg.(*W); ok { if m.Cancel { if m.Id == "" { - log.T.F("removing subscriber %s", m.I.Remote()) - p.removeSubscriber(m.I) + p.removeSubscriber(m.Listener) + log.T.F("removed listener %s", m.Listener.RealRemote()) } else { + p.removeSubscriberId(m.Listener, m.Id) log.T.F( - "removing subscription %s of %s", - m.Id, m.I.Remote(), + "removed subscription %s for %s", m.Id, + m.Listener.RealRemote(), ) - p.removeSubscriberId(m.I, m.Id) } return } p.Mx.Lock() - if subs, ok := p.Map[m.I]; !ok { - log.T.F( - "adding subscription %s for new subscriber %s\n%s", m.Id, - m.I.Remote(), - m.Filters.Marshal(nil), - ) + if subs, ok := p.Map[m.Listener]; !ok { subs = make(map[string]*filters.T) subs[m.Id] = m.Filters - p.Map[m.I] = subs - } else { + // log.I.S(p.Map) + p.Map[m.Listener] = subs log.T.F( - "adding subscription %s for subscriber %s", m.Id, m.I.Remote(), + "created new subscription for %s, %s", m.Listener.RealRemote(), + m.Filters.Marshal(nil), ) + // log.I.S(m.Listener, p.Map) + } else { subs[m.Id] = m.Filters + log.T.F( + "added subscription %s for %s", m.Id, m.Listener.RealRemote(), + ) } p.Mx.Unlock() - } } func (p *S) Deliver(ev *event.E) { + log.T.F("delivering event %0x to subscribers", ev.Id) var err error - // p.Mx.Lock() + p.Mx.Lock() for w, subs := range p.Map { + log.I.F("%v %s", subs, w.RealRemote()) for id, subscriber := range subs { + log.T.F( + "subscriber %s\n%s", w.RealRemote(), subscriber.Marshal(nil), + ) + // if !publicReadable { + // if authRequired && !w.IsAuthed() { + // continue + // } + // } if !subscriber.Match(ev) { continue } + // if ev.Kind.IsPrivileged() { + // ab := w.AuthedBytes() + // var containsPubkey bool + // if ev.Tags != nil { + // containsPubkey = ev.Tags.ContainsAny([]byte{'p'}, tag.New(ab)) + // } + // if !bytes.Equal(ev.Pubkey, ab) || containsPubkey { + // if ab == nil { + // continue + // } + // continue + // } + // } var res *eventenvelope.Result if res, err = eventenvelope.NewResultWith(id, ev); chk.E(err) { continue @@ -110,17 +127,14 @@ func (p *S) Deliver(ev *event.E) { if err = res.Write(w); chk.E(err) { continue } - log.T.F( - "sent event to subscriber %s for subscription %s\n%s", - w.Remote(), id, ev.Serialize(), - ) + log.T.F("dispatched event %0x to subscription %s", ev.Id, id) } } - // p.Mx.Unlock() + p.Mx.Unlock() } // removeSubscriberId removes a specific subscription from a subscriber websocket. -func (p *S) removeSubscriberId(ws listener.I, id string) { +func (p *S) removeSubscriberId(ws *ws.Listener, id string) { p.Mx.Lock() var subs map[string]*filters.T var ok bool @@ -135,7 +149,7 @@ func (p *S) removeSubscriberId(ws listener.I, id string) { } // removeSubscriber removes a websocket from the S collection. 
-func (p *S) removeSubscriber(ws listener.I) { +func (p *S) removeSubscriber(ws *ws.Listener) { p.Mx.Lock() clear(p.Map[ws]) delete(p.Map, ws) diff --git a/socketapi/socketapi.go b/socketapi/socketapi.go index b050995..a5c1b18 100644 --- a/socketapi/socketapi.go +++ b/socketapi/socketapi.go @@ -1,107 +1,97 @@ package socketapi import ( - "github.com/fasthttp/websocket" "net/http" "orly.dev/chk" - "orly.dev/context" - "orly.dev/helpers" - "orly.dev/interfaces/server" "orly.dev/log" - "orly.dev/servemux" - "orly.dev/units" - "orly.dev/ws" "strings" "time" + + "github.com/fasthttp/websocket" + + "orly.dev/context" + "orly.dev/realy/interfaces" + "orly.dev/units" + "orly.dev/ws" ) -type SocketParams struct { - WriteWait time.Duration - PongWait time.Duration - PingWait time.Duration - MaxMessageSize int64 -} - -func DefaultSocketParams() *SocketParams { - return &SocketParams{ - WriteWait: 10 * time.Second, - PongWait: 60 * time.Second, - PingWait: 30 * time.Second, - MaxMessageSize: 1 * units.Mb, - } -} +const ( + DefaultWriteWait = 10 * time.Second + DefaultPongWait = 60 * time.Second + DefaultPingWait = DefaultPongWait / 2 + DefaultMaxMessageSize = 1 * units.Mb +) type A struct { Ctx context.T - server.I - // Web is an optional web server that appears on `/` with no Upgrade for - // websockets or Accept for application/nostr+json present. - Web http.Handler - *SocketParams - Listener *ws.Listener + *ws.Listener + interfaces.Server + // ClientsMu *sync.Mutex + // Clients map[*websocket.Conn]struct{} } -var Upgrader = websocket.Upgrader{ - ReadBufferSize: 1024, WriteBufferSize: 1024, - CheckOrigin: func(r *http.Request) bool { - return true - }, -} +func (a *A) Serve(w http.ResponseWriter, r *http.Request, s interfaces.Server) { -func New(s server.I, path string, sm *servemux.S, socketParams *SocketParams) { - a := &A{I: s, SocketParams: socketParams} - sm.Handle(path, a) - return -} - -// ServeHTTP handles incoming HTTP requests and processes them accordingly. It -// serves the relayinfo for specific headers or delegates to a web handler. It -// processes WebSocket upgrade requests when applicable. 
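Aside on the publisher changes above: the reworked S type is essentially a mutex-guarded map of connection -> subscription id -> filters, and Deliver fans each stored event out to every matching subscription. A self-contained sketch of the same pattern with simplified stand-in types (Event, Filter, Conn and the package name are assumptions, not the orly.dev types):

package fanout

import "sync"

type Event struct{ Kind int }
type Filter func(*Event) bool
type Conn interface{ Send(*Event) error }

// Publisher fans events out to per-connection subscriptions, like S above.
type Publisher struct {
	mu   sync.Mutex
	subs map[Conn]map[string]Filter // connection -> subscription id -> filter
}

func New() *Publisher { return &Publisher{subs: make(map[Conn]map[string]Filter)} }

// Subscribe registers a filter under an id for a connection.
func (p *Publisher) Subscribe(c Conn, id string, f Filter) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.subs[c] == nil {
		p.subs[c] = make(map[string]Filter)
	}
	p.subs[c][id] = f
}

// Unsubscribe drops one subscription, or the whole connection when id is "".
func (p *Publisher) Unsubscribe(c Conn, id string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if id == "" {
		delete(p.subs, c)
		return
	}
	delete(p.subs[c], id)
}

// Deliver sends the event to every subscription whose filter matches it.
func (p *Publisher) Deliver(ev *Event) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for c, byID := range p.subs {
		for _, f := range byID {
			if f(ev) {
				_ = c.Send(ev) // write errors are ignored in this sketch
			}
		}
	}
}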
-func (a *A) ServeHTTP(w http.ResponseWriter, r *http.Request) { - remote := helpers.GetRemoteFromReq(r) - if r.Header.Get("Upgrade") != "websocket" && - r.Header.Get("Accept") == "application/nostr+json" { - log.T.F("serving relayinfo %s", remote) - a.I.HandleRelayInfo(w, r) - return - } - if r.Header.Get("Upgrade") != "websocket" { - if a.Web == nil { - a.I.HandleRelayInfo(w, r) - } else { - a.Web.ServeHTTP(w, r) - } - return - } var err error - ticker := time.NewTicker(a.PingWait) + + ticker := time.NewTicker(DefaultPingWait) var cancel context.F - a.Ctx, cancel = context.Cancel(a.I.Context()) + a.Ctx, cancel = context.Cancel(s.Context()) var conn *websocket.Conn - if conn, err = Upgrader.Upgrade(w, r, nil); err != nil { - log.E.F("%s failed to upgrade websocket: %v", remote, err) + conn, err = Upgrader.Upgrade(w, r, nil) + if err != nil { + log.E.F("failed to upgrade websocket: %v", err) return } - log.T.F( - "upgraded to websocket %s (remote %s local %s)", remote, - conn.RemoteAddr(), conn.LocalAddr(), - ) - a.Listener = ws.NewListener(conn, r) - conn.SetReadLimit(a.MaxMessageSize) - chk.E(conn.SetReadDeadline(time.Now().Add(a.PongWait))) + // a.ClientsMu.Lock() + // a.Clients[conn] = struct{}{} + // a.ClientsMu.Unlock() + a.Listener = GetListener(conn, r) + + defer func() { + cancel() + ticker.Stop() + // a.ClientsMu.Lock() + // if _, ok := a.Clients[a.Listener.Conn]; ok { + a.Publisher().Receive( + &W{ + Cancel: true, + Listener: a.Listener, + }, + ) + // delete(a.Clients, a.Listener.Conn) + chk.E(a.Listener.Conn.Close()) + // a.Publisher().removeSubscriber(a.Listener) + // } + // a.ClientsMu.Unlock() + }() + conn.SetReadLimit(DefaultMaxMessageSize) + chk.E(conn.SetReadDeadline(time.Now().Add(DefaultPongWait))) conn.SetPongHandler( func(string) error { - chk.E(conn.SetReadDeadline(time.Now().Add(a.PongWait))) + chk.E(conn.SetReadDeadline(time.Now().Add(DefaultPongWait))) return nil }, ) - go a.Pinger(a.Ctx, ticker, cancel, remote) + // if a.Server.AuthRequired() { + // a.Listener.RequestAuth() + // } + // if a.Listener.AuthRequested() && len(a.Listener.Authed()) == 0 { + // log.I.F("requesting auth from client from %s", a.Listener.RealRemote()) + // if err = authenvelope.NewChallengeWith(a.Listener.Challenge()).Write(a.Listener); chk.E(err) { + // return + // } + // // return + // } + go a.Pinger(a.Ctx, ticker, cancel, a.Server) var message []byte var typ int for { select { case <-a.Ctx.Done(): - log.I.F("%s closing connection", remote) + a.Listener.Close() + return + case <-s.Context().Done(): a.Listener.Close() return default: @@ -128,43 +118,12 @@ func (a *A) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } if typ == websocket.PingMessage { - log.T.F("pinging %s", remote) - if _, err = a.Listener.Write(nil); chk.E(err) { + if err = a.Listener.WriteMessage( + websocket.PongMessage, nil, + ); chk.E(err) { } continue } - go a.HandleMessage(message, remote) - } - -} - -func (a *A) Pinger( - ctx context.T, ticker *time.Ticker, cancel context.F, remote string, -) { - log.T.F("running pinger for %s", remote) - defer func() { - cancel() - ticker.Stop() - _ = a.Listener.Conn.Close() - log.T.F("stopped pinger for %s", remote) - }() - var err error - for { - select { - case <-ticker.C: - err = a.Listener.Conn.WriteControl( - websocket.PingMessage, nil, - time.Now().Add(a.PingWait), - ) - if err != nil { - log.E.F( - "%s error writing ping: %v; closing websocket", remote, err, - ) - return - } - case <-ctx.Done(): - log.I.F("context done for %s", remote) - return - } + go 
a.HandleMessage(message) } } diff --git a/socketapi/upgrader.go b/socketapi/upgrader.go new file mode 100644 index 0000000..8ade039 --- /dev/null +++ b/socketapi/upgrader.go @@ -0,0 +1,12 @@ +package socketapi + +import ( + "net/http" + + "github.com/fasthttp/websocket" +) + +var Upgrader = websocket.Upgrader{ReadBufferSize: 1024, WriteBufferSize: 1024, + CheckOrigin: func(r *http.Request) bool { + return true + }} diff --git a/subscription/subscriptionid.go b/subscription/subscriptionid.go index bccf985..d7f7215 100644 --- a/subscription/subscriptionid.go +++ b/subscription/subscriptionid.go @@ -5,11 +5,11 @@ package subscription import ( "crypto/rand" - "orly.dev/chk" - "orly.dev/ec/bech32" "orly.dev/errorf" "orly.dev/log" + + "orly.dev/ec/bech32" "orly.dev/text" ) diff --git a/subscription/subscriptionid_test.go b/subscription/subscriptionid_test.go index c267824..71a95d8 100644 --- a/subscription/subscriptionid_test.go +++ b/subscription/subscriptionid_test.go @@ -2,11 +2,10 @@ package subscription import ( "bytes" + "orly.dev/chk" "testing" "lukechampine.com/frand" - - "orly.dev/chk" ) func TestMarshalUnmarshal(t *testing.T) { diff --git a/tag/atag/atag.go b/tag/atag/atag.go index 7f05b0f..5873175 100644 --- a/tag/atag/atag.go +++ b/tag/atag/atag.go @@ -4,8 +4,8 @@ package atag import ( "bytes" - "orly.dev/chk" + "orly.dev/hex" "orly.dev/ints" "orly.dev/kind" diff --git a/tag/atag/atag_test.go b/tag/atag/atag_test.go index b08b6ac..02983e6 100644 --- a/tag/atag/atag_test.go +++ b/tag/atag/atag_test.go @@ -3,15 +3,15 @@ package atag import ( "bytes" "math" + "orly.dev/chk" + "orly.dev/log" "testing" "lukechampine.com/frand" - "orly.dev/chk" "orly.dev/ec/schnorr" "orly.dev/hex" "orly.dev/kind" - "orly.dev/log" ) func TestT_Marshal_Unmarshal(t *testing.T) { diff --git a/tag/tag.go b/tag/tag.go index a7299a5..1eaf686 100644 --- a/tag/tag.go +++ b/tag/tag.go @@ -5,11 +5,9 @@ package tag import ( "bytes" - - "golang.org/x/exp/constraints" - "orly.dev/errorf" "orly.dev/log" + "orly.dev/normalize" "orly.dev/text" ) @@ -48,13 +46,7 @@ func New[V string | []byte](fields ...V) (t *T) { } // NewWithCap creates a new empty tag.T with a pre-allocated capacity for some number of fields. -func NewWithCap[V constraints.Integer](c V) *T { - return &T{ - make( - []BS[[]byte], 0, c, - ), - } -} +func NewWithCap(c int) *T { return &T{make([]BS[[]byte], 0, c)} } // S returns a field of a tag.T as a string. func (t *T) S(i int) (s string) { @@ -198,17 +190,6 @@ func (t *T) Key() []byte { return nil } -// KeyString returns the first element of the tags as a string. 
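Relating to the Serve and Pinger changes a little above: the read deadline, the pong handler and the ping ticker together form a standard websocket keepalive. A condensed sketch of that pattern, using only calls that already appear in the diff (WriteControl, SetReadDeadline, SetPongHandler from the fasthttp/websocket package, plus time); the helper name and parameters are illustrative:

// keepAlive pings the peer on a ticker and extends the read deadline each
// time a pong arrives, so a silent peer eventually fails the next read and
// the connection is torn down.
func keepAlive(conn *websocket.Conn, pongWait, pingEvery time.Duration, done <-chan struct{}) {
	_ = conn.SetReadDeadline(time.Now().Add(pongWait))
	conn.SetPongHandler(func(string) error {
		return conn.SetReadDeadline(time.Now().Add(pongWait))
	})
	t := time.NewTicker(pingEvery)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			if err := conn.WriteControl(
				websocket.PingMessage, nil, time.Now().Add(pongWait),
			); err != nil {
				_ = conn.Close()
				return
			}
		case <-done:
			return
		}
	}
}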
-func (t *T) KeyString() string { - if t == nil { - return "" - } - if t.Len() > Key { - return string(t.field[Key]) - } - return "" -} - // FilterKey returns the first element of a filter tag (the key) with the # removed func (t *T) FilterKey() []byte { if t == nil { diff --git a/tag/tag_test.go b/tag/tag_test.go index 286c516..d264ca9 100644 --- a/tag/tag_test.go +++ b/tag/tag_test.go @@ -2,12 +2,11 @@ package tag import ( "bytes" + "orly.dev/chk" + "orly.dev/log" "testing" "lukechampine.com/frand" - - "orly.dev/chk" - "orly.dev/log" ) func TestMarshalUnmarshal(t *testing.T) { diff --git a/tags/tags.go b/tags/tags.go index bbce250..5fa6ffe 100644 --- a/tags/tags.go +++ b/tags/tags.go @@ -7,13 +7,11 @@ import ( "encoding/json" "errors" "fmt" + "orly.dev/chk" + "orly.dev/log" "os" "sort" - "golang.org/x/exp/constraints" - - "orly.dev/chk" - "orly.dev/log" "orly.dev/lol" "orly.dev/tag" ) @@ -36,7 +34,7 @@ func New(fields ...*tag.T) (t *T) { } // NewWithCap creates a tags.T with space pre-allocated for a number of tag.T elements. -func NewWithCap[V constraints.Integer](c V) (t *T) { +func NewWithCap(c int) (t *T) { return &T{element: make([]*tag.T, 0, c)} } diff --git a/tags/tags_test.go b/tags/tags_test.go index 9a44eb0..57fa83b 100644 --- a/tags/tags_test.go +++ b/tags/tags_test.go @@ -2,13 +2,13 @@ package tags import ( "bytes" + "orly.dev/chk" + "orly.dev/log" "testing" "lukechampine.com/frand" - "orly.dev/chk" "orly.dev/hex" - "orly.dev/log" "orly.dev/tag" ) diff --git a/tests/generate.go b/tests/generate.go new file mode 100644 index 0000000..9397b87 --- /dev/null +++ b/tests/generate.go @@ -0,0 +1,36 @@ +// Package tests provides a tool to generate arbitrary random events for fuzz +// testing the encoder. +package tests + +import ( + "encoding/base64" + "orly.dev/chk" + + "lukechampine.com/frand" + + "orly.dev/event" + "orly.dev/kind" + "orly.dev/p256k" + "orly.dev/timestamp" +) + +// GenerateEvent creates events full of random kinds and content data. 
+func GenerateEvent(maxSize int) (ev *event.E, binSize int, err error) { + l := frand.Intn(maxSize * 6 / 8) // account for base64 expansion + ev = &event.E{ + Kind: kind.TextNote, + CreatedAt: timestamp.Now(), + Content: []byte(base64.StdEncoding.EncodeToString(frand.Bytes(l))), + } + signer := new(p256k.Signer) + if err = signer.Generate(); chk.E(err) { + return + } + if err = ev.Sign(signer); chk.E(err) { + return + } + var bin []byte + bin = ev.Marshal(bin) + binSize = len(bin) + return +} diff --git a/text/escape_test.go b/text/escape_test.go index 878b8ce..0b58806 100644 --- a/text/escape_test.go +++ b/text/escape_test.go @@ -1,12 +1,12 @@ package text import ( + "orly.dev/chk" "testing" "lukechampine.com/frand" - "github.com/minio/sha256-simd" - "orly.dev/chk" + "orly.dev/sha256" ) func TestUnescapeByteString(t *testing.T) { diff --git a/text/helpers.go b/text/helpers.go index 0522568..d98a47a 100644 --- a/text/helpers.go +++ b/text/helpers.go @@ -3,11 +3,11 @@ package text import ( "bytes" "io" + "orly.dev/chk" + "orly.dev/errorf" "github.com/templexxx/xhex" - "orly.dev/chk" - "orly.dev/errorf" "orly.dev/hex" ) diff --git a/text/helpers_test.go b/text/helpers_test.go index 839fcc3..8a79eb0 100644 --- a/text/helpers_test.go +++ b/text/helpers_test.go @@ -2,13 +2,13 @@ package text import ( "bytes" + "orly.dev/chk" "testing" "lukechampine.com/frand" - "github.com/minio/sha256-simd" - "orly.dev/chk" "orly.dev/hex" + "orly.dev/sha256" ) func TestUnmarshalHexArray(t *testing.T) { diff --git a/timestamp/timestamp.go b/timestamp/timestamp.go index dcc30f0..6f17990 100644 --- a/timestamp/timestamp.go +++ b/timestamp/timestamp.go @@ -4,13 +4,11 @@ package timestamp import ( "encoding/binary" + "orly.dev/chk" + "orly.dev/errorf" "time" "unsafe" - "golang.org/x/exp/constraints" - - "orly.dev/chk" - "orly.dev/errorf" "orly.dev/ints" ) @@ -20,10 +18,10 @@ type T struct{ V int64 } // New creates a new timestamp.T, as zero or optionally from teh first variadic parameter as // int64. -func New[V constraints.Integer](x ...V) (t *T) { +func New(x ...int64) (t *T) { t = &T{} if len(x) > 0 { - t.V = int64(x[0]) + t.V = x[0] } return } diff --git a/ubuntu_install_libsecp256k1.sh b/ubuntu_install_libsecp256k1.sh index 292f6a7..288df4c 100755 --- a/ubuntu_install_libsecp256k1.sh +++ b/ubuntu_install_libsecp256k1.sh @@ -9,6 +9,6 @@ git checkout v0.6.0 git submodule init git submodule update ./autogen.sh -./configure --enable-module-schnorrsig --enable-module-ecdh --enable-module-ellswift --prefix=/usr +./configure --enable-module-schnorrsig --enable-module-ecdh --prefix=/usr make -j1 sudo make install diff --git a/version/doc.go b/version/doc.go new file mode 100644 index 0000000..0115796 --- /dev/null +++ b/version/doc.go @@ -0,0 +1,2 @@ +// Package realy_lol is a nostr library, relay and associated tools. 
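A quick note on the maxSize*6/8 factor in tests/generate.go above: standard base64 turns every 3 raw bytes into 4 output characters, so keeping the encoded content near maxSize means generating at most 3/4 (that is, 6/8) of that many random bytes. The helper below only illustrates the arithmetic and is not part of the patch:

// encodedLen mirrors base64.StdEncoding.EncodedLen: n raw bytes become
// (n+2)/3*4 characters, so n = maxSize*3/4 keeps the output near maxSize.
func encodedLen(n int) int { return (n + 2) / 3 * 4 }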
+package version diff --git a/version/version b/version/version index 95e94cd..83110d8 100644 --- a/version/version +++ b/version/version @@ -1 +1 @@ -v0.0.1 \ No newline at end of file +v1.14.3 \ No newline at end of file diff --git a/version/version.go b/version/version.go index 9b958d4..610d020 100644 --- a/version/version.go +++ b/version/version.go @@ -1,12 +1,12 @@ package version -import _ "embed" +import ( + _ "embed" +) //go:embed version var V string -var Name = "orly" +var Description = "relay powered by the orly framework" -var Description = "fast, simple nostr relay" - -var URL = "https://orly.dev" +var URL = "https://orly" diff --git a/ws/client.go b/ws/client.go new file mode 100644 index 0000000..9014fe5 --- /dev/null +++ b/ws/client.go @@ -0,0 +1,555 @@ +package ws + +import ( + "bytes" + "crypto/tls" + "net/http" + "orly.dev/chk" + "orly.dev/errorf" + "orly.dev/log" + "sync" + "time" + + "github.com/gobwas/ws" + "github.com/gobwas/ws/wsutil" + "github.com/puzpuzpuz/xsync/v3" + + "orly.dev/atomic" + "orly.dev/auth" + "orly.dev/context" + "orly.dev/envelopes" + "orly.dev/envelopes/authenvelope" + "orly.dev/envelopes/closedenvelope" + "orly.dev/envelopes/countenvelope" + "orly.dev/envelopes/eoseenvelope" + "orly.dev/envelopes/eventenvelope" + "orly.dev/envelopes/noticeenvelope" + "orly.dev/envelopes/okenvelope" + "orly.dev/event" + "orly.dev/filter" + "orly.dev/filters" + "orly.dev/kind" + "orly.dev/normalize" + "orly.dev/signer" +) + +var subscriptionIDCounter atomic.Int32 + +type Client struct { + closeMutex sync.Mutex + URL string + RequestHeader http.Header // e.g. for origin header + Connection *Connection + Subscriptions *xsync.MapOf[string, *Subscription] + ConnectionError error + connectionContext context.T // will be canceled when the connection closes + connectionContextCancel context.F + challenge []byte // NIP-42 challenge, we only keep the last + notices chan []byte // NIP-01 NOTICEs + okCallbacks *xsync.MapOf[string, func(bool, string)] + writeQueue chan writeRequest + subscriptionChannelCloseQueue chan *Subscription + signatureChecker func(*event.E) bool + AssumeValid bool // this will skip verifying signatures for events received from this relay +} + +type writeRequest struct { + msg []byte + answer chan error +} + +// NewRelay returns a new relay. The relay connection will be closed when the context is canceled. +func NewRelay(c context.T, url string, opts ...RelayOption) *Client { + ctx, cancel := context.Cancel(c) + r := &Client{ + URL: string(normalize.URL([]byte(url))), + connectionContext: ctx, + connectionContextCancel: cancel, + Subscriptions: xsync.NewMapOf[string, *Subscription](), + okCallbacks: xsync.NewMapOf[string, func( + bool, string, + )](), + writeQueue: make(chan writeRequest), + subscriptionChannelCloseQueue: make(chan *Subscription), + signatureChecker: func(e *event.E) bool { ok, _ := e.Verify(); return ok }, + } + + for _, opt := range opts { + opt.ApplyRelayOption(r) + } + + return r +} + +// RelayConnect returns a relay object connected to url. Once successfully connected, cancelling +// ctx has no effect. To close the connection, call r.Close(). +func RelayConnect(ctx context.T, url string, opts ...RelayOption) ( + *Client, error, +) { + r := NewRelay(context.Bg(), url, opts...) + err := r.Connect(ctx) + return r, err +} + +// RelayOption is the type of the argument passed for that. 
+type RelayOption interface { + ApplyRelayOption(*Client) +} + +var ( + _ RelayOption = (WithNoticeHandler)(nil) + _ RelayOption = (WithSignatureChecker)(nil) +) + +// WithNoticeHandler just takes notices and is expected to do something with them. when not +// given, defaults to logging the notices. +type WithNoticeHandler func(notice []byte) + +func (nh WithNoticeHandler) ApplyRelayOption(r *Client) { + r.notices = make(chan []byte) + go func() { + for notice := range r.notices { + nh(notice) + } + }() +} + +// WithSignatureChecker must be a function that checks the signature of an event and returns +// true or false. +type WithSignatureChecker func(*event.E) bool + +func (sc WithSignatureChecker) ApplyRelayOption(r *Client) { + r.signatureChecker = sc +} + +// String just returns the relay URL. +func (r *Client) String() string { + return r.URL +} + +// Context retrieves the context that is associated with this relay connection. +func (r *Client) Context() context.T { return r.connectionContext } + +// IsConnected returns true if the connection to this relay seems to be active. +func (r *Client) IsConnected() bool { return r.connectionContext.Err() == nil } + +// Connect tries to establish a websocket connection to r.URL. If the context expires before the +// connection is complete, an error is returned. Once successfully connected, context expiration +// has no effect: call r.Close to close the connection. +// +// The underlying relay connection will use a background context. If you want to pass a custom +// context to the underlying relay connection, use NewRelay() and then Client.Connect(). +func (r *Client) Connect(c context.T) error { return r.ConnectWithTLS(c, nil) } + +// ConnectWithTLS tries to establish a secured websocket connection to r.URL using customized +// tls.Config (CA's, etc). +func (r *Client) ConnectWithTLS(ctx context.T, tlsConfig *tls.Config) error { + if r.connectionContext == nil || r.Subscriptions == nil { + return errorf.E("relay must be initialized with a call to NewRelay()") + } + if r.URL == "" { + return errorf.E("invalid relay URL '%s'", r.URL) + } + if _, ok := ctx.Deadline(); !ok { + // if no timeout is set, force it to 7 seconds + var cancel context.F + ctx, cancel = context.Timeout(ctx, 7*time.Second) + defer cancel() + } + conn, err := NewConnection(ctx, r.URL, r.RequestHeader, tlsConfig) + if err != nil { + return errorf.E("error opening websocket to '%s': %w", r.URL, err) + } + r.Connection = conn + // ping every 29 seconds (??) 
+ ticker := time.NewTicker(29 * time.Second) + // to be used when the connection is closed + go func() { + <-r.connectionContext.Done() + // close these things when the connection is closed + if r.notices != nil { + close(r.notices) + } + // stop the ticker + ticker.Stop() + // close all subscriptions + r.Subscriptions.Range( + func(_ string, sub *Subscription) bool { + go sub.Unsub() + return true + }, + ) + }() + // queue all write operations here so we don't do mutex spaghetti + go func() { + var err error + for { + select { + case <-ticker.C: + err = wsutil.WriteClientMessage( + r.Connection.conn, ws.OpPing, nil, + ) + if err != nil { + log.D.F( + "{%s} error writing ping: %v; closing websocket", r.URL, + err, + ) + r.Close() // this should trigger a context cancelation + return + } + case writeReq := <-r.writeQueue: + // all write requests will go through this to prevent races + if err = r.Connection.WriteMessage( + r.connectionContext, + writeReq.msg, + ); chk.T(err) { + writeReq.answer <- err + } + close(writeReq.answer) + case <-r.connectionContext.Done(): + // stop here + return + } + } + }() + // general message reader loop + go func() { + buf := new(bytes.Buffer) + for { + buf.Reset() + if err := conn.ReadMessage(r.connectionContext, buf); chk.T(err) { + r.ConnectionError = err + r.Close() + break + } + message := buf.Bytes() + log.D.F("{%s} %v\n", r.URL, message) + + var t string + if t, message, err = envelopes.Identify(message); chk.E(err) { + continue + } + switch t { + case noticeenvelope.L: + env := noticeenvelope.New() + if env, message, err = noticeenvelope.Parse(message); chk.E(err) { + continue + } + // see WithNoticeHandler + if r.notices != nil { + r.notices <- env.Message + } else { + log.E.F("NOTICE from %s: '%s'\n", r.URL, env.Message) + } + case authenvelope.L: + env := authenvelope.NewChallenge() + if env, message, err = authenvelope.ParseChallenge(message); chk.E(err) { + continue + } + if len(env.Challenge) == 0 { + continue + } + r.challenge = env.Challenge + case eventenvelope.L: + env := eventenvelope.NewResult() + if env, message, err = eventenvelope.ParseResult(message); chk.E(err) { + continue + } + if len(env.Subscription.T) == 0 { + continue + } + if sub, ok := r.Subscriptions.Load(env.Subscription.String()); !ok { + log.D.F( + "{%s} no subscription with id '%s'\n", r.URL, + env.Subscription, + ) + continue + } else { + // check if the event matches the desired filter, ignore otherwise + if !sub.Filters.Match(env.Event) { + log.D.F( + "{%s} filter does not match: %v ~ %v\n", r.URL, + sub.Filters, env.Event, + ) + continue + } + // check signature, ignore invalid, except from trusted (AssumeValid) relays + if !r.AssumeValid { + if ok = r.signatureChecker(env.Event); !ok { + log.E.F( + "{%s} bad signature on %s\n", r.URL, + env.Event.Id, + ) + continue + } + } + // dispatch this to the internal .events channel of the subscription + sub.dispatchEvent(env.Event) + } + case eoseenvelope.L: + env := eoseenvelope.New() + if env, message, err = eoseenvelope.Parse(message); chk.E(err) { + continue + } + if subscription, ok := r.Subscriptions.Load(env.Subscription.String()); ok { + subscription.dispatchEose() + } + case closedenvelope.L: + env := closedenvelope.New() + if env, message, err = closedenvelope.Parse(message); chk.E(err) { + continue + } + if subscription, ok := r.Subscriptions.Load(env.Subscription.String()); ok { + subscription.dispatchClosed(env.ReasonString()) + } + case countenvelope.L: + env := countenvelope.NewResponse() + if env, message, err 
= countenvelope.Parse(message); chk.E(err) { + continue + } + if subscription, ok := r.Subscriptions.Load(env.ID.String()); ok && subscription.countResult != nil { + subscription.countResult <- env.Count + } + case okenvelope.L: + env := okenvelope.New() + if env, message, err = okenvelope.Parse(message); chk.E(err) { + continue + } + if okCallback, exist := r.okCallbacks.Load(env.EventID.String()); exist { + okCallback(env.OK, env.ReasonString()) + } else { + log.I.F( + "{%s} got an unexpected OK message for event %s", r.URL, + env.EventID, + ) + } + } + } + }() + return nil +} + +// Write queues a message to be sent to the relay. +func (r *Client) Write(msg []byte) <-chan error { + ch := make(chan error) + select { + case r.writeQueue <- writeRequest{msg: msg, answer: ch}: + case <-r.connectionContext.Done(): + go func() { ch <- errorf.E("connection closed") }() + } + return ch +} + +// Publish sends an "EVENT" command to the relay r as in NIP-01 and waits for an OK response. +func (r *Client) Publish(c context.T, ev *event.E) error { + return r.publish( + c, ev, + ) +} + +// Auth sends an "AUTH" command client->relay as in NIP-42 and waits for an OK response. +func (r *Client) Auth(c context.T, sign signer.I) error { + authEvent := auth.CreateUnsigned(sign.Pub(), r.challenge, r.URL) + if err := authEvent.Sign(sign); chk.T(err) { + return errorf.E("error signing auth event: %w", err) + } + return r.publish(c, authEvent) +} + +// publish can be used both for EVENT and for AUTH +func (r *Client) publish(ctx context.T, ev *event.E) (err error) { + var cancel context.F + if _, ok := ctx.Deadline(); !ok { + // if no timeout is set, force it to 7 seconds + ctx, cancel = context.TimeoutCause( + ctx, 7*time.Second, + errorf.E("given up waiting for an OK"), + ) + defer cancel() + } else { + // otherwise make the context cancellable so we can stop everything upon receiving an "OK" + ctx, cancel = context.Cancel(ctx) + defer cancel() + } + // listen for an OK callback + gotOk := false + id := ev.IdString() + r.okCallbacks.Store( + id, func(ok bool, reason string) { + gotOk = true + if !ok { + err = errorf.E("msg: %s", reason) + } + cancel() + }, + ) + defer r.okCallbacks.Delete(id) + // publish event + var b []byte + if ev.Kind.Equal(kind.ClientAuthentication) { + if b = authenvelope.NewResponseWith(ev).Marshal(b); chk.E(err) { + return + } + } else { + if b = eventenvelope.NewSubmissionWith(ev).Marshal(b); chk.E(err) { + return + } + } + log.T.F("{%s} sending %s\n", r.URL, b) + if err = <-r.Write(b); chk.T(err) { + return err + } + for { + select { + case <-ctx.Done(): + // this will be called when we get an OK or when the context has been canceled + if gotOk { + return err + } + return ctx.Err() + case <-r.connectionContext.Done(): + // this is caused when we lose connectivity + return err + } + } +} + +// Subscribe sends a "REQ" command to the relay r as in NIP-01. +// Events are returned through the channel sub.Events. +// The subscription is closed when context ctx is cancelled ("CLOSE" in NIP-01). +// +// Remember to cancel subscriptions, either by calling `.Unsub()` on them or ensuring their `context.Context` will be canceled at some point. +// Failure to do that will result in a huge number of halted goroutines being created. +func (r *Client) Subscribe( + c context.T, ff *filters.T, + opts ...SubscriptionOption, +) (*Subscription, error) { + sub := r.PrepareSubscription(c, ff, opts...) 
+ if r.Connection == nil { + return nil, errorf.E("not connected to %s", r.URL) + } + if err := sub.Fire(); chk.T(err) { + return nil, errorf.E( + "couldn't subscribe to %v at %s: %w", ff, r.URL, err, + ) + } + return sub, nil +} + +// PrepareSubscription creates a subscription, but doesn't fire it. +// +// Remember to cancel subscriptions, either by calling `.Unsub()` on them or ensuring their `context.Context` will be canceled at some point. +// Failure to do that will result in a huge number of halted goroutines being created. +func (r *Client) PrepareSubscription( + c context.T, ff *filters.T, + opts ...SubscriptionOption, +) *Subscription { + current := subscriptionIDCounter.Add(1) + c, cancel := context.Cancel(c) + sub := &Subscription{ + Relay: r, + Context: c, + cancel: cancel, + counter: int(current), + Events: make(event.C), + EndOfStoredEvents: make(chan struct{}, 1), + ClosedReason: make(chan string, 1), + Filters: ff, + } + for _, opt := range opts { + switch o := opt.(type) { + case WithLabel: + sub.label = string(o) + } + } + id := sub.GetID() + r.Subscriptions.Store(id.String(), sub) + // start handling events, eose, unsub etc: + go sub.start() + return sub +} + +// QuerySync is only used in tests. The realy query method is synchronous now anyway (it ensures +// sort order is respected). +func (r *Client) QuerySync( + ctx context.T, f *filter.F, + opts ...SubscriptionOption, +) ([]*event.E, error) { + sub, err := r.Subscribe(ctx, filters.New(f), opts...) + if err != nil { + return nil, err + } + + defer sub.Unsub() + + if _, ok := ctx.Deadline(); !ok { + // if no timeout is set, force it to 7 seconds + var cancel context.F + ctx, cancel = context.Timeout(ctx, 7*time.Second) + defer cancel() + } + + var events []*event.E + for { + select { + case evt := <-sub.Events: + if evt == nil { + // channel is closed + return events, nil + } + events = append(events, evt) + case <-sub.EndOfStoredEvents: + return events, nil + case <-ctx.Done(): + return events, nil + } + } +} + +// TODO: count is a dumb idea anyway, and nothing is using this +// func (r *Client) Count(c context.F, ff *filters.F, opts ...SubscriptionOption) (int, error) { +// sub := r.PrepareSubscription(c, ff, opts...) +// sub.countResult = make(chan int) +// +// if err := sub.Fire(); chk.F(err) { +// return 0, err +// } +// +// defer sub.Unsub() +// +// if _, ok := c.Deadline(); !ok { +// // if no timeout is set, force it to 7 seconds +// var cancel context.F +// c, cancel = context.Timeout(c, 7*time.Second) +// defer cancel() +// } +// +// for { +// select { +// case count := <-sub.countResult: +// return count, nil +// case <-c.Done(): +// return 0, c.Err() +// } +// } +// } + +// Close shuts down a websocket client connection. 
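A hedged usage sketch for the client added above, mirroring what QuerySync does internally: connect, fire a REQ, drain events until EOSE, then clean up. Error handling is minimal and the function name is illustrative; it uses only identifiers that appear in this file (RelayConnect, Subscribe, Events, EndOfStoredEvents, Unsub, Close):

func fetchStored(ctx context.T, url string, f *filter.F) (evs []*event.E, err error) {
	var r *Client
	if r, err = RelayConnect(ctx, url); err != nil {
		return
	}
	defer r.Close()
	var sub *Subscription
	if sub, err = r.Subscribe(ctx, filters.New(f)); err != nil {
		return
	}
	defer sub.Unsub()
	for {
		select {
		case ev := <-sub.Events:
			if ev == nil {
				return // events channel was closed
			}
			evs = append(evs, ev)
		case <-sub.EndOfStoredEvents:
			return
		case <-ctx.Done():
			return
		}
	}
}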
+func (r *Client) Close() error { + r.closeMutex.Lock() + defer r.closeMutex.Unlock() + if r.connectionContextCancel == nil { + return errorf.E("relay already closed") + } + r.connectionContextCancel() + r.connectionContextCancel = nil + if r.Connection == nil { + return errorf.E("relay not connected") + } + err := r.Connection.Close() + r.Connection = nil + if err != nil { + return err + } + return nil +} diff --git a/ws/client_test.go b/ws/client_test.go new file mode 100644 index 0000000..304fcee --- /dev/null +++ b/ws/client_test.go @@ -0,0 +1,271 @@ +package ws + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "orly.dev/chk" + "sync" + "testing" + "time" + + "golang.org/x/net/websocket" + + "orly.dev/envelopes/eventenvelope" + "orly.dev/envelopes/okenvelope" + "orly.dev/event" + "orly.dev/kind" + "orly.dev/normalize" + "orly.dev/p256k" + "orly.dev/tag" + "orly.dev/tags" + "orly.dev/timestamp" +) + +func TestPublish(t *testing.T) { + // test note to be sent over websocket + var err error + signer := &p256k.Signer{} + if err = signer.Generate(); chk.E(err) { + t.Fatal(err) + } + textNote := &event.E{ + Kind: kind.TextNote, + Content: []byte("hello"), + CreatedAt: timestamp.FromUnix(1672068534), // random fixed timestamp + Tags: tags.New(tag.New("foo", "bar")), + Pubkey: signer.Pub(), + } + if err = textNote.Sign(signer); chk.E(err) { + t.Fatalf("textNote.Sign: %v", err) + } + // fake relay server + var mu sync.Mutex // guards published to satisfy go test -race + var published bool + ws := newWebsocketServer( + func(conn *websocket.Conn) { + mu.Lock() + published = true + mu.Unlock() + // verify the client sent exactly the textNote + var raw []json.RawMessage + if err := websocket.JSON.Receive(conn, &raw); chk.T(err) { + t.Errorf("websocket.JSON.Receive: %v", err) + } + + if string(raw[0]) != fmt.Sprintf(`"%s"`, eventenvelope.L) { + t.Errorf("got type %s, want %s", raw[0], eventenvelope.L) + } + env := eventenvelope.NewSubmission() + if raw[1], err = env.Unmarshal(raw[1]); chk.E(err) { + t.Fatal(err) + } + // event := parseEventMessage(t, raw) + if !bytes.Equal(env.T.Serialize(), textNote.Serialize()) { + t.Errorf( + "received event:\n%s\nwant:\n%s", env.T.Serialize(), + textNote.Serialize(), + ) + } + // send back an ok nip-20 command result + var res []byte + if res = okenvelope.NewFrom( + textNote.Id, true, nil, + ).Marshal(res); chk.E(err) { + t.Fatal(err) + } + if err := websocket.Message.Send(conn, res); chk.T(err) { + t.Errorf("websocket.JSON.Send: %v", err) + } + }, + ) + defer ws.Close() + // connect a client and send the text note + rl := mustRelayConnect(ws.URL) + err = rl.Publish(context.Background(), textNote) + if err != nil { + t.Errorf("publish should have succeeded") + } + if !published { + t.Errorf("fake relay server saw no event") + } +} + +func TestPublishBlocked(t *testing.T) { + // test note to be sent over websocket + var err error + signer := &p256k.Signer{} + if err = signer.Generate(); chk.E(err) { + t.Fatal(err) + } + textNote := &event.E{ + Kind: kind.TextNote, + Content: []byte("hello"), + CreatedAt: timestamp.FromUnix(1672068534), // random fixed timestamp + Pubkey: signer.Pub(), + } + if err = textNote.Sign(signer); chk.E(err) { + t.Fatalf("textNote.Sign: %v", err) + } + // fake relay server + ws := newWebsocketServer( + func(conn *websocket.Conn) { + // discard received message; not interested + var raw []json.RawMessage + if err := websocket.JSON.Receive(conn, &raw); chk.T(err) { + 
t.Errorf("websocket.JSON.Receive: %v", err) + } + // send back a not ok nip-20 command result + var res []byte + if res = okenvelope.NewFrom( + textNote.Id, false, + normalize.Msg(normalize.Blocked, "no reason"), + ).Marshal(res); chk.E(err) { + t.Fatal(err) + } + if err := websocket.Message.Send(conn, res); chk.T(err) { + t.Errorf("websocket.JSON.Send: %v", err) + } + // res := []any{"OK", textNote.Id, false, "blocked"} + chk.E(websocket.JSON.Send(conn, res)) + }, + ) + defer ws.Close() + + // connect a client and send a text note + rl := mustRelayConnect(ws.URL) + if err = rl.Publish(context.Background(), textNote); !chk.E(err) { + t.Errorf("should have failed to publish") + } +} + +func TestPublishWriteFailed(t *testing.T) { + // test note to be sent over websocket + var err error + signer := &p256k.Signer{} + if err = signer.Generate(); chk.E(err) { + t.Fatal(err) + } + textNote := &event.E{ + Kind: kind.TextNote, + Content: []byte("hello"), + CreatedAt: timestamp.FromUnix(1672068534), // random fixed timestamp + Pubkey: signer.Pub(), + } + if err = textNote.Sign(signer); chk.E(err) { + t.Fatalf("textNote.Sign: %v", err) + } + // fake relay server + ws := newWebsocketServer( + func(conn *websocket.Conn) { + // reject receive - force send error + conn.Close() + }, + ) + defer ws.Close() + + // connect a client and send a text note + rl := mustRelayConnect(ws.URL) + // Force brief period of time so that publish always fails on closed socket. + time.Sleep(1 * time.Millisecond) + err = rl.Publish(context.Background(), textNote) + if err == nil { + t.Errorf("should have failed to publish") + } +} + +func TestConnectContext(t *testing.T) { + // fake relay server + var mu sync.Mutex // guards connected to satisfy go test -race + var connected bool + ws := newWebsocketServer( + func(conn *websocket.Conn) { + mu.Lock() + connected = true + mu.Unlock() + io.ReadAll(conn) // discard all input + }, + ) + defer ws.Close() + + // relay client + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + r, err := RelayConnect(ctx, ws.URL) + if err != nil { + t.Fatalf("RelayConnectContext: %v", err) + } + defer r.Close() + + mu.Lock() + defer mu.Unlock() + if !connected { + t.Error("fake relay server saw no client connect") + } +} + +func TestConnectContextCanceled(t *testing.T) { + // fake relay server + ws := newWebsocketServer(discardingHandler) + defer ws.Close() + + // relay client + ctx, cancel := context.WithCancel(context.Background()) + cancel() // make ctx expired + _, err := RelayConnect(ctx, ws.URL) + if !errors.Is(err, context.Canceled) { + t.Errorf( + "RelayConnectContext returned %v error; want context.Canceled", err, + ) + } +} + +func TestConnectWithOrigin(t *testing.T) { + // fake relay server + // default handler requires origin golang.org/x/net/websocket + ws := httptest.NewServer(websocket.Handler(discardingHandler)) + defer ws.Close() + + // relay client + r := NewRelay(context.Background(), string(normalize.URL(ws.URL))) + r.RequestHeader = http.Header{"origin": {"https://example.com"}} + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + err := r.Connect(ctx) + if err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +func discardingHandler(conn *websocket.Conn) { + io.ReadAll(conn) // discard all input +} + +func newWebsocketServer(handler func(*websocket.Conn)) *httptest.Server { + return httptest.NewServer( + &websocket.Server{ + Handshake: anyOriginHandshake, + Handler: handler, + }, + ) +} + 
+// anyOriginHandshake is an alternative to default in golang.org/x/net/websocket +// which checks for origin. nostr client sends no origin and it makes no difference +// for the tests here anyway. +var anyOriginHandshake = func(conf *websocket.Config, r *http.Request) error { + return nil +} + +func mustRelayConnect(url string) *Client { + rl, err := RelayConnect(context.Background(), url) + if err != nil { + panic(err.Error()) + } + return rl +} diff --git a/ws/connection.go b/ws/connection.go index 58a2acb..5589729 100644 --- a/ws/connection.go +++ b/ws/connection.go @@ -8,16 +8,16 @@ import ( "io" "net" "net/http" + "orly.dev/chk" + "orly.dev/errorf" + "orly.dev/log" "github.com/gobwas/httphead" "github.com/gobwas/ws" "github.com/gobwas/ws/wsflate" "github.com/gobwas/ws/wsutil" - "orly.dev/chk" "orly.dev/context" - "orly.dev/errorf" - "orly.dev/log" ) // Connection is an outbound client -> relay connection. diff --git a/ws/doc.go b/ws/doc.go new file mode 100644 index 0000000..4791f7b --- /dev/null +++ b/ws/doc.go @@ -0,0 +1,3 @@ +// Package ws provides both relay and client websocket implementations including +// a pool for fanning out to multiple relays, and managing subscriptions. +package ws diff --git a/ws/listener.go b/ws/listener.go index da44cc2..c04bf63 100644 --- a/ws/listener.go +++ b/ws/listener.go @@ -3,13 +3,12 @@ package ws import ( "net/http" - "orly.dev/helpers" "strings" "sync" "github.com/fasthttp/websocket" - "go.uber.org/atomic" + "orly.dev/atomic" ) // Listener is a websocket implementation for a relay listener. @@ -17,20 +16,57 @@ type Listener struct { mutex sync.Mutex Conn *websocket.Conn Request *http.Request - remote atomic.String + // challenge atomic.String + remote atomic.String + // authed atomic.String + // authRequested atomic.Bool } -// NewListener creates a new Listener for listening for inbound connections for -// a relay. +// NewListener creates a new Listener for listening for inbound connections for a relay. func NewListener( conn *websocket.Conn, req *http.Request, + challenge []byte, ) (ws *Listener) { ws = &Listener{Conn: conn, Request: req} - ws.remote.Store(helpers.GetRemoteFromReq(req)) + // ws.challenge.Store(string(challenge)) + // ws.authRequested.Store(false) + ws.setRemoteFromReq(req) return } +// AuthRequested returns whether the Listener has asked for auth from the client. +// func (ws *Listener) AuthRequested() bool { return ws.authRequested.Load() } + +// RequestAuth stores when auth has been required from a client. +// func (ws *Listener) RequestAuth() { ws.authRequested.Store(true) } + +func (ws *Listener) setRemoteFromReq(r *http.Request) { + var rr string + // reverse proxy should populate this field so we see the remote not the proxy + rem := r.Header.Get("X-Forwarded-For") + if rem == "" { + rr = r.RemoteAddr + } else { + splitted := strings.Split(rem, " ") + if len(splitted) == 1 { + rr = splitted[0] + } + if len(splitted) == 2 { + rr = splitted[1] + } + // in case upstream doesn't set this or we are directly listening instead of + // via reverse proxy or just if the header field is missing, put the + // connection remote address into the websocket state data. + } + if rr == "" { + // if that fails, fall back to the remote (probably the proxy, unless the realy is + // actually directly listening) + rr = ws.Conn.NetConn().RemoteAddr().String() + } + ws.remote.Store(rr) +} + // Write a message to send to a client. 
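On setRemoteFromReq above: X-Forwarded-For is conventionally a comma-separated list of the form "client, proxy1, proxy2", in which case the left-most entry is the client address. A small illustrative alternative under that assumption (not the listener's implementation; assumes net/http and strings are imported):

// remoteFromRequest takes the first X-Forwarded-For entry when a reverse
// proxy sets it, and otherwise falls back to the direct peer address.
func remoteFromRequest(r *http.Request) string {
	if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
		if first := strings.TrimSpace(strings.Split(xff, ",")[0]); first != "" {
			return first
		}
	}
	return r.RemoteAddr
}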
func (ws *Listener) Write(p []byte) (n int, err error) { ws.mutex.Lock() @@ -39,7 +75,8 @@ func (ws *Listener) Write(p []byte) (n int, err error) { if err != nil { n = len(p) if strings.Contains(err.Error(), "close sent") { - _ = ws.Close() + // log.I.ToSliceOfBytes("%s", err.Error()) + ws.Close() err = nil return } @@ -47,16 +84,45 @@ func (ws *Listener) Write(p []byte) (n int, err error) { return } -// Remote returns the stored remote address of the client. -func (ws *Listener) Remote() string { - return ws.remote.Load() +// WriteJSON encodes whatever into JSON and sends it to the client. +func (ws *Listener) WriteJSON(any interface{}) error { + ws.mutex.Lock() + defer ws.mutex.Unlock() + return ws.Conn.WriteJSON(any) } -// Req returns the http.Request associated with the client connection to the -// Listener. -func (ws *Listener) Req() *http.Request { - return ws.Request +// WriteMessage is a wrapper around the websocket WriteMessage, which includes a websocket +// message type identifier. +func (ws *Listener) WriteMessage(t int, b []byte) error { + ws.mutex.Lock() + defer ws.mutex.Unlock() + return ws.Conn.WriteMessage(t, b) } +// Challenge returns the current auth challenge string on the socket. +// func (ws *Listener) Challenge() string { return ws.challenge.Load() } + +// RealRemote returns the stored remote address of the client. +func (ws *Listener) RealRemote() string { return ws.remote.Load() } + +// Authed returns the public key the client has authed to the Listener. +// func (ws *Listener) Authed() string { return ws.authed.Load() } + +// AuthedBytes returns the authed public key that the client has authed to the listener, as a +// byte slice. +// func (ws *Listener) AuthedBytes() []byte { return []byte(ws.authed.Load()) } + +// IsAuthed returns whether the client has authed to the Listener. +// func (ws *Listener) IsAuthed() bool { return ws.authed.Load() != "" } + +// SetAuthed loads the pubkey (as a string of the binary pubkey). +// func (ws *Listener) SetAuthed(s string) { +// log.T.F("setting authed %0x", s) +// ws.authed.Store(s) +// } + +// Req returns the http.Request associated with the client connection to the Listener. +func (ws *Listener) Req() *http.Request { return ws.Request } + // Close the Listener connection from the Listener side. 
func (ws *Listener) Close() (err error) { return ws.Conn.Close() } diff --git a/ws/pool.go b/ws/pool.go new file mode 100644 index 0000000..50faa02 --- /dev/null +++ b/ws/pool.go @@ -0,0 +1,432 @@ +package ws + +import ( + "fmt" + "orly.dev/chk" + "orly.dev/errorf" + "orly.dev/log" + "slices" + "strings" + "sync" + "time" + "unsafe" + + "github.com/puzpuzpuz/xsync/v3" + + "orly.dev/context" + "orly.dev/event" + "orly.dev/filter" + "orly.dev/filters" + "orly.dev/normalize" + "orly.dev/signer" + "orly.dev/timestamp" +) + +var ( + seenAlreadyDropTick = 60 +) + +type Pool struct { + Relays *xsync.MapOf[string, *Client] + Context context.T + authHandler func() signer.I + cancel context.F + eventMiddleware []func(IncomingEvent) + // custom things not often used + SignatureChecker func(*event.E) bool +} + +type DirectedFilters struct { + Filters *filters.T + Client string +} + +type IncomingEvent struct { + Event *event.E + Client *Client +} + +func (ie IncomingEvent) String() string { + return fmt.Sprintf("[%s] >> %s", ie.Client.URL, ie.Event.Serialize()) +} + +type PoolOption interface { + ApplyPoolOption(*Pool) +} + +func NewPool(c context.T, opts ...PoolOption) *Pool { + ctx, cancel := context.Cancel(c) + + pool := &Pool{ + Relays: xsync.NewMapOf[string, *Client](), + + Context: ctx, + cancel: cancel, + } + + for _, opt := range opts { + opt.ApplyPoolOption(pool) + } + + return pool +} + +// WithAuthHandler must be a function that signs the auth event when called. +// it will be called whenever any relay in the pool returns a `CLOSED` message +// with the "auth-required:" prefix, only once for each relay +type WithAuthHandler func() signer.I + +func (h WithAuthHandler) ApplyPoolOption(pool *Pool) { + pool.authHandler = h +} + +// WithEventMiddleware is a function that will be called with all events received. +// more than one can be passed at a time. +type WithEventMiddleware func(IncomingEvent) + +func (h WithEventMiddleware) ApplyPoolOption(pool *Pool) { + pool.eventMiddleware = append(pool.eventMiddleware, h) +} + +var ( + _ PoolOption = (WithAuthHandler)(nil) + _ PoolOption = (WithEventMiddleware)(nil) +) + +// MaxLocks is the maximum number of sync.Mutex locks used in a pool todo: is this too few? +const MaxLocks = 50 + +var namedMutexPool = make([]sync.Mutex, MaxLocks) + +//go:noescape +//go:linkname memhash runtime.memhash +func memhash(p unsafe.Pointer, h, s uintptr) uintptr + +func namedLock(name string) (unlock func()) { + sptr := unsafe.StringData(name) + idx := uint64( + memhash( + unsafe.Pointer(sptr), 0, uintptr(len(name)), + ), + ) % MaxLocks + namedMutexPool[idx].Lock() + return namedMutexPool[idx].Unlock +} + +// EnsureRelay connects a pool to a relay or fails. 
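The namedLock helper above stripes a fixed pool of mutexes by hashing the relay URL with the runtime's memhash, so concurrent calls for the same URL (for example from EnsureRelay, which follows below) serialize while different URLs rarely contend. The same idea with a portable hash, as a sketch (assumes hash/fnv and sync; the names are illustrative):

const lockCount = 50

var lockPool [lockCount]sync.Mutex

// lockByName locks the mutex selected by hashing name and returns its unlock.
func lockByName(name string) (unlock func()) {
	h := fnv.New64a()
	_, _ = h.Write([]byte(name))
	m := &lockPool[h.Sum64()%lockCount]
	m.Lock()
	return m.Unlock
}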
+func (pool *Pool) EnsureRelay(url string) (*Client, error) { + nm := string(normalize.URL(url)) + defer namedLock(nm)() + + relay, ok := pool.Relays.Load(nm) + if ok && relay.IsConnected() { + // already connected, unlock and return + return relay, nil + } else { + var err error + // we use this ctx here so when the pool dies everything dies + ctx, cancel := context.Timeout(pool.Context, time.Second*15) + defer cancel() + + opts := make([]RelayOption, 0, 1+len(pool.eventMiddleware)) + if pool.SignatureChecker != nil { + opts = append(opts, WithSignatureChecker(pool.SignatureChecker)) + } + + if relay, err = RelayConnect(ctx, nm, opts...); chk.T(err) { + return nil, errorf.E("failed to connect: %w", err) + } + + pool.Relays.Store(nm, relay) + return relay, nil + } +} + +// SubMany opens a subscription with the given filters to multiple relays +// the subscriptions only end when the context is canceled +func (pool *Pool) SubMany( + c context.T, urls []string, ff *filters.T, +) chan IncomingEvent { + return pool.subMany(c, urls, ff, true) +} + +// SubManyNonUnique is like SubMany, but returns duplicate events if they come from different relays +func (pool *Pool) SubManyNonUnique( + c context.T, urls []string, + ff *filters.T, +) chan IncomingEvent { + return pool.subMany(c, urls, ff, false) +} + +func (pool *Pool) subMany( + c context.T, urls []string, ff *filters.T, + unique bool, +) chan IncomingEvent { + ctx, cancel := context.Cancel(c) + _ = cancel // do this so `go vet` will stop complaining + events := make(chan IncomingEvent) + seenAlready := xsync.NewMapOf[string, *timestamp.T]() + ticker := time.NewTicker(time.Duration(seenAlreadyDropTick) * time.Second) + eose := false + pending := xsync.NewCounter() + pending.Add(int64(len(urls))) + for u, url := range urls { + url = string(normalize.URL(url)) + urls[u] = url + if idx := slices.Index(urls, url); idx != u { + // skip duplicate relays in the list + continue + } + + go func(nm string) { + var err error + defer func() { + pending.Dec() + if pending.Value() == 0 { + close(events) + } + cancel() + }() + hasAuthed := false + interval := 3 * time.Second + for { + select { + case <-ctx.Done(): + return + default: + } + var sub *Subscription + var relay *Client + if relay, err = pool.EnsureRelay(nm); chk.T(err) { + goto reconnect + } + hasAuthed = false + subscribe: + if sub, err = relay.Subscribe(ctx, ff); chk.T(err) { + goto reconnect + } + go func() { + <-sub.EndOfStoredEvents + eose = true + }() + // reset interval when we get a good subscription + interval = 3 * time.Second + + for { + select { + case evt, more := <-sub.Events: + if !more { + // this means the connection was closed for weird reasons, like the server shut down + // so we will update the filters here to include only events seem from now on + // and try to reconnect until we succeed + now := timestamp.Now() + for i := range ff.F { + ff.F[i].Since = now + } + goto reconnect + } + ie := IncomingEvent{Event: evt, Client: relay} + for _, mh := range pool.eventMiddleware { + mh(ie) + } + if unique { + if _, seen := seenAlready.LoadOrStore( + evt.EventId().String(), + evt.CreatedAt, + ); seen { + continue + } + } + select { + case events <- ie: + case <-ctx.Done(): + } + case <-ticker.C: + if eose { + old := ×tamp.T{int64(timestamp.Now().Int() - seenAlreadyDropTick)} + seenAlready.Range( + func(id string, value *timestamp.T) bool { + if value.I64() < old.I64() { + seenAlready.Delete(id) + } + return true + }, + ) + } + case reason := <-sub.ClosedReason: + if strings.HasPrefix( 
+ reason, + "auth-required:", + ) && pool.authHandler != nil && !hasAuthed { + // relay is requesting auth. if we can, we will perform auth and try again + if err = relay.Auth( + ctx, pool.authHandler(), + ); err == nil { + hasAuthed = true // so we don't keep doing AUTH again and again + goto subscribe + } + } else { + log.I.F("CLOSED from %s: '%s'\n", nm, reason) + } + return + case <-ctx.Done(): + return + } + } + reconnect: + // we will go back to the beginning of the loop and try to connect again and again + // until the context is canceled + time.Sleep(interval) + interval = interval * 17 / 10 // the next time we try we will wait longer + } + }(url) + } + + return events +} + +// SubManyEose is like SubMany, but it stops subscriptions and closes the channel when gets a EOSE +func (pool *Pool) SubManyEose( + c context.T, urls []string, ff *filters.T, +) chan IncomingEvent { + return pool.subManyEose(c, urls, ff, true) +} + +// SubManyEoseNonUnique is like SubManyEose, but returns duplicate events if they come from different relays +func (pool *Pool) SubManyEoseNonUnique( + c context.T, urls []string, + ff *filters.T, +) chan IncomingEvent { + return pool.subManyEose(c, urls, ff, false) +} + +func (pool *Pool) subManyEose( + c context.T, urls []string, ff *filters.T, + unique bool, +) chan IncomingEvent { + ctx, cancel := context.Cancel(c) + + events := make(chan IncomingEvent) + seenAlready := xsync.NewMapOf[string, bool]() + wg := sync.WaitGroup{} + wg.Add(len(urls)) + + go func() { + // this will happen when all subscriptions get an eose (or when they die) + wg.Wait() + cancel() + close(events) + }() + + for _, url := range urls { + go func(nm []byte) { + var err error + defer wg.Done() + var client *Client + if client, err = pool.EnsureRelay(string(nm)); chk.E(err) { + return + } + + hasAuthed := false + + subscribe: + var sub *Subscription + if sub, err = client.Subscribe(ctx, ff); chk.E(err) || sub == nil { + log.E.F("error subscribing to %s with %v: %s", client, ff, err) + return + } + for { + select { + case <-ctx.Done(): + return + case <-sub.EndOfStoredEvents: + return + case reason := <-sub.ClosedReason: + if strings.HasPrefix( + reason, + "auth-required:", + ) && pool.authHandler != nil && !hasAuthed { + // client is requesting auth. if we can we will perform auth and try again + err := client.Auth(ctx, pool.authHandler()) + if err == nil { + hasAuthed = true // so we don't keep doing AUTH again and again + goto subscribe + } + } + log.I.F("CLOSED from %s: '%s'\n", nm, reason) + return + case evt, more := <-sub.Events: + if !more { + return + } + + ie := IncomingEvent{Event: evt, Client: client} + for _, mh := range pool.eventMiddleware { + mh(ie) + } + + if unique { + if _, seen := seenAlready.LoadOrStore( + evt.EventId().String(), + true, + ); seen { + continue + } + } + + select { + case events <- ie: + case <-ctx.Done(): + return + } + } + } + }(normalize.URL(url)) + } + + return events +} + +// QuerySingle returns the first event returned by the first relay, cancels everything else. 
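The reconnect loop in subMany above waits 3 seconds after a failed attempt and then multiplies the wait by 17/10 on each retry. A tiny illustration of the schedule this produces, using the same integer time.Duration arithmetic as the loop (the function itself is hypothetical):

// backoffSchedule returns the first n reconnect delays: 3s, 5.1s, 8.67s, ...
func backoffSchedule(n int) []time.Duration {
	d := 3 * time.Second
	out := make([]time.Duration, 0, n)
	for i := 0; i < n; i++ {
		out = append(out, d)
		d = d * 17 / 10
	}
	return out
}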
+
+// QuerySingle returns the first event returned by the first relay, and cancels everything else.
+func (pool *Pool) QuerySingle(
+	c context.T, urls []string, f *filter.F,
+) *IncomingEvent {
+	ctx, cancel := context.Cancel(c)
+	defer cancel()
+	for ievt := range pool.SubManyEose(ctx, urls, filters.New(f)) {
+		return &ievt
+	}
+	return nil
+}
+
+func (pool *Pool) batchedSubMany(
+	c context.T,
+	dfs []DirectedFilters,
+	subFn func(context.T, []string, *filters.T, bool) chan IncomingEvent,
+) chan IncomingEvent {
+	res := make(chan IncomingEvent)
+
+	for _, df := range dfs {
+		go func(df DirectedFilters) {
+			for ie := range subFn(c, []string{df.Client}, df.Filters, true) {
+				res <- ie
+			}
+		}(df)
+	}
+
+	return res
+}
+
+// BatchedSubMany fires subscriptions only to specific relays, but batches them when they are the same.
+func (pool *Pool) BatchedSubMany(
+	c context.T, dfs []DirectedFilters,
+) chan IncomingEvent {
+	return pool.batchedSubMany(c, dfs, pool.subMany)
+}
+
+// BatchedSubManyEose is like BatchedSubMany, but ends upon receiving EOSE from all relays.
+func (pool *Pool) BatchedSubManyEose(
+	c context.T, dfs []DirectedFilters,
+) chan IncomingEvent {
+	return pool.batchedSubMany(c, dfs, pool.subManyEose)
+}
diff --git a/ws/subscription.go b/ws/subscription.go
new file mode 100644
index 0000000..55730ed
--- /dev/null
+++ b/ws/subscription.go
@@ -0,0 +1,189 @@
+package ws
+
+import (
+	"orly.dev/chk"
+	"orly.dev/errorf"
+	"orly.dev/log"
+	"strconv"
+	"sync"
+	"sync/atomic"
+
+	"orly.dev/context"
+	"orly.dev/envelopes/closeenvelope"
+	"orly.dev/envelopes/countenvelope"
+	"orly.dev/envelopes/reqenvelope"
+	"orly.dev/event"
+	"orly.dev/filters"
+	"orly.dev/subscription"
+)
+
+// Subscription is a client interface for a subscription (what a REQ turns into after EOSE).
+type Subscription struct {
+	label   string
+	counter int
+
+	Relay   *Client
+	Filters *filters.T
+
+	// for this to be treated as a COUNT and not a REQ this must be set
+	countResult chan int
+
+	// The Events channel emits all EVENTs that come in on a Subscription; it will be closed
+	// when the subscription ends
+	Events event.C
+	mu     sync.Mutex
+
+	// The EndOfStoredEvents channel receives a signal when an EOSE comes for that subscription
+	EndOfStoredEvents chan struct{}
+
+	// The ClosedReason channel emits the reason when a CLOSED message is received
+	ClosedReason chan string
+
+	// Context will be .Done() when the subscription ends
+	Context context.T
+
+	live   atomic.Bool
+	eosed  atomic.Bool
+	closed atomic.Bool
+	cancel context.F
+
+	// This keeps track of the events we've received before the EOSE that we must dispatch
+	// before signalling on the EndOfStoredEvents channel
+	storedwg sync.WaitGroup
+}
+
+// EventMessage is an event, with the associated relay URL attached.
+type EventMessage struct {
+	Event event.E
+	Relay string
+}
+
+// SubscriptionOption is the type of optional arguments passed when creating a Subscription.
+// An example is WithLabel.
+type SubscriptionOption interface {
+	IsSubscriptionOption()
+}
+
+// WithLabel puts a label on the subscription (it is prepended to the automatic id) that is sent
+// to relays.
+type WithLabel string
+
+func (_ WithLabel) IsSubscriptionOption() {}
+
+var _ SubscriptionOption = (WithLabel)("")
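+
+// Illustrative sketch of the lifecycle this package itself follows when it
+// consumes a Subscription (it assumes a connected *Client named rl and a
+// *filters.T named ff; see the Pool subscription helpers for the real usage):
+//
+//	sub, err := rl.Subscribe(ctx, ff)
+//	if err != nil {
+//		return err
+//	}
+//	for {
+//		select {
+//		case evt := <-sub.Events:
+//			_ = evt // handle the event
+//		case <-sub.EndOfStoredEvents:
+//			sub.Unsub() // stop once the stored events have all arrived
+//			return nil
+//		case reason := <-sub.ClosedReason:
+//			_ = reason // the relay closed the subscription, e.g. "auth-required:"
+//			return nil
+//		case <-sub.Context.Done():
+//			return nil
+//		}
+//	}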
+
+// GetID returns the Nostr subscription Id as given to the Client;
+// it is a concatenation of the label and a serial number.
+func (sub *Subscription) GetID() (id *subscription.Id) {
+	var err error
+	if id, err = subscription.NewId(sub.label + ":" + strconv.Itoa(sub.counter)); chk.E(err) {
+		return
+	}
+	return
+}
+
+func (sub *Subscription) start() {
+	<-sub.Context.Done()
+	// the subscription ends once the context is canceled (if not already)
+	sub.Unsub() // this will set sub.live to false
+
+	// do this so we don't have the possibility of closing the Events channel and then trying to
+	// send to it
+	sub.mu.Lock()
+	close(sub.Events)
+	sub.mu.Unlock()
+}
+
+func (sub *Subscription) dispatchEvent(evt *event.E) {
+	added := false
+	if !sub.eosed.Load() {
+		sub.storedwg.Add(1)
+		added = true
+	}
+
+	go func() {
+		sub.mu.Lock()
+		defer sub.mu.Unlock()
+
+		if sub.live.Load() {
+			select {
+			case sub.Events <- evt:
+			case <-sub.Context.Done():
+			}
+		}
+
+		if added {
+			sub.storedwg.Done()
+		}
+	}()
+}
+
+func (sub *Subscription) dispatchEose() {
+	if sub.eosed.CompareAndSwap(false, true) {
+		go func() {
+			sub.storedwg.Wait()
+			sub.EndOfStoredEvents <- struct{}{}
+		}()
+	}
+}
+
+func (sub *Subscription) dispatchClosed(reason string) {
+	if sub.closed.CompareAndSwap(false, true) {
+		go func() {
+			sub.ClosedReason <- reason
+		}()
+	}
+}
+
+// Unsub closes the subscription, sending "CLOSE" to the relay as in NIP-01. Cancelling the
+// subscription context also causes the channel sub.Events to be closed.
+func (sub *Subscription) Unsub() {
+	// cancel the context (if it's not canceled already)
+	sub.cancel()
+	// mark subscription as closed and send a CLOSE to the relay (naïve sync.Once
+	// implementation)
+	if sub.live.CompareAndSwap(true, false) {
+		sub.Close()
+	}
+	// remove subscription from our map
+	sub.Relay.Subscriptions.Delete(sub.GetID().String())
+}
+
+// Close just sends a CLOSE message. You probably want Unsub() instead.
+func (sub *Subscription) Close() {
+	if sub.Relay.IsConnected() {
+		id := sub.GetID()
+		closeMsg := closeenvelope.NewFrom(id)
+		var b []byte
+		b = closeMsg.Marshal(nil)
+		log.D.F("{%s} sending %s", sub.Relay.URL, b)
+		<-sub.Relay.Write(b)
+	}
+}
+
+// Sub sets sub.Filters and then calls sub.Fire(). The subscription will be closed if the
+// context expires.
+func (sub *Subscription) Sub(_ context.T, ff *filters.T) {
+	sub.Filters = ff
+	sub.Fire()
+}
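+
+// Illustrative sketch (hypothetical caller code): Sub can be called again on a
+// live subscription to swap its filters; it stores the new filters and
+// re-sends REQ under the same subscription id. The filter values here are made
+// up.
+//
+//	var lim uint = 10
+//	sub.Sub(ctx, filters.New(&filter.F{Limit: &lim}))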
+
+// Fire sends the "REQ" command to the relay.
+func (sub *Subscription) Fire() (err error) {
+	id := sub.GetID()
+
+	var b []byte
+	if sub.countResult == nil {
+		b = reqenvelope.NewFrom(id, sub.Filters).Marshal(b)
+	} else {
+		b = countenvelope.NewRequest(id, sub.Filters).Marshal(b)
+	}
+	log.I.F("{%s} sending %s", sub.Relay.URL, b)
+	sub.live.Store(true)
+	if err := <-sub.Relay.Write(b); chk.T(err) {
+		sub.cancel()
+		return errorf.E("failed to write: %w", err)
+	}
+
+	return nil
+}
diff --git a/ws/subscription_test.go b/ws/subscription_test.go
new file mode 100644
index 0000000..13f50e2
--- /dev/null
+++ b/ws/subscription_test.go
@@ -0,0 +1,130 @@
+package ws
+
+import (
+	"fmt"
+	"sync/atomic"
+	"testing"
+
+	"orly.dev/context"
+	"orly.dev/filter"
+	"orly.dev/filters"
+	"orly.dev/kind"
+	"orly.dev/kinds"
+	"orly.dev/tag"
+	"orly.dev/tags"
+)
+
+const RELAY = "wss://nos.lol"
+
+// // test if we can fetch a couple of random events
+// func TestSubscribeBasic(t *testing.T) {
+// 	rl := mustRelayConnect(RELAY)
+// 	defer rl.Close()
+// 	var lim uint = 2
+// 	sub, err := rl.Subscribe(context.Bg(),
+// 		filters.New(&filter.F{Kinds: kinds.New(kind.TextNote), Limit: &lim}))
+// 	if err != nil {
+// 		t.Fatalf("subscription failed: %v", err)
+// 		return
+// 	}
+// 	timeout := time.After(5 * time.Second)
+// 	n := 0
+// 	for {
+// 		select {
+// 		case event := <-sub.Events:
+// 			if event == nil {
+// 				t.Fatalf("event is nil: %v", event)
+// 			}
+// 			n++
+// 		case <-sub.EndOfStoredEvents:
+// 			goto end
+// 		case <-rl.Context().Done():
+// 			t.Errorf("connection closed: %v", rl.Context().Err())
+// 			goto end
+// 		case <-timeout:
+// 			t.Errorf("timeout")
+// 			goto end
+// 		}
+// 	}
+// end:
+// 	if n != 2 {
+// 		t.Fatalf("expected 2 events, got %d", n)
+// 	}
+// }
+
+// test if we can do multiple nested subscriptions
+func TestNestedSubscriptions(t *testing.T) {
+	rl := mustRelayConnect(RELAY)
+	defer rl.Close()
+
+	n := atomic.Uint32{}
+	_ = n
+	// fetch up to 3 replies to a note
+	var lim3 uint = 3
+	sub, err := rl.Subscribe(
+		context.Bg(),
+		filters.New(
+			&filter.F{
+				Kinds: kinds.New(kind.TextNote),
+				Tags: tags.New(
+					tag.New(
+						"e",
+						"0e34a74f8547e3b95d52a2543719b109fd0312aba144e2ef95cba043f42fe8c5",
+					),
+				),
+				Limit: &lim3,
+			},
+		),
+	)
+	if err != nil {
+		t.Fatalf("subscription 1 failed: %v", err)
+		return
+	}
+
+	for {
+		select {
+		case event := <-sub.Events:
+			// now fetch the author of this event
+			var lim uint = 1
+			sub, err := rl.Subscribe(
+				context.Bg(),
+				filters.New(
+					&filter.F{
+						Kinds:   kinds.New(kind.ProfileMetadata),
+						Authors: tag.New(event.Pubkey), Limit: &lim,
+					},
+				),
+			)
+			if err != nil {
+				t.Fatalf("subscription 2 failed: %v", err)
+				return
+			}
+
+			for {
+				select {
+				case <-sub.Events:
+					// do another subscription here in "sync" mode, just so we're sure things are not blocking
+					rl.QuerySync(context.Bg(), &filter.F{Limit: &lim})
+
+					n.Add(1)
+					if n.Load() == 3 {
+						// if we get here it means the test passed
+						return
+					}
+				case <-sub.Context.Done():
+					goto end
+				case <-sub.EndOfStoredEvents:
+					sub.Unsub()
+				}
+			}
+		end:
+			fmt.Println("")
+		case <-sub.EndOfStoredEvents:
+			sub.Unsub()
+			return
+		case <-sub.Context.Done():
+			t.Fatalf("connection closed: %v", rl.Context().Err())
+			return
+		}
+	}
+}
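+
+// The sketch below is deliberately commented out, like TestSubscribeBasic
+// above; it illustrates how the Pool query helpers could be exercised against
+// the same public relay. The NewPool constructor name is an assumption and may
+// not match the actual constructor in this package.
+//
+// func TestQuerySingleSketch(t *testing.T) {
+// 	pool := NewPool(context.Bg()) // hypothetical constructor
+// 	var lim uint = 1
+// 	ievt := pool.QuerySingle(
+// 		context.Bg(), []string{RELAY},
+// 		&filter.F{Kinds: kinds.New(kind.TextNote), Limit: &lim},
+// 	)
+// 	if ievt == nil {
+// 		t.Fatal("expected at least one stored event")
+// 	}
+// }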