2025-11-02 01:36:35 +00:00
parent 9078f6a177
commit 4ce2205be5
4 changed files with 1 addition and 1011 deletions


@@ -6,7 +6,7 @@ import (
)
// EcmultConst computes r = q * a using constant-time multiplication
-// Uses simple binary method - GLV still has issues, reverting for now
+// Uses simple binary method
func EcmultConst(r *GroupElementJacobian, a *GroupElementAffine, q *Scalar) {
if a.isInfinity() {
r.setInfinity()

glv.go

@@ -1,457 +0,0 @@
package p256k1
// GLV Endomorphism constants and functions
// Based on libsecp256k1's implementation
// Lambda is a primitive cube root of unity modulo the curve order n
// lambda^3 == 1 mod n, lambda^2 + lambda == -1 mod n
// Represented as 8 uint32 values converted to 4 uint64 values
var lambdaConstant = Scalar{
d: [4]uint64{
(uint64(0x5363AD4C) << 32) | uint64(0xC05C30E0),
(uint64(0xA5261C02) << 32) | uint64(0x8812645A),
(uint64(0x122E22EA) << 32) | uint64(0x20816678),
(uint64(0xDF02967C) << 32) | uint64(0x1B23BD72),
},
}
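// Illustrative sanity check (assuming the Scalar methods used elsewhere in
// this file): lambda^3 == 1 (mod n) and lambda^2 + lambda + 1 == 0 (mod n).
func lambdaIsValid() bool {
	var l2, l3, sum, one Scalar
	l2.mul(&lambdaConstant, &lambdaConstant) // lambda^2
	l3.mul(&l2, &lambdaConstant)             // lambda^3
	one.setInt(1)
	sum.add(&l2, &lambdaConstant) // lambda^2 + lambda
	sum.add(&sum, &one)           // lambda^2 + lambda + 1
	return l3.equal(&one) && sum.isZero()
}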
// Beta is a primitive cube root of unity modulo the field prime p
// beta^3 == 1 mod p, beta^2 + beta == -1 mod p
// Used to compute lambda*P = (beta*x, y)
// Represented as 8 uint32 values in big-endian format
var betaConstant FieldElement
func init() {
// Beta constant: 0x7ae96a2b657c07106e64479eac3434e99cf0497512f58995c1396c28719501ee
betaBytes := []byte{
0x7a, 0xe9, 0x6a, 0x2b, 0x65, 0x7c, 0x07, 0x10,
0x6e, 0x64, 0x47, 0x9e, 0xac, 0x34, 0x34, 0xe9,
0x9c, 0xf0, 0x49, 0x75, 0x12, 0xf5, 0x89, 0x95,
0xc1, 0x39, 0x6c, 0x28, 0x71, 0x95, 0x01, 0xee,
}
betaConstant.setB32(betaBytes)
betaConstant.normalize()
}
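// The analogous check for beta (assuming the FieldElement methods used
// below): beta^3 == 1 (mod p).
func betaIsValid() bool {
	var b2, b3 FieldElement
	b2.sqr(&betaConstant)
	b3.mul(&b2, &betaConstant)
	b3.normalize()
	return b3.equal(&FieldElementOne)
}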
// Constants for scalar_split_lambda
// SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) maps to our d[0]=d1|d0, d[1]=d3|d2, d[2]=d5|d4, d[3]=d7|d6
var (
// minus_b1 = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0xE4437ED6, 0x010E8828, 0x6F547FA9, 0x0ABFE4C3)
minusB1 = Scalar{
d: [4]uint64{
(uint64(0x6F547FA9) << 32) | uint64(0x0ABFE4C3), // d[0] = d1|d0
(uint64(0xE4437ED6) << 32) | uint64(0x010E8828), // d[1] = d3|d2
(uint64(0x00000000) << 32) | uint64(0x00000000), // d[2] = d5|d4
(uint64(0x00000000) << 32) | uint64(0x00000000), // d[3] = d7|d6
},
}
// minus_b2 = SECP256K1_SCALAR_CONST(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x8A280AC5, 0x0774346D, 0xD765CDA8, 0x3DB1562C)
minusB2 = Scalar{
d: [4]uint64{
(uint64(0xD765CDA8) << 32) | uint64(0x3DB1562C), // d[0] = d1|d0
(uint64(0x8A280AC5) << 32) | uint64(0x0774346D), // d[1] = d3|d2
(uint64(0xFFFFFFFF) << 32) | uint64(0xFFFFFFFE), // d[2] = d5|d4
(uint64(0xFFFFFFFF) << 32) | uint64(0xFFFFFFFF), // d[3] = d7|d6
},
}
// g1 = SECP256K1_SCALAR_CONST(0x3086D221, 0xA7D46BCD, 0xE86C90E4, 0x9284EB15, 0x3DAA8A14, 0x71E8CA7F, 0xE893209A, 0x45DBB031)
g1 = Scalar{
d: [4]uint64{
(uint64(0xE893209A) << 32) | uint64(0x45DBB031), // d[0] = d1|d0
(uint64(0x3DAA8A14) << 32) | uint64(0x71E8CA7F), // d[1] = d3|d2
(uint64(0xE86C90E4) << 32) | uint64(0x9284EB15), // d[2] = d5|d4
(uint64(0x3086D221) << 32) | uint64(0xA7D46BCD), // d[3] = d7|d6
},
}
// g2 = SECP256K1_SCALAR_CONST(0xE4437ED6, 0x010E8828, 0x6F547FA9, 0x0ABFE4C4, 0x221208AC, 0x9DF506C6, 0x1571B4AE, 0x8AC47F71)
g2 = Scalar{
d: [4]uint64{
(uint64(0x1571B4AE) << 32) | uint64(0x8AC47F71), // d[0] = d1|d0
(uint64(0x221208AC) << 32) | uint64(0x9DF506C6), // d[1] = d3|d2
(uint64(0x6F547FA9) << 32) | uint64(0x0ABFE4C4), // d[2] = d5|d4
(uint64(0xE4437ED6) << 32) | uint64(0x010E8828), // d[3] = d7|d6
},
}
)
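// The d[i] packing above could be factored into a helper that mirrors the
// C macro's argument order (illustrative only; the constants above inline it):
func scalarConst(d7, d6, d5, d4, d3, d2, d1, d0 uint32) Scalar {
	return Scalar{d: [4]uint64{
		uint64(d1)<<32 | uint64(d0),
		uint64(d3)<<32 | uint64(d2),
		uint64(d5)<<32 | uint64(d4),
		uint64(d7)<<32 | uint64(d6),
	}}
}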
// mulShiftVar multiplies two scalars and right-shifts the result by shift bits
// Returns round(k*g/2^shift)
func mulShiftVar(k, g *Scalar, shift uint) Scalar {
// Compute 512-bit product
var l [8]uint64
var temp Scalar
temp.mul512(l[:], k, g)
// Extract result by shifting
var result Scalar
shiftlimbs := shift / 64
shiftlow := shift % 64
shifthigh := 64 - shiftlow
if shift < 512 {
result.d[0] = l[shiftlimbs] >> shiftlow
if shift < 448 && shiftlow != 0 {
result.d[0] |= l[shiftlimbs+1] << shifthigh
}
}
if shift < 448 {
result.d[1] = l[shiftlimbs+1] >> shiftlow
if shift < 384 && shiftlow != 0 {
result.d[1] |= l[shiftlimbs+2] << shifthigh
}
}
if shift < 384 {
result.d[2] = l[shiftlimbs+2] >> shiftlow
if shift < 320 && shiftlow != 0 {
result.d[2] |= l[shiftlimbs+3] << shifthigh
}
}
if shift < 320 {
result.d[3] = l[shiftlimbs+3] >> shiftlow
}
// Round: add 1 if bit (shift-1) is set
// C code: secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
if shift > 0 {
bitPos := (shift - 1) & 0x3f // bit position within limb
limbIdx := (shift - 1) >> 6 // which limb
if limbIdx < 8 && (l[limbIdx]>>bitPos)&1 != 0 {
// Add 1 to result (rounding up)
var one Scalar
one.setInt(1)
result.add(&result, &one)
}
}
return result
}
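// Worked example: k = 7, g = 9 gives the 512-bit product 63 (0b111111).
// With shift = 4: 63 >> 4 = 3, and bit 3 of 63 is set, so the result is
// rounded up to 4 == round(63 / 16).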
// scalarSplitLambda splits a scalar k into r1 and r2 such that:
// r1 + lambda * r2 == k (mod n)
// r1 and r2 are in range (-2^128, 2^128) mod n
// This matches the C implementation exactly: secp256k1_scalar_split_lambda
func scalarSplitLambda(r1, r2, k *Scalar) {
var c1, c2 Scalar
// C code: secp256k1_scalar_mul_shift_var(&c1, k, &g1, 384);
// C code: secp256k1_scalar_mul_shift_var(&c2, k, &g2, 384);
c1 = mulShiftVar(k, &g1, 384)
c2 = mulShiftVar(k, &g2, 384)
// C code: secp256k1_scalar_mul(&c1, &c1, &minus_b1);
// C code: secp256k1_scalar_mul(&c2, &c2, &minus_b2);
c1.mul(&c1, &minusB1)
c2.mul(&c2, &minusB2)
// C code: secp256k1_scalar_add(r2, &c1, &c2);
r2.add(&c1, &c2)
// C code: secp256k1_scalar_mul(r1, r2, &secp256k1_const_lambda);
// C code: secp256k1_scalar_negate(r1, r1);
// C code: secp256k1_scalar_add(r1, r1, k);
r1.mul(r2, &lambdaConstant)
r1.negate(r1)
r1.add(r1, k)
}
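// Usage sketch: a multiplication k*P can then be computed as
//   k*P = r1*P + r2*(lambda*P), with lambda*P = (beta*x, y),
// so the two half-width scalars share a single run of doublings.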
// geMulLambda multiplies a point by lambda using the endomorphism:
// lambda * (x, y) = (beta * x, y)
func geMulLambda(r *GroupElementAffine, a *GroupElementAffine) {
*r = *a
// Multiply x coordinate by beta
r.x.mul(&r.x, &betaConstant)
r.x.normalize()
}
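// Since beta^3 == 1 (mod p), applying geMulLambda three times returns the
// original point. Illustrative check, using FieldElement.equal the way the
// tests for this file do:
func lambdaCycleHolds(p *GroupElementAffine) bool {
	var q GroupElementAffine
	geMulLambda(&q, p)
	geMulLambda(&q, &q)
	geMulLambda(&q, &q)
	p.x.normalize() // equal compares normalized representations
	p.y.normalize()
	q.y.normalize()
	return q.x.equal(&p.x) && q.y.equal(&p.y)
}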
// Constants for GLV + signed-digit ecmult_const
const (
ecmultConstGroupSize = 5
ecmultConstTableSize = 1 << (ecmultConstGroupSize - 1) // 16
ecmultConstBits = 130 // Smallest multiple of 5 >= 129
ecmultConstGroups = (ecmultConstBits + ecmultConstGroupSize - 1) / ecmultConstGroupSize
)
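// With 130 bits in groups of 5, the main loop below runs ecmultConstGroups
// = 26 iterations; every iteration after the first performs 5 doublings and
// two table additions (one from pre_a, one from pre_a_lam).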
// K constant for ECMULT_CONST_BITS=130
// K = (2^130 - 2^129 - 1)*(1 + lambda) mod n
var ecmultConstK = Scalar{
d: [4]uint64{
(uint64(0xa4e88a7d) << 32) | uint64(0xcb13034e),
(uint64(0xc2bdd6bf) << 32) | uint64(0x7c118d6b),
(uint64(0x589ae848) << 32) | uint64(0x26ba29e4),
(uint64(0xb5c2c1dc) << 32) | uint64(0xde9798d9),
},
}
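// Why this K yields q: a 130-bit value v in the signed-digit encoding below
// represents 2*v + 1 - 2^130. With s = (q + K)/2 split as s = s1 + lambda*s2
// and v_i = s_i + 2^128:
//   (2*v1 + 1 - 2^130) + lambda*(2*v2 + 1 - 2^130)
//     = 2*s + (1 + lambda)*(2^129 + 1 - 2^130)
//     = q + K - (1 + lambda)*(2^130 - 2^129 - 1)
//     = q (mod n)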
// S_OFFSET = 2^128
// SECP256K1_SCALAR_CONST reorders parameters: d[0]=d1|d0, d[1]=d3|d2, d[2]=d5|d4, d[3]=d7|d6
// Bit 128 is bit 0 of d[2], which means d4=1, d5=0
// SECP256K1_SCALAR_CONST(0, 0, 0, 1, 0, 0, 0, 0) -> d[2] = 0<<32|1 = 1, i.e. 2^128
var sOffset = Scalar{
d: [4]uint64{0, 0, 1, 0}, // d[2] = 1 means bit 128 is set
}
// signedDigitTableGet performs signed-digit table lookup
// Given a table of odd multiples [1*P, 3*P, ..., 15*P] and an n-bit value,
// returns the signed-digit representation C_n(n, P)
// This matches the ECMULT_CONST_TABLE_GET_GE macro exactly
func signedDigitTableGet(pre []GroupElementAffine, n uint32) GroupElementAffine {
// C code: volatile unsigned int negative = ((n) >> (ECMULT_CONST_GROUP_SIZE - 1)) ^ 1;
// If the top bit of n is 0, we want the negation.
negative := ((n >> (ecmultConstGroupSize - 1)) ^ 1) != 0
// Compute index: index = ((unsigned int)(-negative) ^ n) & ((1U << (ECMULT_CONST_GROUP_SIZE - 1)) - 1U)
var negMask uint32
if negative {
negMask = 0xFFFFFFFF
} else {
negMask = 0
}
index := (negMask ^ n) & ((1 << (ecmultConstGroupSize - 1)) - 1)
// Constant-time lookup - initialize with pre[0], then conditionally update using cmov
var result GroupElementAffine
result = pre[0]
// C code: for (m = 1; m < ECMULT_CONST_TABLE_SIZE; m++) { secp256k1_fe_cmov(&(r)->x, &(pre)[m].x, m == index); ... }
for i := uint32(1); i < ecmultConstTableSize; i++ {
flag := 0
if i == index {
flag = 1
}
result.x.cmov(&pre[i].x, flag)
result.y.cmov(&pre[i].y, flag)
}
// C code: (r)->infinity = 0;
result.infinity = false
// C code: secp256k1_fe_negate(&neg_y, &(r)->y, 1);
// C code: secp256k1_fe_cmov(&(r)->y, &neg_y, negative);
var negY FieldElement
negY.negate(&result.y, 1)
flag := 0
if negative {
flag = 1
}
result.y.cmov(&negY, flag)
result.y.normalize()
return result
}
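// Example encodings: a 5-bit group value n stands for the odd signed digit
// 2*n + 1 - 32, so n=31 -> +31*P, n=16 -> +1*P, n=15 -> -1*P, n=0 -> -31*P.
// Every digit is odd and nonzero, so the table needs only odd multiples and
// the main loop never has to handle the point at infinity.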
// buildOddMultiplesTableWithGlobalZ builds a table of odd multiples with global Z
// Implements effective affine technique like C code: secp256k1_ecmult_odd_multiples_table + secp256k1_ge_table_set_globalz
func buildOddMultiplesTableWithGlobalZ(n int, aJac *GroupElementJacobian) ([]GroupElementAffine, *FieldElement) {
if aJac.isInfinity() {
return nil, nil
}
pre := make([]GroupElementAffine, n)
zr := make([]FieldElement, n)
// Build 2*a (called 'd' in C code)
var d GroupElementJacobian
d.double(aJac)
// Use effective affine technique: work on isomorphic curve where d.z is the isomorphism constant
// Set d_ge = affine representation of d (for faster additions)
// C code: secp256k1_ge_set_xy(&d_ge, &d.x, &d.y);
var dGe GroupElementAffine
dGe.setXY(&d.x, &d.y)
// Set pre[0] = a with z-inverse d.z (using setGEJ_zinv equivalent)
// This represents a on the isomorphic curve
// C code: secp256k1_ge_set_gej_zinv(&pre_a[0], a, &d.z);
// Save d.z BEFORE calling inv (which modifies input)
var dZ FieldElement
dZ = d.z
var dZInv FieldElement
dZInv.inv(&d.z)
var zi2, zi3 FieldElement
zi2.sqr(&dZInv)
zi3.mul(&zi2, &dZInv)
pre[0].x.mul(&aJac.x, &zi2)
pre[0].y.mul(&aJac.y, &zi3)
pre[0].infinity = false
zr[0] = dZ // Store z ratio (C code: zr[0] = d.z)
// Build remaining odd multiples using effective affine additions
// ai represents the current point in the isomorphic curve (Jacobian form)
var ai GroupElementJacobian
ai.setGE(&pre[0])
// C code: ai.z = a->z; (line 98)
ai.z = aJac.z
// Build odd multiples: pre[i] = (2*i+1)*a
for i := 1; i < n; i++ {
// ai = ai + d_ge (in the isomorphic curve) - this is faster than full Jacobian addition
// C code: secp256k1_gej_add_ge_var(&ai, &ai, &d_ge, &zr[i])
// This computes zr[i] = h internally
ai.addGEWithZR(&ai, &dGe, &zr[i])
// Store x, y coordinates (affine representation on isomorphic curve)
// C code: secp256k1_ge_set_xy(&pre_a[i], &ai.x, &ai.y)
pre[i].x = ai.x
pre[i].y = ai.y
pre[i].infinity = false
}
// Apply ge_table_set_globalz equivalent: bring all points to same Z denominator
// C code: secp256k1_ge_table_set_globalz(ECMULT_CONST_TABLE_SIZE, pre, zr)
if n > 0 {
i := n - 1
// Ensure all y values are in weak normal form for fast negation (C code line 302)
pre[i].y.normalizeWeak()
var zs FieldElement
zs = zr[i] // zs = zr[n-1]
// Work backwards, using z-ratios to scale x/y values
// C code: while (i > 0) { ... secp256k1_ge_set_ge_zinv(&a[i], &a[i], &zs); }
for i > 0 {
if i != n-1 {
// C code: secp256k1_fe_mul(&zs, &zs, &zr[i])
// Multiply zs by zr[i] BEFORE decrementing i
zs.mul(&zs, &zr[i])
}
i--
// Scale pre[i] by zs inverse: pre[i] = pre[i] with z-inverse zs
// C code: secp256k1_ge_set_ge_zinv(&a[i], &a[i], &zs)
var zsInv FieldElement
zsInv.inv(&zs)
var zsInv2, zsInv3 FieldElement
zsInv2.sqr(&zsInv)
zsInv3.mul(&zsInv2, &zsInv)
pre[i].x.mul(&pre[i].x, &zsInv2)
pre[i].y.mul(&pre[i].y, &zsInv3)
}
}
// Compute global_z = ai.z * d.z (undoing isomorphism)
// C code: secp256k1_fe_mul(z, &ai.z, &d.z)
var globalZ FieldElement
globalZ.mul(&ai.z, &d.z)
globalZ.normalize()
return pre, &globalZ
}
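// The "effective affine" trick above relies on the curve isomorphism
// (x, y) -> (c^2*x, c^3*y) for nonzero c, which maps y^2 = x^3 + 7 to
// y^2 = x^3 + 7*c^6 and preserves the group law. Taking c = d.z lets every
// addition in the loop treat d as affine; multiplying r.z by the returned
// global Z afterwards undoes the isomorphism.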
func buildOddMultiplesTableSimple(n int, aJac *GroupElementJacobian) []GroupElementAffine {
if aJac.isInfinity() {
return nil
}
preJac := make([]GroupElementJacobian, n)
preAff := make([]GroupElementAffine, n)
// preJac[0] = 1*a
preJac[0] = *aJac
// d = 2*a
var d GroupElementJacobian
d.double(aJac)
for i := 1; i < n; i++ {
preJac[i].addVar(&preJac[i-1], &d)
}
// Batch convert to affine
z := make([]FieldElement, n)
for i := 0; i < n; i++ {
z[i] = preJac[i].z
}
zInv := make([]FieldElement, n)
batchInverse(zInv, z)
for i := 0; i < n; i++ {
var zi2, zi3 FieldElement
zi2.sqr(&zInv[i])
zi3.mul(&zi2, &zInv[i])
preAff[i].x.mul(&preJac[i].x, &zi2)
preAff[i].y.mul(&preJac[i].y, &zi3)
preAff[i].infinity = false
}
return preAff
}
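// batchInverse above is assumed to be Montgomery's trick: one field
// inversion plus O(n) multiplications. A minimal sketch with the
// FieldElement methods used in this file (all inputs must be nonzero):
func batchInverseSketch(out, in []FieldElement) {
	n := len(in)
	if n == 0 {
		return
	}
	prefix := make([]FieldElement, n) // prefix[i] = in[0]*...*in[i]
	prefix[0] = in[0]
	for i := 1; i < n; i++ {
		prefix[i].mul(&prefix[i-1], &in[i])
	}
	var acc FieldElement
	acc.inv(&prefix[n-1]) // the only inversion; prefix[n-1] is not needed again
	for i := n - 1; i > 0; i-- {
		out[i].mul(&acc, &prefix[i-1]) // acc * prefix[i-1] = in[i]^-1
		acc.mul(&acc, &in[i])          // strip in[i]: acc = prefix[i-1]^-1
	}
	out[0] = acc
}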
// ecmultConstGLV computes r = q * a using GLV endomorphism + signed-digit method
// This matches the C libsecp256k1 secp256k1_ecmult_const implementation exactly
func ecmultConstGLV(r *GroupElementJacobian, a *GroupElementAffine, q *Scalar) {
// C code: if (secp256k1_ge_is_infinity(a)) { secp256k1_gej_set_infinity(r); return; }
if a.isInfinity() {
r.setInfinity()
return
}
// Step 1: Compute v1 and v2 (C code lines 207-212)
// secp256k1_scalar_add(&s, q, &secp256k1_ecmult_const_K);
// secp256k1_scalar_half(&s, &s);
// secp256k1_scalar_split_lambda(&v1, &v2, &s);
// secp256k1_scalar_add(&v1, &v1, &S_OFFSET);
// secp256k1_scalar_add(&v2, &v2, &S_OFFSET);
var s, v1, v2 Scalar
s.add(q, &ecmultConstK)
s.half(&s)
scalarSplitLambda(&v1, &v2, &s)
v1.add(&v1, &sOffset)
v2.add(&v2, &sOffset)
// Step 2: Build precomputation tables (C code lines 228-232)
// secp256k1_gej_set_ge(r, a);
// secp256k1_ecmult_const_odd_multiples_table_globalz(pre_a, &global_z, r);
// for (i = 0; i < ECMULT_CONST_TABLE_SIZE; i++) {
// secp256k1_ge_mul_lambda(&pre_a_lam[i], &pre_a[i]);
// }
var aJac GroupElementJacobian
aJac.setGE(a)
// TEMPORARILY use simple table building to isolate the bug
preA := buildOddMultiplesTableSimple(ecmultConstTableSize, &aJac)
var globalZ *FieldElement = nil // No global Z correction for now
preALam := make([]GroupElementAffine, ecmultConstTableSize)
for i := 0; i < ecmultConstTableSize; i++ {
geMulLambda(&preALam[i], &preA[i])
}
// Step 3: Main loop (C code lines 244-264)
// This is the key difference from a naive two-pass approach - C processes both v1 and v2 in a SINGLE loop, sharing the doublings
for group := ecmultConstGroups - 1; group >= 0; group-- {
// C code: unsigned int bits1 = secp256k1_scalar_get_bits_var(&v1, group * ECMULT_CONST_GROUP_SIZE, ECMULT_CONST_GROUP_SIZE);
// C code: unsigned int bits2 = secp256k1_scalar_get_bits_var(&v2, group * ECMULT_CONST_GROUP_SIZE, ECMULT_CONST_GROUP_SIZE);
bitOffset := uint(group * ecmultConstGroupSize)
bits1 := uint32(v1.getBits(bitOffset, ecmultConstGroupSize))
bits2 := uint32(v2.getBits(bitOffset, ecmultConstGroupSize))
// C code: ECMULT_CONST_TABLE_GET_GE(&t, pre_a, bits1);
var t GroupElementAffine
t = signedDigitTableGet(preA, bits1)
if group == ecmultConstGroups-1 {
// C code: secp256k1_gej_set_ge(r, &t);
r.setGE(&t)
} else {
// C code: for (j = 0; j < ECMULT_CONST_GROUP_SIZE; ++j) { secp256k1_gej_double(r, r); }
// C code: secp256k1_gej_add_ge(r, r, &t);
for j := 0; j < ecmultConstGroupSize; j++ {
r.double(r)
}
r.addGE(r, &t)
}
// C code: ECMULT_CONST_TABLE_GET_GE(&t, pre_a_lam, bits2);
// C code: secp256k1_gej_add_ge(r, r, &t);
t = signedDigitTableGet(preALam, bits2)
r.addGE(r, &t)
}
// Step 4: Apply global Z correction (C code line 267)
// C code: secp256k1_fe_mul(&r->z, &r->z, &global_z);
if globalZ != nil && !globalZ.isZero() && !r.isInfinity() {
r.z.mul(&r.z, globalZ)
r.z.normalize()
}
}


@@ -1,280 +0,0 @@
package p256k1
import (
"testing"
)
// TestScalarSplitLambda verifies that scalarSplitLambda correctly splits scalars
// Property: r1 + lambda * r2 == k (mod n)
func TestScalarSplitLambda(t *testing.T) {
testCases := []struct {
name string
k *Scalar
}{
{
name: "one",
k: func() *Scalar { var s Scalar; s.setInt(1); return &s }(),
},
{
name: "small_value",
k: func() *Scalar { var s Scalar; s.setInt(12345); return &s }(),
},
{
name: "large_value",
k: func() *Scalar {
var s Scalar
// Set to a large value less than group order
bytes := [32]byte{
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE,
0xBA, 0xAE, 0xDC, 0xE6, 0xAF, 0x48, 0xA0, 0x3B,
0xBF, 0xD2, 0x5E, 0x8C, 0xD0, 0x36, 0x41, 0x3F,
}
s.setB32(bytes[:])
return &s
}(),
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
var r1, r2 Scalar
scalarSplitLambda(&r1, &r2, tc.k)
// Verify: r1 + lambda * r2 == k (mod n)
var lambdaR2, sum Scalar
lambdaR2.mul(&r2, &lambdaConstant)
sum.add(&r1, &lambdaR2)
// Compare with k
if !sum.equal(tc.k) {
t.Errorf("r1 + lambda*r2 != k\nr1: %v\nr2: %v\nlambda*r2: %v\nsum: %v\nk: %v",
r1, r2, lambdaR2, sum, tc.k)
}
// Verify bounds: |r1| < 2^128 and |r2| < 2^128 (mod n)
// Check if r1 < 2^128 or -r1 mod n < 2^128
var r1Bytes [32]byte
r1.getB32(r1Bytes[:])
// Check if first 16 bytes are zero (meaning < 2^128)
r1Small := true
for i := 0; i < 16; i++ {
if r1Bytes[i] != 0 {
r1Small = false
break
}
}
// If r1 is not small, check -r1 mod n
if !r1Small {
var negR1 Scalar
negR1.negate(&r1)
var negR1Bytes [32]byte
negR1.getB32(negR1Bytes[:])
negR1Small := true
for i := 0; i < 16; i++ {
if negR1Bytes[i] != 0 {
negR1Small = false
break
}
}
if !negR1Small {
t.Errorf("r1 not in range (-2^128, 2^128): r1=%v, -r1=%v", r1Bytes, negR1Bytes)
}
}
// Same for r2
var r2Bytes [32]byte
r2.getB32(r2Bytes[:])
r2Small := true
for i := 0; i < 16; i++ {
if r2Bytes[i] != 0 {
r2Small = false
break
}
}
if !r2Small {
var negR2 Scalar
negR2.negate(&r2)
var negR2Bytes [32]byte
negR2.getB32(negR2Bytes[:])
negR2Small := true
for i := 0; i < 16; i++ {
if negR2Bytes[i] != 0 {
negR2Small = false
break
}
}
if !negR2Small {
t.Errorf("r2 not in range (-2^128, 2^128): r2=%v, -r2=%v", r2Bytes, negR2Bytes)
}
}
})
}
}
// TestScalarSplitLambdaRandom exercises scalarSplitLambda over a sweep of small sequential scalars (no actual randomness, despite the name)
func TestScalarSplitLambdaRandom(t *testing.T) {
for i := 0; i < 100; i++ {
var k Scalar
k.setInt(uint(i + 1))
var r1, r2 Scalar
scalarSplitLambda(&r1, &r2, &k)
// Verify: r1 + lambda * r2 == k (mod n)
var lambdaR2, sum Scalar
lambdaR2.mul(&r2, &lambdaConstant)
sum.add(&r1, &lambdaR2)
if !sum.equal(&k) {
t.Errorf("Random test %d: r1 + lambda*r2 != k", i)
}
}
}
// TestGeMulLambda verifies that geMulLambda correctly multiplies points by lambda
// Property: lambda * (x, y) = (beta * x, y)
func TestGeMulLambda(t *testing.T) {
// Test with the curve point at x = 1 (setXOVar recovers y; this is not the generator)
var g GroupElementAffine
g.setXOVar(&FieldElementOne, false)
var lambdaG GroupElementAffine
geMulLambda(&lambdaG, &g)
// Verify: lambdaG.x == beta * g.x
var expectedX FieldElement
expectedX.mul(&g.x, &betaConstant)
expectedX.normalize()
lambdaG.x.normalize()
if !lambdaG.x.equal(&expectedX) {
t.Errorf("geMulLambda: x coordinate incorrect\nexpected: %v\ngot: %v", expectedX, lambdaG.x)
}
// Verify: lambdaG.y == g.y
g.y.normalize()
lambdaG.y.normalize()
if !lambdaG.y.equal(&g.y) {
t.Errorf("geMulLambda: y coordinate incorrect\nexpected: %v\ngot: %v", g.y, lambdaG.y)
}
}
// TestMulShiftVar verifies mulShiftVar matches C implementation behavior
func TestMulShiftVar(t *testing.T) {
var k, g Scalar
k.setInt(12345)
g.setInt(67890)
result := mulShiftVar(&k, &g, 384)
// With small k and g the 512-bit product (~2^30) is far below 2^384,
// so round(k*g / 2^384) must be exactly zero
if !result.isZero() {
t.Error("mulShiftVar(12345, 67890, 384) should round to zero")
}
// Test with shift = 0
result0 := mulShiftVar(&k, &g, 0)
expected0 := Scalar{}
expected0.mul(&k, &g)
if !result0.equal(&expected0) {
t.Error("mulShiftVar with shift=0 should equal multiplication")
}
}
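// An exact check of the rounding path (illustrative): 3*5 = 15, and
// round(15 / 2^3) = 2 because bit 2 of 15 is set.
func TestMulShiftVarExact(t *testing.T) {
	var k, g, want Scalar
	k.setInt(3)
	g.setInt(5)
	want.setInt(2)
	if got := mulShiftVar(&k, &g, 3); !got.equal(&want) {
		t.Errorf("mulShiftVar(3, 5, 3) = %v, want 2", got)
	}
}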
// TestHalf verifies the half operation via its defining property:
// 2 * half(x) == x (mod n). For odd x, half(x) = (x+n)/2 mod n, so there
// is no small "expected" value to compare against directly.
func TestHalf(t *testing.T) {
testCases := []struct {
name string
input uint
}{
{"even", 14},
{"odd", 7},
{"zero", 0},
{"one", 1},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
var input, half, doubled Scalar
input.setInt(tc.input)
half.half(&input)
doubled.add(&half, &half)
// Verify: 2 * half == input (mod n)
if !doubled.equal(&input) {
t.Errorf("2 * half != input: input=%d, half=%v, doubled=%v",
tc.input, half, doubled)
}
})
}
}
// TestEcmultConstGLVCompare compares GLV implementation with simple binary method
func TestEcmultConstGLVCompare(t *testing.T) {
// Test with the curve point at x = 1 (not the generator)
var g GroupElementAffine
g.setXOVar(&FieldElementOne, false)
testScalars := []struct {
name string
q *Scalar
}{
{"one", func() *Scalar { var s Scalar; s.setInt(1); return &s }()},
{"small", func() *Scalar { var s Scalar; s.setInt(12345); return &s }()},
{"medium", func() *Scalar { var s Scalar; s.setInt(0x12345678); return &s }()},
}
for _, tc := range testScalars {
t.Run(tc.name, func(t *testing.T) {
// Compute using simple binary method (reference)
var r1 GroupElementJacobian
var gJac GroupElementJacobian
gJac.setGE(&g)
r1.setInfinity()
var base GroupElementJacobian
base = gJac
for i := 0; i < 256; i++ {
if i > 0 {
r1.double(&r1)
}
bit := tc.q.getBits(uint(255-i), 1)
if bit != 0 {
if r1.isInfinity() {
r1 = base
} else {
r1.addVar(&r1, &base)
}
}
}
// Compute using GLV
var r2 GroupElementJacobian
ecmultConstGLV(&r2, &g, tc.q)
// Convert both to affine for comparison
var r1Aff, r2Aff GroupElementAffine
r1Aff.setGEJ(&r1)
r2Aff.setGEJ(&r2)
// Compare
if !r1Aff.equal(&r2Aff) {
t.Errorf("GLV result differs from reference\nr1: %v\nr2: %v", r1Aff, r2Aff)
}
})
}
}


@@ -1,273 +0,0 @@
.globl secp256k1_schnorrsig_verify
.type secp256k1_schnorrsig_verify, @function
secp256k1_schnorrsig_verify:
.LFB340:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
movq %rdi, %r14
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $2696, %rsp
.cfi_def_cfa_offset 2752
movq %fs:40, %rax
movq %rax, 2680(%rsp)
xorl %eax, %eax
testq %rsi, %rsi
je .L1384
movq %rsi, %rbp
movq %rdx, %r12
movq %rcx, %r15
movq %r8, %r13
testq %rdx, %rdx
jne .L1362
testq %rcx, %rcx
jne .L1385
.L1362:
testq %r13, %r13
je .L1386
leaq 176(%rsp), %rdi
movq %rbp, %rsi
call secp256k1_fe_impl_set_b32_mod
movq 200(%rsp), %r9
movq 192(%rsp), %r10
movabsq $4503599627370495, %rdx
movq 184(%rsp), %r11
movq 208(%rsp), %rbx
movq %r9, %rax
movq 176(%rsp), %rcx
andq %r10, %rax
movq %rbx, 8(%rsp)
andq %r11, %rax
movq %rcx, 16(%rsp)
cmpq %rdx, %rax
jne .L1364
movabsq $281474976710655, %rax
cmpq %rax, %rbx
movabsq $4503595332402222, %rax
sete %dl
cmpq %rax, %rcx
seta %al
testb %al, %dl
je .L1364
.L1365:
xorl %ebx, %ebx
.L1359:
movq 2680(%rsp), %rax
subq %fs:40, %rax
jne .L1387
addq $2696, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
movl %ebx, %eax
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.p2align 4,,10
.p2align 3
.L1364:
.cfi_restore_state
leaq 112(%rsp), %rax
leaq 76(%rsp), %rdx
movq %r11, 48(%rsp)
leaq 32(%rbp), %rsi
movq %rax, %rdi
movq %r10, 40(%rsp)
movq %r9, 32(%rsp)
movq %rax, 24(%rsp)
call secp256k1_scalar_set_b32
movl 76(%rsp), %ebx
testl %ebx, %ebx
jne .L1365
leaq 224(%rsp), %rsi
movq %r13, %rdx
movq %r14, %rdi
movq %rsi, 56(%rsp)
call secp256k1_pubkey_load
testl %eax, %eax
je .L1365
leaq 2640(%rsp), %r8
movq 56(%rsp), %rsi
movq %r12, %rdx
leaq 144(%rsp), %r14
movq %r8, %rdi
leaq 576(%rsp), %r13
leaq 1600(%rsp), %r12
call secp256k1_fe_impl_get_b32
movq %r15, %rcx
movq %rbp, %rsi
movq %r14, %rdi
call secp256k1_schnorrsig_challenge
movq %r14, %rsi
movq %r14, %rdi
leaq 896(%rsp), %rbp
call secp256k1_scalar_negate
movl 304(%rsp), %edx
movq 24(%rsp), %r8
movq %r14, %rcx
movdqa 224(%rsp), %xmm0
leaq 80(%rsp), %rdi
movdqa 240(%rsp), %xmm1
leaq 320(%rsp), %rsi
movl %edx, 568(%rsp)
movq %rsi, %r14
movq 256(%rsp), %rdx
movdqu 264(%rsp), %xmm2
movdqu 280(%rsp), %xmm3
movq $1, 528(%rsp)
movq %rdx, 480(%rsp)
movq 296(%rsp), %rdx
movq $0, 560(%rsp)
movq %rdx, 520(%rsp)
leaq 448(%rsp), %rdx
movq $0, 552(%rsp)
movq $0, 544(%rsp)
movq $0, 536(%rsp)
movq %r13, 80(%rsp)
movq %rbp, 88(%rsp)
movq %r12, 96(%rsp)
movaps %xmm0, 448(%rsp)
movaps %xmm1, 464(%rsp)
movups %xmm2, 488(%rsp)
movups %xmm3, 504(%rsp)
call secp256k1_ecmult_strauss_wnaf.constprop.0
movl 440(%rsp), %eax
testl %eax, %eax
jne .L1359
movq 432(%rsp), %rax
movq %rbp, %rdi
leaq 400(%rsp), %r15
movdqa 400(%rsp), %xmm4
movdqa 416(%rsp), %xmm5
movq %rax, 928(%rsp)
movaps %xmm4, 896(%rsp)
movaps %xmm5, 912(%rsp)
call secp256k1_fe_impl_normalize_var
movq %r12, %rdi
movq %rbp, %rsi
call secp256k1_fe_to_signed62
leaq secp256k1_const_modinfo_fe(%rip), %rsi
call secp256k1_modinv64_var
movq %r12, %rsi
movq %r15, %rdi
call secp256k1_fe_from_signed62
movq %r15, %rsi
movq %r13, %rdi
call secp256k1_fe_sqr_inner
movq %r13, %rdx
movq %r15, %rsi
movq %rbp, %rdi
call secp256k1_fe_mul_inner
movq %r13, %rdx
movq %r14, %rsi
movq %r14, %rdi
call secp256k1_fe_mul_inner
leaq 360(%rsp), %rdi
movq %rbp, %rdx
movq %rdi, %rsi
call secp256k1_fe_mul_inner
movq 352(%rsp), %rax
movdqa 320(%rsp), %xmm6
leaq 1640(%rsp), %rdi
movdqa 336(%rsp), %xmm7
movdqu 360(%rsp), %xmm4
movq $1, 400(%rsp)
movq %rax, 1632(%rsp)
movdqu 376(%rsp), %xmm5
movq 392(%rsp), %rax
movq $0, 432(%rsp)
movq $0, 424(%rsp)
movq $0, 416(%rsp)
movq $0, 408(%rsp)
movl $0, 1680(%rsp)
movq %rax, 1672(%rsp)
movaps %xmm6, 1600(%rsp)
movaps %xmm7, 1616(%rsp)
movups %xmm4, 1640(%rsp)
movups %xmm5, 1656(%rsp)
call secp256k1_fe_impl_normalize_var
movq 32(%rsp), %r9
movq 40(%rsp), %r10
testb $1, 1640(%rsp)
movq 48(%rsp), %r11
jne .L1365
movq 1608(%rsp), %rdx
movq %rbp, %rdi
xorl %ebx, %ebx
movabsq $18014381329608892, %rax
addq 1600(%rsp), %rax
subq 16(%rsp), %rax
movq %rax, 896(%rsp)
movabsq $18014398509481980, %rax
addq %rax, %rdx
subq %r11, %rdx
movq %rdx, 904(%rsp)
movq 1616(%rsp), %rdx
addq %rax, %rdx
addq 1624(%rsp), %rax
subq %r9, %rax
subq %r10, %rdx
movq %rax, 920(%rsp)
movabsq $1125899906842620, %rax
addq 1632(%rsp), %rax
subq 8(%rsp), %rax
movq %rdx, 912(%rsp)
movq %rax, 928(%rsp)
call secp256k1_fe_impl_normalizes_to_zero
testl %eax, %eax
setne %bl
jmp .L1359
.p2align 4,,10
.p2align 3
.L1385:
movq 176(%rdi), %rsi
xorl %ebx, %ebx
leaq .LC7(%rip), %rdi
call *168(%r14)
jmp .L1359
.p2align 4,,10
.p2align 3
.L1384:
movq 176(%rdi), %rsi
xorl %ebx, %ebx
leaq .LC6(%rip), %rdi
call *168(%r14)
jmp .L1359
.p2align 4,,10
.p2align 3
.L1386:
movq 176(%r14), %rsi
leaq .LC18(%rip), %rdi
xorl %ebx, %ebx
call *168(%r14)
jmp .L1359
.L1387:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE340:
.size secp256k1_schnorrsig_verify, .-secp256k1_schnorrsig_verify