@@ -56,7 +56,6 @@ static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *p
    secp256k1_fe_cmov(&(r)->y, &neg_y, (n) != abs_n); \
} while(0)

-
/** Convert a number to WNAF notation.
 *  The number becomes represented by sum(2^{wi} * wnaf[i], i=0..WNAF_SIZE(w)+1) - return_val.
 *  It has the following guarantees:
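To make the representation above concrete: the scalar equals the digit sum minus the returned skew, with every digit odd. A minimal sketch of that identity (plain C with int64 arithmetic and small inputs only; the helper name and sample digits are illustrative, not from the library):

#include <stdint.h>
#include <stdio.h>

/* Evaluate sum(2^(w*i) * wnaf[i]) - skew by Horner's rule. */
static int64_t wnaf_value(const int *wnaf, int len, int w, int skew) {
    int64_t acc = 0;
    int i;
    for (i = len - 1; i >= 0; i--) {
        acc = (acc << w) + wnaf[i];
    }
    return acc - skew;
}

int main(void) {
    /* 30 is even, so it is encoded as 31 = 15 + 1*2^4: odd digits, skew 1. */
    int wnaf[] = { 15, 1 };
    printf("%lld\n", (long long)wnaf_value(wnaf, 2, 4, 1)); /* prints 30 */
    return 0;
}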
@@ -72,51 +71,35 @@ static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *p
 */
static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) {
    int global_sign;
-    int skew = 0;
+    int skew;
    int word = 0;

    /* 1 2 3 */
    int u_last;
    int u;

    int flip;
-    int bit;
-    secp256k1_scalar s;
-    int not_neg_one;
+    secp256k1_scalar s = *scalar;

    VERIFY_CHECK(w > 0);
    VERIFY_CHECK(size > 0);

    /* Note that we cannot handle even numbers by negating them to be odd, as is
     * done in other implementations, since if our scalars were specified to have
     * width < 256 for performance reasons, their negations would have width 256
-     * and we'd lose any performance benefit. Instead, we use a technique from
-     * Section 4.2 of the Okeya/Tagaki paper, which is to add either 1 (for even)
-     * or 2 (for odd) to the number we are encoding, returning a skew value indicating
+     * and we'd lose any performance benefit. Instead, we use a variation of a
+     * technique from Section 4.2 of the Okeya/Tagaki paper, which is to add 1 to the
+     * number we are encoding when it is even, returning a skew value indicating
     * this, and having the caller compensate after doing the multiplication.
     *
     * In fact, we _do_ want to negate numbers to minimize their bit-lengths (and in
     * particular, to ensure that the outputs from the endomorphism-split fit into
-     * 128 bits). If we negate, the parity of our number flips, inverting which of
-     * {1, 2} we want to add to the scalar when ensuring that it's odd. Further
-     * complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and
-     * we need to special-case it in this logic. */
-    flip = secp256k1_scalar_is_high(scalar);
-    /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */
-    bit = flip ^ !secp256k1_scalar_is_even(scalar);
-    /* We check for negative one, since adding 2 to it will cause an overflow */
-    secp256k1_scalar_negate(&s, scalar);
-    not_neg_one = !secp256k1_scalar_is_one(&s);
-    s = *scalar;
-    secp256k1_scalar_cadd_bit(&s, bit, not_neg_one);
-    /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects
-     * that we added two to it and flipped it. In fact for -1 these operations are
-     * identical. We only flipped, but since skewing is required (in the sense that
-     * the skew must be 1 or 2, never zero) and flipping is not, we need to change
-     * our flags to claim that we only skewed. */
+     * 128 bits). If we negate, the parity of our number flips, affecting whether
+     * we want to add to the scalar to ensure that it's odd. */
+    flip = secp256k1_scalar_is_high(&s);
+    skew = flip ^ secp256k1_scalar_is_even(&s);
+    secp256k1_scalar_cadd_bit(&s, 0, skew);
    global_sign = secp256k1_scalar_cond_negate(&s, flip);
-    global_sign *= not_neg_one * 2 - 1;
-    skew = 1 << bit;

    /* 4 */
    u_last = secp256k1_scalar_shr_int(&s, w);
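The rewritten block works because the group order is odd: negating a scalar mod an odd order always flips its parity, so skew = flip ^ is_even(s) guarantees that s + skew is odd after the conditional negation. It also removes the old -1 overflow special case, since -1 is even mod an odd order and high, giving it skew 0. A toy exhaustive check over a small odd modulus standing in for the order (plain ints model is_high/is_even/cadd_bit/cond_negate; this is a sketch, not library code):

#include <assert.h>
#include <stdio.h>

#define M 13 /* any odd modulus; the secp256k1 group order is odd too */

int main(void) {
    int s;
    for (s = 0; s < M; s++) {
        int flip = s > M / 2;           /* models secp256k1_scalar_is_high */
        int skew = flip ^ (s % 2 == 0); /* models flip ^ is_even */
        int t = s + skew;               /* models secp256k1_scalar_cadd_bit(&s, 0, skew) */
        assert(t < M);                  /* never overflows: s = M-1 gets skew 0 */
        if (flip) t = M - t;            /* models secp256k1_scalar_cond_negate */
        assert(t % 2 == 1);             /* the encoded value is always odd */
        /* the caller can undo the adjustment afterwards */
        assert((flip ? M - t : t) - skew == s);
    }
    printf("parity check passed for all residues mod %d\n", M);
    return 0;
}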
@@ -236,19 +219,17 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons
        /* Correct for wNAF skew */
        secp256k1_gej tmp;
        secp256k1_ge a_1;
-
        secp256k1_ge_neg(&a_1, a);
-        secp256k1_gej_add_ge(r, r, &a_1);
+
        secp256k1_gej_add_ge(&tmp, r, &a_1);
-        secp256k1_gej_cmov(r, &tmp, skew_1 == 2);
+        secp256k1_gej_cmov(r, &tmp, skew_1);

        if (size > 128) {
            secp256k1_ge a_lam;
            secp256k1_ge_mul_lambda(&a_lam, &a_1);

-            secp256k1_gej_add_ge(r, r, &a_lam);
            secp256k1_gej_add_ge(&tmp, r, &a_lam);
-            secp256k1_gej_cmov(r, &tmp, skew_lam == 2);
+            secp256k1_gej_cmov(r, &tmp, skew_lam);
        }
    }
}
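The caller-side correction keeps the same branch-free shape as before, only with skew in {0, 1} instead of {1, 2}: the corrected point r + (-A) is always computed, and secp256k1_gej_cmov selects it without branching on the secret skew bit. The pattern, sketched with uint32 values standing in for group elements (cmov_u32 is a stand-in written for this sketch, not a library function):

#include <stdint.h>
#include <stdio.h>

/* Constant-time select: *r = flag ? a : *r, with no secret-dependent branch. */
static void cmov_u32(uint32_t *r, uint32_t a, int flag) {
    uint32_t mask = -(uint32_t)flag; /* all-ones if flag is 1, zero if 0 */
    *r = (*r & ~mask) | (a & mask);
}

int main(void) {
    uint32_t r = 100;              /* stands in for the uncorrected result (s + skew) * A */
    uint32_t neg_a = (uint32_t)-7; /* stands in for -A */
    uint32_t tmp = r + neg_a;      /* always compute the corrected value... */
    int skew = 1;                  /* secret bit returned by secp256k1_wnaf_const */
    cmov_u32(&r, tmp, skew);       /* ...then select it in constant time */
    printf("%u\n", r);             /* prints 93 when skew == 1, 100 when skew == 0 */
    return 0;
}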