@@ -534,7 +534,7 @@ static void secp256k1_gej_add_var(secp256k1_gej *r, const secp256k1_gej *a, cons
 }
 
 static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, secp256k1_fe *rzr) {
-    /* 8 mul, 3 sqr, 13 add/negate/normalize_weak/normalizes_to_zero (ignoring special cases) */
+    /* Operations: 8 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
     secp256k1_fe z12, u1, u2, s1, s2, h, i, h2, h3, t;
     secp256k1_gej_verify(a);
     secp256k1_ge_verify(b);
@@ -553,11 +553,11 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
     }
 
     secp256k1_fe_sqr(&z12, &a->z);
-    u1 = a->x; secp256k1_fe_normalize_weak(&u1);
+    u1 = a->x;
     secp256k1_fe_mul(&u2, &b->x, &z12);
-    s1 = a->y; secp256k1_fe_normalize_weak(&s1);
+    s1 = a->y;
     secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &a->z);
-    secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
+    secp256k1_fe_negate(&h, &u1, SECP256K1_GEJ_X_MAGNITUDE_MAX); secp256k1_fe_add(&h, &u2);
     secp256k1_fe_negate(&i, &s2, 1); secp256k1_fe_add(&i, &s1);
     if (secp256k1_fe_normalizes_to_zero_var(&h)) {
         if (secp256k1_fe_normalizes_to_zero_var(&i)) {
@@ -597,7 +597,7 @@ static void secp256k1_gej_add_ge_var(secp256k1_gej *r, const secp256k1_gej *a, c
 }
 
 static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b, const secp256k1_fe *bzinv) {
-    /* 9 mul, 3 sqr, 13 add/negate/normalize_weak/normalizes_to_zero (ignoring special cases) */
+    /* Operations: 9 mul, 3 sqr, 11 add/negate/normalizes_to_zero (ignoring special cases) */
     secp256k1_fe az, z12, u1, u2, s1, s2, h, i, h2, h3, t;
     secp256k1_gej_verify(a);
     secp256k1_ge_verify(b);
@@ -630,11 +630,11 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
     secp256k1_fe_mul(&az, &a->z, bzinv);
 
     secp256k1_fe_sqr(&z12, &az);
-    u1 = a->x; secp256k1_fe_normalize_weak(&u1);
+    u1 = a->x;
     secp256k1_fe_mul(&u2, &b->x, &z12);
-    s1 = a->y; secp256k1_fe_normalize_weak(&s1);
+    s1 = a->y;
     secp256k1_fe_mul(&s2, &b->y, &z12); secp256k1_fe_mul(&s2, &s2, &az);
-    secp256k1_fe_negate(&h, &u1, 1); secp256k1_fe_add(&h, &u2);
+    secp256k1_fe_negate(&h, &u1, SECP256K1_GEJ_X_MAGNITUDE_MAX); secp256k1_fe_add(&h, &u2);
     secp256k1_fe_negate(&i, &s2, 1); secp256k1_fe_add(&i, &s1);
     if (secp256k1_fe_normalizes_to_zero_var(&h)) {
         if (secp256k1_fe_normalizes_to_zero_var(&i)) {
@@ -668,14 +668,13 @@ static void secp256k1_gej_add_zinv_var(secp256k1_gej *r, const secp256k1_gej *a,
 static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_ge *b) {
-    /* Operations: 7 mul, 5 sqr, 24 add/cmov/half/mul_int/negate/normalize_weak/normalizes_to_zero */
+    /* Operations: 7 mul, 5 sqr, 21 add/cmov/half/mul_int/negate/normalizes_to_zero */
     secp256k1_fe zz, u1, u2, s1, s2, t, tt, m, n, q, rr;
     secp256k1_fe m_alt, rr_alt;
     int degenerate;
     secp256k1_gej_verify(a);
     secp256k1_ge_verify(b);
     VERIFY_CHECK(!b->infinity);
-    VERIFY_CHECK(a->infinity == 0 || a->infinity == 1);
 
     /* In:
      * Eric Brier and Marc Joye, Weierstrass Elliptic Curves and Side-Channel Attacks.
@@ -728,17 +727,17 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
      */
 
     secp256k1_fe_sqr(&zz, &a->z); /* z = Z1^2 */
-    u1 = a->x; secp256k1_fe_normalize_weak(&u1); /* u1 = U1 = X1*Z2^2 (1) */
+    u1 = a->x; /* u1 = U1 = X1*Z2^2 (GEJ_X_M) */
     secp256k1_fe_mul(&u2, &b->x, &zz); /* u2 = U2 = X2*Z1^2 (1) */
-    s1 = a->y; secp256k1_fe_normalize_weak(&s1); /* s1 = S1 = Y1*Z2^3 (1) */
+    s1 = a->y; /* s1 = S1 = Y1*Z2^3 (GEJ_Y_M) */
     secp256k1_fe_mul(&s2, &b->y, &zz); /* s2 = Y2*Z1^2 (1) */
     secp256k1_fe_mul(&s2, &s2, &a->z); /* s2 = S2 = Y2*Z1^3 (1) */
-    t = u1; secp256k1_fe_add(&t, &u2); /* t = T = U1+U2 (2) */
-    m = s1; secp256k1_fe_add(&m, &s2); /* m = M = S1+S2 (2) */
+    t = u1; secp256k1_fe_add(&t, &u2); /* t = T = U1+U2 (GEJ_X_M+1) */
+    m = s1; secp256k1_fe_add(&m, &s2); /* m = M = S1+S2 (GEJ_Y_M+1) */
     secp256k1_fe_sqr(&rr, &t); /* rr = T^2 (1) */
-    secp256k1_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 */
-    secp256k1_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (2) */
-    secp256k1_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (3) */
+    secp256k1_fe_negate(&m_alt, &u2, 1); /* Malt = -X2*Z1^2 (2) */
+    secp256k1_fe_mul(&tt, &u1, &m_alt); /* tt = -U1*U2 (1) */
+    secp256k1_fe_add(&rr, &tt); /* rr = R = T^2-U1*U2 (2) */
     /* If lambda = R/M = R/0 we have a problem (except in the "trivial"
      * case that Z = z1z2 = 0, and this is special-cased later on). */
     degenerate = secp256k1_fe_normalizes_to_zero(&m);
@@ -748,34 +747,36 @@ static void secp256k1_gej_add_ge(secp256k1_gej *r, const secp256k1_gej *a, const
      * non-indeterminate expression for lambda is (y1 - y2)/(x1 - x2),
      * so we set R/M equal to this. */
     rr_alt = s1;
-    secp256k1_fe_mul_int(&rr_alt, 2); /* rr = Y1*Z2^3 - Y2*Z1^3 (2) */
-    secp256k1_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 */
+    secp256k1_fe_mul_int(&rr_alt, 2); /* rr_alt = Y1*Z2^3 - Y2*Z1^3 (GEJ_Y_M*2) */
+    secp256k1_fe_add(&m_alt, &u1); /* Malt = X1*Z2^2 - X2*Z1^2 (GEJ_X_M+2) */
 
-    secp256k1_fe_cmov(&rr_alt, &rr, !degenerate);
-    secp256k1_fe_cmov(&m_alt, &m, !degenerate);
+    secp256k1_fe_cmov(&rr_alt, &rr, !degenerate); /* rr_alt (GEJ_Y_M*2) */
+    secp256k1_fe_cmov(&m_alt, &m, !degenerate); /* m_alt (GEJ_X_M+2) */
     /* Now Ralt / Malt = lambda and is guaranteed not to be Ralt / 0.
      * From here on out Ralt and Malt represent the numerator
      * and denominator of lambda; R and M represent the explicit
      * expressions x1^2 + x2^2 + x1x2 and y1 + y2. */
     secp256k1_fe_sqr(&n, &m_alt); /* n = Malt^2 (1) */
-    secp256k1_fe_negate(&q, &t, 2); /* q = -T (3) */
+    secp256k1_fe_negate(&q, &t,
+        SECP256K1_GEJ_X_MAGNITUDE_MAX + 1); /* q = -T (GEJ_X_M+2) */
     secp256k1_fe_mul(&q, &q, &n); /* q = Q = -T*Malt^2 (1) */
     /* These two lines use the observation that either M == Malt or M == 0,
      * so M^3 * Malt is either Malt^4 (which is computed by squaring), or
      * zero (which is "computed" by cmov). So the cost is one squaring
      * versus two multiplications. */
-    secp256k1_fe_sqr(&n, &n);
-    secp256k1_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (2) */
+    secp256k1_fe_sqr(&n, &n); /* n = Malt^4 (1) */
+    secp256k1_fe_cmov(&n, &m, degenerate); /* n = M^3 * Malt (GEJ_Y_M+1) */
     secp256k1_fe_sqr(&t, &rr_alt); /* t = Ralt^2 (1) */
     secp256k1_fe_mul(&r->z, &a->z, &m_alt); /* r->z = Z3 = Malt*Z (1) */
     secp256k1_fe_add(&t, &q); /* t = Ralt^2 + Q (2) */
     r->x = t; /* r->x = X3 = Ralt^2 + Q (2) */
     secp256k1_fe_mul_int(&t, 2); /* t = 2*X3 (4) */
     secp256k1_fe_add(&t, &q); /* t = 2*X3 + Q (5) */
     secp256k1_fe_mul(&t, &t, &rr_alt); /* t = Ralt*(2*X3 + Q) (1) */
-    secp256k1_fe_add(&t, &n); /* t = Ralt*(2*X3 + Q) + M^3*Malt (3) */
-    secp256k1_fe_negate(&r->y, &t, 3); /* r->y = -(Ralt*(2*X3 + Q) + M^3*Malt) (4) */
-    secp256k1_fe_half(&r->y); /* r->y = Y3 = -(Ralt*(2*X3 + Q) + M^3*Malt)/2 (3) */
+    secp256k1_fe_add(&t, &n); /* t = Ralt*(2*X3 + Q) + M^3*Malt (GEJ_Y_M+2) */
+    secp256k1_fe_negate(&r->y, &t,
+        SECP256K1_GEJ_Y_MAGNITUDE_MAX + 2); /* r->y = -(Ralt*(2*X3 + Q) + M^3*Malt) (GEJ_Y_M+3) */
+    secp256k1_fe_half(&r->y); /* r->y = Y3 = -(Ralt*(2*X3 + Q) + M^3*Malt)/2 ((GEJ_Y_M+3)/2 + 1) */
 
     /* In case a->infinity == 1, replace r with (b->x, b->y, 1). */
     secp256k1_fe_cmov(&r->x, &b->x, a->infinity);
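The pattern in all three hunks is the same: rather than calling secp256k1_fe_normalize_weak to force a->x and a->y down to magnitude 1 before negating, the negation is handed the largest magnitude those coordinates can legally carry (SECP256K1_GEJ_X_MAGNITUDE_MAX / SECP256K1_GEJ_Y_MAGNITUDE_MAX), and the inline comments track the resulting bounds. The toy program below is only an illustration of that bookkeeping, not code from this patch: it paraphrases the field-element magnitude contract as documented in field.h (the third argument of secp256k1_fe_negate is an upper bound on the input's magnitude and the result has that bound plus one; addition sums magnitudes; fe_mul/fe_sqr inputs should stay at magnitude 8 or below), and GEJ_X_MAG_MAX is a placeholder standing in for the real constant defined in group.h.

/* Toy magnitude tracker: illustration only, not part of libsecp256k1. */
#include <assert.h>
#include <stdio.h>

#define GEJ_X_MAG_MAX 4 /* placeholder; the real bound is SECP256K1_GEJ_X_MAGNITUDE_MAX */

/* Models the magnitude effect of secp256k1_fe_negate(r, a, m):
 * requires magnitude(a) <= m, and the result has magnitude m + 1. */
static int negate_mag(int in_mag, int bound) {
    assert(in_mag <= bound);
    return bound + 1;
}

int main(void) {
    int u2_mag = 1;            /* u2 comes straight out of secp256k1_fe_mul */
    int x_mag = GEJ_X_MAG_MAX; /* a->x may be anywhere up to the gej bound */

    /* Old sequence: u1 = a->x; normalize_weak(&u1); negate(&h, &u1, 1); add(&h, &u2). */
    int h_old = negate_mag(1, 1) + u2_mag;

    /* New sequence: u1 = a->x; negate(&h, &u1, GEJ_X_MAG_MAX); add(&h, &u2). */
    int h_new = negate_mag(x_mag, GEJ_X_MAG_MAX) + u2_mag;

    printf("magnitude bound on h: old %d, new %d\n", h_old, h_new);
    /* The new bound is larger, but still well within the magnitude limit
     * that the later fe_sqr/fe_mul calls on h require (at most 8). */
    assert(h_new <= 8);
    return 0;
}

With the placeholder value this prints a bound of 3 for the old sequence and 6 for the new one, which is the whole trade-off of the patch: one normalize_weak call per coordinate is saved at the cost of a somewhat larger, still safe, magnitude bound.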