csecp256k1

Haskell FFI bindings to bitcoin-core/secp256k1 (docs.ppad.tech/csecp256k1).
git clone git://git.ppad.tech/csecp256k1.git
Log | Files | Refs | README | LICENSE

field_5x52_impl.h (20913B)


      1 /***********************************************************************
      2  * Copyright (c) 2013, 2014 Pieter Wuille                              *
      3  * Distributed under the MIT software license, see the accompanying    *
      4  * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
      5  ***********************************************************************/
      6 
      7 #ifndef SECP256K1_FIELD_REPR_IMPL_H
      8 #define SECP256K1_FIELD_REPR_IMPL_H
      9 
     10 #include "checkmem.h"
     11 #include "util.h"
     12 #include "field.h"
     13 #include "modinv64_impl.h"
     14 
     15 #include "field_5x52_int128_impl.h"
     16 
     17 #ifdef VERIFY
        /* VERIFY-builds-only consistency check: assert that each limb of a is
         * within the bound implied by a->magnitude / a->normalized, and that a
         * normalized value is strictly less than the field prime p. */
     18 static void haskellsecp256k1_v0_1_0_fe_impl_verify(const haskellsecp256k1_v0_1_0_fe *a) {
     19     const uint64_t *d = a->n;
        /* A normalized element has effective magnitude 1; otherwise each limb
         * may reach 2*magnitude times its nominal per-limb maximum. */
     20     int m = a->normalized ? 1 : 2 * a->magnitude;
     21     /* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
     22     VERIFY_CHECK(d[0] <= 0xFFFFFFFFFFFFFULL * m);
     23     VERIFY_CHECK(d[1] <= 0xFFFFFFFFFFFFFULL * m);
     24     VERIFY_CHECK(d[2] <= 0xFFFFFFFFFFFFFULL * m);
     25     VERIFY_CHECK(d[3] <= 0xFFFFFFFFFFFFFULL * m);
     26     VERIFY_CHECK(d[4] <= 0x0FFFFFFFFFFFFULL * m);
     27     if (a->normalized) {
        /* When limbs 1..4 are all saturated, the value can only be < p
         * (p = 2^256 - 0x1000003D1) if the bottom limb is below p's bottom limb. */
     28         if ((d[4] == 0x0FFFFFFFFFFFFULL) && ((d[3] & d[2] & d[1]) == 0xFFFFFFFFFFFFFULL)) {
     29             VERIFY_CHECK(d[0] < 0xFFFFEFFFFFC2FULL);
     30         }
     31     }
     32 }
     33 #endif
     34 
        /* Set r to the componentwise-maximal representation of magnitude m:
         * every limb at its upper bound (2*m times the per-limb maximum). */
     35 static void haskellsecp256k1_v0_1_0_fe_impl_get_bounds(haskellsecp256k1_v0_1_0_fe *r, int m) {
     36     r->n[0] = 0xFFFFFFFFFFFFFULL * 2 * m;
     37     r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * m;
     38     r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * m;
     39     r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * m;
        /* Top limb holds only 48 bits, hence the smaller bound. */
     40     r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * m;
     41 }
     42 
        /* Fully reduce r to its unique canonical representative modulo
         * p = 2^256 - 0x1000003D1. Constant time: both reduction passes are
         * always executed regardless of the input value. */
     43 static void haskellsecp256k1_v0_1_0_fe_impl_normalize(haskellsecp256k1_v0_1_0_fe *r) {
     44     uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
     45 
     46     /* Reduce t4 at the start so there will be at most a single carry from the first pass */
     47     uint64_t m;
        /* Excess above 2^256 folds back in as x * 0x1000003D1 (= x * (2^256 mod p)). */
     48     uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
     49 
     50     /* The first pass ensures the magnitude is 1, ... */
     51     t0 += x * 0x1000003D1ULL;
     52     t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
        /* m accumulates the AND of limbs 1..3 to detect the all-ones pattern
         * required for a value to be >= p. */
     53     t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
     54     t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
     55     t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;
     56 
     57     /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
     58     VERIFY_CHECK(t4 >> 49 == 0);
     59 
     60     /* At most a single final reduction is needed; check if the value is >= the field characteristic */
     61     x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
     62         & (t0 >= 0xFFFFEFFFFFC2FULL));
     63 
     64     /* Apply the final reduction (for constant-time behaviour, we do it always) */
     65     t0 += x * 0x1000003D1ULL;
     66     t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
     67     t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
     68     t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
     69     t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
     70 
     71     /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
     72     VERIFY_CHECK(t4 >> 48 == x);
     73 
     74     /* Mask off the possible multiple of 2^256 from the final reduction */
     75     t4 &= 0x0FFFFFFFFFFFFULL;
     76 
     77     r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
     78 }
     79 
        /* Weakly normalize r: bring its magnitude down to 1 by a single carry
         * pass, without producing the canonical representative (the result may
         * still be >= p). Constant time. */
     80 static void haskellsecp256k1_v0_1_0_fe_impl_normalize_weak(haskellsecp256k1_v0_1_0_fe *r) {
     81     uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
     82 
     83     /* Reduce t4 at the start so there will be at most a single carry from the first pass */
     84     uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
     85 
     86     /* The first pass ensures the magnitude is 1, ... */
        /* Fold the excess above 2^256 back in: 2^256 mod p == 0x1000003D1. */
     87     t0 += x * 0x1000003D1ULL;
     88     t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
     89     t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
     90     t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
     91     t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
     92 
     93     /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
     94     VERIFY_CHECK(t4 >> 49 == 0);
     95 
     96     r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
     97 }
     98 
        /* Variable-time normalize: identical result to fe_impl_normalize, but
         * the final reduction pass runs only when actually needed (branches on
         * x), so this must not be used on secret data. */
     99 static void haskellsecp256k1_v0_1_0_fe_impl_normalize_var(haskellsecp256k1_v0_1_0_fe *r) {
    100     uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
    101 
    102     /* Reduce t4 at the start so there will be at most a single carry from the first pass */
    103     uint64_t m;
    104     uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
    105 
    106     /* The first pass ensures the magnitude is 1, ... */
    107     t0 += x * 0x1000003D1ULL;
    108     t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
        /* m accumulates the AND of limbs 1..3 to detect the all-ones pattern
         * required for a value to be >= p. */
    109     t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
    110     t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
    111     t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;
    112 
    113     /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
    114     VERIFY_CHECK(t4 >> 49 == 0);
    115 
    116     /* At most a single final reduction is needed; check if the value is >= the field characteristic */
    117     x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
    118         & (t0 >= 0xFFFFEFFFFFC2FULL));
    119 
    120     if (x) {
    121         t0 += 0x1000003D1ULL;
    122         t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
    123         t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
    124         t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
    125         t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
    126 
    127         /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
    128         VERIFY_CHECK(t4 >> 48 == x);
    129 
    130         /* Mask off the possible multiple of 2^256 from the final reduction */
    131         t4 &= 0x0FFFFFFFFFFFFULL;
    132     }
    133 
    134     r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
    135 }
    136 
        /* Return 1 if r represents 0 modulo p (i.e. its weakly-normalized raw
         * value is exactly 0 or exactly p), without modifying r. Constant time. */
    137 static int haskellsecp256k1_v0_1_0_fe_impl_normalizes_to_zero(const haskellsecp256k1_v0_1_0_fe *r) {
    138     uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
    139 
    140     /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
    141     uint64_t z0, z1;
    142 
    143     /* Reduce t4 at the start so there will be at most a single carry from the first pass */
    144     uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
    145 
    146     /* The first pass ensures the magnitude is 1, ... */
    147     t0 += x * 0x1000003D1ULL;
        /* t0 ^ 0x1000003D0 == 0xFFFFFFFFFFFFF exactly when t0 equals p's bottom
         * limb 0xFFFFEFFFFFC2F, so z1 stays all-ones only along the "raw == p" path. */
    148     t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; z0  = t0; z1  = t0 ^ 0x1000003D0ULL;
    149     t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
    150     t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
    151     t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
        /* Top limb of p is 0x0FFFFFFFFFFFF; XOR with 0xF000000000000 maps it to all-ones. */
    152                                                 z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;
    153 
    154     /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
    155     VERIFY_CHECK(t4 >> 49 == 0);
    156 
    157     return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
    158 }
    159 
        /* Variable-time variant of fe_impl_normalizes_to_zero: first checks only
         * the bottom limb and returns early when it can rule out both 0 and p.
         * Must not be used on secret data. */
    160 static int haskellsecp256k1_v0_1_0_fe_impl_normalizes_to_zero_var(const haskellsecp256k1_v0_1_0_fe *r) {
    161     uint64_t t0, t1, t2, t3, t4;
    162     uint64_t z0, z1;
    163     uint64_t x;
    164 
    165     t0 = r->n[0];
    166     t4 = r->n[4];
    167 
    168     /* Reduce t4 at the start so there will be at most a single carry from the first pass */
    169     x = t4 >> 48;
    170 
    171     /* The first pass ensures the magnitude is 1, ... */
    172     t0 += x * 0x1000003D1ULL;
    173 
    174     /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
    175     z0 = t0 & 0xFFFFFFFFFFFFFULL;
    176     z1 = z0 ^ 0x1000003D0ULL;
    177 
    178     /* Fast return path should catch the majority of cases */
    179     if ((z0 != 0ULL) & (z1 != 0xFFFFFFFFFFFFFULL)) {
    180         return 0;
    181     }
    182 
        /* Slow path: load the remaining limbs and run the full carry pass. */
    183     t1 = r->n[1];
    184     t2 = r->n[2];
    185     t3 = r->n[3];
    186 
    187     t4 &= 0x0FFFFFFFFFFFFULL;
    188 
    189     t1 += (t0 >> 52);
    190     t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
    191     t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
    192     t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
        /* Top limb of p is 0x0FFFFFFFFFFFF; XOR with 0xF000000000000 maps it to all-ones. */
    193                                                 z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;
    194 
    195     /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
    196     VERIFY_CHECK(t4 >> 49 == 0);
    197 
    198     return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
    199 }
    200 
        /* Set r to the small integer a (stored entirely in the bottom limb;
         * assumes a fits there — presumably callers pass small nonnegative
         * values, TODO confirm against the field.h contract). */
    201 SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_fe_impl_set_int(haskellsecp256k1_v0_1_0_fe *r, int a) {
    202     r->n[0] = a;
    203     r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
    204 }
    205 
        /* Return whether the raw limb representation is exactly zero. Only the
         * raw value is tested, so this is meaningful only for normalized inputs
         * (a non-canonical representation of 0, e.g. raw p, would not match). */
    206 SECP256K1_INLINE static int haskellsecp256k1_v0_1_0_fe_impl_is_zero(const haskellsecp256k1_v0_1_0_fe *a) {
    207     const uint64_t *t = a->n;
    208     return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0;
    209 }
    210 
        /* Return the parity (bit 0 of the bottom limb). Reads only the raw
         * representation, so meaningful only for normalized inputs. */
    211 SECP256K1_INLINE static int haskellsecp256k1_v0_1_0_fe_impl_is_odd(const haskellsecp256k1_v0_1_0_fe *a) {
    212     return a->n[0] & 1;
    213 }
    214 
        /* Zero all five limbs of a.
         * NOTE(review): these are plain stores; if callers rely on this to wipe
         * secrets, the compiler may elide it — confirm upstream's intent. */
    215 SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_fe_impl_clear(haskellsecp256k1_v0_1_0_fe *a) {
    216     int i;
    217     for (i=0; i<5; i++) {
    218         a->n[i] = 0;
    219     }
    220 }
    221 
        /* Compare the raw limbs of a and b, most-significant limb first; return
         * 1 if a > b, -1 if a < b, 0 if equal. Variable time (early returns),
         * and the result is only meaningful for normalized inputs. */
    222 static int haskellsecp256k1_v0_1_0_fe_impl_cmp_var(const haskellsecp256k1_v0_1_0_fe *a, const haskellsecp256k1_v0_1_0_fe *b) {
    223     int i;
    224     for (i = 4; i >= 0; i--) {
    225         if (a->n[i] > b->n[i]) {
    226             return 1;
    227         }
    228         if (a->n[i] < b->n[i]) {
    229             return -1;
    230         }
    231     }
    232     return 0;
    233 }
    234 
        /* Unpack a 32-byte big-endian value (a[0] most significant) into the
         * 5x52-bit limb representation. No reduction is performed here: inputs
         * >= p are stored with their raw limbs and are then non-canonical. */
    235 static void haskellsecp256k1_v0_1_0_fe_impl_set_b32_mod(haskellsecp256k1_v0_1_0_fe *r, const unsigned char *a) {
        /* Limb 0: bits 0..51 (bytes 31..26 plus the low nibble of byte 25). */
    236     r->n[0] = (uint64_t)a[31]
    237             | ((uint64_t)a[30] << 8)
    238             | ((uint64_t)a[29] << 16)
    239             | ((uint64_t)a[28] << 24)
    240             | ((uint64_t)a[27] << 32)
    241             | ((uint64_t)a[26] << 40)
    242             | ((uint64_t)(a[25] & 0xF)  << 48);
        /* Limb 1: bits 52..103 (high nibble of byte 25 through byte 19). */
    243     r->n[1] = (uint64_t)((a[25] >> 4) & 0xF)
    244             | ((uint64_t)a[24] << 4)
    245             | ((uint64_t)a[23] << 12)
    246             | ((uint64_t)a[22] << 20)
    247             | ((uint64_t)a[21] << 28)
    248             | ((uint64_t)a[20] << 36)
    249             | ((uint64_t)a[19] << 44);
        /* Limb 2: bits 104..155. */
    250     r->n[2] = (uint64_t)a[18]
    251             | ((uint64_t)a[17] << 8)
    252             | ((uint64_t)a[16] << 16)
    253             | ((uint64_t)a[15] << 24)
    254             | ((uint64_t)a[14] << 32)
    255             | ((uint64_t)a[13] << 40)
    256             | ((uint64_t)(a[12] & 0xF) << 48);
        /* Limb 3: bits 156..207. */
    257     r->n[3] = (uint64_t)((a[12] >> 4) & 0xF)
    258             | ((uint64_t)a[11] << 4)
    259             | ((uint64_t)a[10] << 12)
    260             | ((uint64_t)a[9]  << 20)
    261             | ((uint64_t)a[8]  << 28)
    262             | ((uint64_t)a[7]  << 36)
    263             | ((uint64_t)a[6]  << 44);
        /* Limb 4: bits 208..255 (48 bits). */
    264     r->n[4] = (uint64_t)a[5]
    265             | ((uint64_t)a[4] << 8)
    266             | ((uint64_t)a[3] << 16)
    267             | ((uint64_t)a[2] << 24)
    268             | ((uint64_t)a[1] << 32)
    269             | ((uint64_t)a[0] << 40);
    270 }
    271 
        /* Unpack a 32-byte big-endian value into r and return 1 if the value
         * was in range (< p), 0 if it was >= p (r still holds the raw limbs). */
    272 static int haskellsecp256k1_v0_1_0_fe_impl_set_b32_limit(haskellsecp256k1_v0_1_0_fe *r, const unsigned char *a) {
    273     haskellsecp256k1_v0_1_0_fe_impl_set_b32_mod(r, a);
        /* The negated condition mirrors the >= p test used in fe_impl_verify. */
    274     return !((r->n[4] == 0x0FFFFFFFFFFFFULL) & ((r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL) & (r->n[0] >= 0xFFFFEFFFFFC2FULL));
    275 }
    276 
    277 /** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
    278 static void haskellsecp256k1_v0_1_0_fe_impl_get_b32(unsigned char *r, const haskellsecp256k1_v0_1_0_fe *a) {
        /* Bytes 0..5: the 48-bit top limb, most significant byte first. */
    279     r[0] = (a->n[4] >> 40) & 0xFF;
    280     r[1] = (a->n[4] >> 32) & 0xFF;
    281     r[2] = (a->n[4] >> 24) & 0xFF;
    282     r[3] = (a->n[4] >> 16) & 0xFF;
    283     r[4] = (a->n[4] >> 8) & 0xFF;
    284     r[5] = a->n[4] & 0xFF;
        /* Bytes 6..12: limb 3 (bytes 12 and 25 straddle two limbs, packing
         * 4 bits from each — the exact inverse of set_b32_mod). */
    285     r[6] = (a->n[3] >> 44) & 0xFF;
    286     r[7] = (a->n[3] >> 36) & 0xFF;
    287     r[8] = (a->n[3] >> 28) & 0xFF;
    288     r[9] = (a->n[3] >> 20) & 0xFF;
    289     r[10] = (a->n[3] >> 12) & 0xFF;
    290     r[11] = (a->n[3] >> 4) & 0xFF;
    291     r[12] = ((a->n[2] >> 48) & 0xF) | ((a->n[3] & 0xF) << 4);
    292     r[13] = (a->n[2] >> 40) & 0xFF;
    293     r[14] = (a->n[2] >> 32) & 0xFF;
    294     r[15] = (a->n[2] >> 24) & 0xFF;
    295     r[16] = (a->n[2] >> 16) & 0xFF;
    296     r[17] = (a->n[2] >> 8) & 0xFF;
    297     r[18] = a->n[2] & 0xFF;
    298     r[19] = (a->n[1] >> 44) & 0xFF;
    299     r[20] = (a->n[1] >> 36) & 0xFF;
    300     r[21] = (a->n[1] >> 28) & 0xFF;
    301     r[22] = (a->n[1] >> 20) & 0xFF;
    302     r[23] = (a->n[1] >> 12) & 0xFF;
    303     r[24] = (a->n[1] >> 4) & 0xFF;
    304     r[25] = ((a->n[0] >> 48) & 0xF) | ((a->n[1] & 0xF) << 4);
    305     r[26] = (a->n[0] >> 40) & 0xFF;
    306     r[27] = (a->n[0] >> 32) & 0xFF;
    307     r[28] = (a->n[0] >> 24) & 0xFF;
    308     r[29] = (a->n[0] >> 16) & 0xFF;
    309     r[30] = (a->n[0] >> 8) & 0xFF;
    310     r[31] = a->n[0] & 0xFF;
    311 }
    312 
        /* Set r = -a (mod p) by subtracting a's limbs from the limbs of
         * 2*(m+1)*p, which is large enough that no limb underflows when a has
         * magnitude <= m. The output has magnitude m+1. "Unchecked": the caller
         * guarantees the magnitude bound. */
    313 SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_fe_impl_negate_unchecked(haskellsecp256k1_v0_1_0_fe *r, const haskellsecp256k1_v0_1_0_fe *a, int m) {
    314     /* For all legal values of m (0..31), the following properties hold: */
    315     VERIFY_CHECK(0xFFFFEFFFFFC2FULL * 2 * (m + 1) >= 0xFFFFFFFFFFFFFULL * 2 * m);
    316     VERIFY_CHECK(0xFFFFFFFFFFFFFULL * 2 * (m + 1) >= 0xFFFFFFFFFFFFFULL * 2 * m);
    317     VERIFY_CHECK(0x0FFFFFFFFFFFFULL * 2 * (m + 1) >= 0x0FFFFFFFFFFFFULL * 2 * m);
    318 
    319     /* Due to the properties above, the left hand in the subtractions below is never less than
    320      * the right hand. */
    321     r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0];
    322     r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1];
    323     r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[2];
    324     r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[3];
    325     r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * (m + 1) - a->n[4];
    326 }
    327 
        /* Multiply every limb by the small integer a (magnitude scales by a).
         * "Unchecked": no overflow guard here; the caller must keep the
         * resulting limbs within the representation's bounds. */
    328 SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_fe_impl_mul_int_unchecked(haskellsecp256k1_v0_1_0_fe *r, int a) {
    329     r->n[0] *= a;
    330     r->n[1] *= a;
    331     r->n[2] *= a;
    332     r->n[3] *= a;
    333     r->n[4] *= a;
    334 }
    335 
        /* Add the small integer a into the bottom limb only; no carry
         * propagation (the representation absorbs it as extra magnitude). */
    336 SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_fe_impl_add_int(haskellsecp256k1_v0_1_0_fe *r, int a) {
    337     r->n[0] += a;
    338 }
    339 
        /* Limb-wise addition r += a; magnitudes add. No carry propagation or
         * reduction is performed here. */
    340 SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_fe_impl_add(haskellsecp256k1_v0_1_0_fe *r, const haskellsecp256k1_v0_1_0_fe *a) {
    341     r->n[0] += a->n[0];
    342     r->n[1] += a->n[1];
    343     r->n[2] += a->n[2];
    344     r->n[3] += a->n[3];
    345     r->n[4] += a->n[4];
    346 }
    347 
        /* Field multiplication r = a * b, delegated to the 5x52 int128 multiply
         * kernel. b is restrict-qualified: it must not alias r. */
    348 SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_fe_impl_mul(haskellsecp256k1_v0_1_0_fe *r, const haskellsecp256k1_v0_1_0_fe *a, const haskellsecp256k1_v0_1_0_fe * SECP256K1_RESTRICT b) {
    349     haskellsecp256k1_v0_1_0_fe_mul_inner(r->n, a->n, b->n);
    350 }
    351 
        /* Field squaring r = a * a, delegated to the 5x52 int128 squaring kernel. */
    352 SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_fe_impl_sqr(haskellsecp256k1_v0_1_0_fe *r, const haskellsecp256k1_v0_1_0_fe *a) {
    353     haskellsecp256k1_v0_1_0_fe_sqr_inner(r->n, a->n);
    354 }
    355 
        /* Constant-time conditional move: r = flag ? a : r. The flag is read
         * through a volatile local and turned into full-width masks so the
         * compiler cannot replace the selection with a data-dependent branch. */
    356 SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_fe_impl_cmov(haskellsecp256k1_v0_1_0_fe *r, const haskellsecp256k1_v0_1_0_fe *a, int flag) {
    357     uint64_t mask0, mask1;
    358     volatile int vflag = flag;
    359     SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n));
        /* flag==0 -> mask0 = all-ones (keep r); flag==1 -> mask0 = 0 (take a). */
    360     mask0 = vflag + ~((uint64_t)0);
    361     mask1 = ~mask0;
    362     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    363     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    364     r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    365     r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
    366     r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
    367 }
    368 
        /* Halve r modulo p, branch-free: if r is odd, first add p (selected via
         * the all-ones/zero mask derived from bit 0), making the value even,
         * then shift the whole 260-bit quantity right by one across the limbs. */
    369 static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_fe_impl_half(haskellsecp256k1_v0_1_0_fe *r) {
    370     uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
    371     uint64_t one = (uint64_t)1;
        /* mask = 52 set bits if t0 is odd, else 0 (-(1) >> 12 keeps 52 bits). */
    372     uint64_t mask = -(t0 & one) >> 12;
    373 
    374     /* Bounds analysis (over the rationals).
    375      *
    376      * Let m = r->magnitude
    377      *     C = 0xFFFFFFFFFFFFFULL * 2
    378      *     D = 0x0FFFFFFFFFFFFULL * 2
    379      *
    380      * Initial bounds: t0..t3 <= C * m
    381      *                     t4 <= D * m
    382      */
    383 
        /* Conditionally add p: its limbs are (0xFFFFEFFFFFC2F, 0xFFFFFFFFFFFFF x3, 0x0FFFFFFFFFFFF). */
    384     t0 += 0xFFFFEFFFFFC2FULL & mask;
    385     t1 += mask;
    386     t2 += mask;
    387     t3 += mask;
    388     t4 += mask >> 4;
    389 
    390     VERIFY_CHECK((t0 & one) == 0);
    391 
    392     /* t0..t3: added <= C/2
    393      *     t4: added <= D/2
    394      *
    395      * Current bounds: t0..t3 <= C * (m + 1/2)
    396      *                     t4 <= D * (m + 1/2)
    397      */
    398 
        /* Shift right by 1, moving each limb's low bit into the limb below (bit 51). */
    399     r->n[0] = (t0 >> 1) + ((t1 & one) << 51);
    400     r->n[1] = (t1 >> 1) + ((t2 & one) << 51);
    401     r->n[2] = (t2 >> 1) + ((t3 & one) << 51);
    402     r->n[3] = (t3 >> 1) + ((t4 & one) << 51);
    403     r->n[4] = (t4 >> 1);
    404 
    405     /* t0..t3: shifted right and added <= C/4 + 1/2
    406      *     t4: shifted right
    407      *
    408      * Current bounds: t0..t3 <= C * (m/2 + 1/2)
    409      *                     t4 <= D * (m/2 + 1/4)
    410      *
    411      * Therefore the output magnitude (M) has to be set such that:
    412      *     t0..t3: C * M >= C * (m/2 + 1/2)
    413      *         t4: D * M >= D * (m/2 + 1/4)
    414      *
    415      * It suffices for all limbs that, for any input magnitude m:
    416      *     M >= m/2 + 1/2
    417      *
    418      * and since we want the smallest such integer value for M:
    419      *     M == floor(m/2) + 1
    420      */
    421 }
    422 
        /* Constant-time conditional move for the 4x64 storage form:
         * r = flag ? a : r. Same volatile/mask technique as fe_impl_cmov. */
    423 static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_fe_storage_cmov(haskellsecp256k1_v0_1_0_fe_storage *r, const haskellsecp256k1_v0_1_0_fe_storage *a, int flag) {
    424     uint64_t mask0, mask1;
    425     volatile int vflag = flag;
    426     SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n));
        /* flag==0 -> mask0 = all-ones (keep r); flag==1 -> mask0 = 0 (take a). */
    427     mask0 = vflag + ~((uint64_t)0);
    428     mask1 = ~mask0;
    429     r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    430     r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    431     r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    432     r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
    433 }
    434 
        /* Pack the 5x52 limbs into the compact 4x64 storage layout. Any bits
         * above 52 per limb are silently merged into the next word, so the
         * input is assumed normalized (TODO confirm against field.h contract). */
    435 static void haskellsecp256k1_v0_1_0_fe_impl_to_storage(haskellsecp256k1_v0_1_0_fe_storage *r, const haskellsecp256k1_v0_1_0_fe *a) {
    436     r->n[0] = a->n[0] | a->n[1] << 52;
    437     r->n[1] = a->n[1] >> 12 | a->n[2] << 40;
    438     r->n[2] = a->n[2] >> 24 | a->n[3] << 28;
    439     r->n[3] = a->n[3] >> 36 | a->n[4] << 16;
    440 }
    441 
        /* Unpack the 4x64 storage layout back into 5x52 limbs — the exact
         * inverse of fe_impl_to_storage. */
    442 static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_fe_impl_from_storage(haskellsecp256k1_v0_1_0_fe *r, const haskellsecp256k1_v0_1_0_fe_storage *a) {
    443     r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL;
    444     r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL);
    445     r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL);
    446     r->n[3] = a->n[2] >> 28 | ((a->n[3] << 36) & 0xFFFFFFFFFFFFFULL);
    447     r->n[4] = a->n[3] >> 16;
    448 }
    449 
        /* Convert from the 5x62 signed-limb representation used by modinv64
         * back to 5x52 limbs. The input is expected to already be fully reduced
         * (see the VERIFY_CHECKs below), so no masking of the top limb is needed. */
    450 static void haskellsecp256k1_v0_1_0_fe_from_signed62(haskellsecp256k1_v0_1_0_fe *r, const haskellsecp256k1_v0_1_0_modinv64_signed62 *a) {
    451     const uint64_t M52 = UINT64_MAX >> 12;
    452     const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];
    453 
    454     /* The output from haskellsecp256k1_v0_1_0_modinv64{_var} should be normalized to range [0,modulus), and
    455      * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4).
    456      */
    457     VERIFY_CHECK(a0 >> 62 == 0);
    458     VERIFY_CHECK(a1 >> 62 == 0);
    459     VERIFY_CHECK(a2 >> 62 == 0);
    460     VERIFY_CHECK(a3 >> 62 == 0);
    461     VERIFY_CHECK(a4 >> 8 == 0);
    462 
        /* Re-slice the 62-bit limbs into 52-bit limbs. */
    463     r->n[0] =  a0                   & M52;
    464     r->n[1] = (a0 >> 52 | a1 << 10) & M52;
    465     r->n[2] = (a1 >> 42 | a2 << 20) & M52;
    466     r->n[3] = (a2 >> 32 | a3 << 30) & M52;
    467     r->n[4] = (a3 >> 22 | a4 << 40);
    468 }
    469 
        /* Convert 5x52 limbs into the 5x62 signed-limb layout consumed by
         * modinv64. Purely a re-slicing of bits; no arithmetic is performed. */
    470 static void haskellsecp256k1_v0_1_0_fe_to_signed62(haskellsecp256k1_v0_1_0_modinv64_signed62 *r, const haskellsecp256k1_v0_1_0_fe *a) {
    471     const uint64_t M62 = UINT64_MAX >> 2;
    472     const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4];
    473 
    474     r->v[0] = (a0       | a1 << 52) & M62;
    475     r->v[1] = (a1 >> 10 | a2 << 42) & M62;
    476     r->v[2] = (a2 >> 20 | a3 << 32) & M62;
    477     r->v[3] = (a3 >> 30 | a4 << 22) & M62;
    478     r->v[4] =  a4 >> 40;
    479 }
    480 
        /* Modulus descriptor for modinv64: p encoded in signed62 limbs as
         * 2^256 - 0x1000003D1 (hence the negative bottom limb and the 256 in
         * the top position), plus a precomputed modular-inverse constant used
         * by the divstep update — presumably p^-1 mod 2^62; see modinv64_impl.h. */
    481 static const haskellsecp256k1_v0_1_0_modinv64_modinfo haskellsecp256k1_v0_1_0_const_modinfo_fe = {
    482     {{-0x1000003D1LL, 0, 0, 0, 256}},
    483     0x27C7F6E22DDACACFLL
    484 };
    485 
        /* Compute r = x^-1 mod p using the constant-time safegcd inverse
         * (modinv64). The input is normalized first because the signed62
         * conversion is a pure bit re-slicing with no reduction. */
    486 static void haskellsecp256k1_v0_1_0_fe_impl_inv(haskellsecp256k1_v0_1_0_fe *r, const haskellsecp256k1_v0_1_0_fe *x) {
    487     haskellsecp256k1_v0_1_0_fe tmp = *x;
    488     haskellsecp256k1_v0_1_0_modinv64_signed62 s;
    489 
    490     haskellsecp256k1_v0_1_0_fe_normalize(&tmp);
    491     haskellsecp256k1_v0_1_0_fe_to_signed62(&s, &tmp);
    492     haskellsecp256k1_v0_1_0_modinv64(&s, &haskellsecp256k1_v0_1_0_const_modinfo_fe);
    493     haskellsecp256k1_v0_1_0_fe_from_signed62(r, &s);
    494 }
    495 
        /* Variable-time counterpart of fe_impl_inv: uses the _var normalize and
         * modinv64_var routines. Must not be used on secret data. */
    496 static void haskellsecp256k1_v0_1_0_fe_impl_inv_var(haskellsecp256k1_v0_1_0_fe *r, const haskellsecp256k1_v0_1_0_fe *x) {
    497     haskellsecp256k1_v0_1_0_fe tmp = *x;
    498     haskellsecp256k1_v0_1_0_modinv64_signed62 s;
    499 
    500     haskellsecp256k1_v0_1_0_fe_normalize_var(&tmp);
    501     haskellsecp256k1_v0_1_0_fe_to_signed62(&s, &tmp);
    502     haskellsecp256k1_v0_1_0_modinv64_var(&s, &haskellsecp256k1_v0_1_0_const_modinfo_fe);
    503     haskellsecp256k1_v0_1_0_fe_from_signed62(r, &s);
    504 }
    505 
        /* Return 1 if x is a quadratic residue modulo p (0 counts as a square),
         * 0 otherwise. Variable time: computes the Jacobi symbol, falling back
         * to an explicit square-root attempt if the symbol computation gives up. */
    506 static int haskellsecp256k1_v0_1_0_fe_impl_is_square_var(const haskellsecp256k1_v0_1_0_fe *x) {
    507     haskellsecp256k1_v0_1_0_fe tmp;
    508     haskellsecp256k1_v0_1_0_modinv64_signed62 s;
    509     int jac, ret;
    510 
    511     tmp = *x;
    512     haskellsecp256k1_v0_1_0_fe_normalize_var(&tmp);
    513     /* haskellsecp256k1_v0_1_0_jacobi64_maybe_var cannot deal with input 0. */
    514     if (haskellsecp256k1_v0_1_0_fe_is_zero(&tmp)) return 1;
    515     haskellsecp256k1_v0_1_0_fe_to_signed62(&s, &tmp);
    516     jac = haskellsecp256k1_v0_1_0_jacobi64_maybe_var(&s, &haskellsecp256k1_v0_1_0_const_modinfo_fe);
    517     if (jac == 0) {
    518         /* haskellsecp256k1_v0_1_0_jacobi64_maybe_var failed to compute the Jacobi symbol. Fall back
    519          * to computing a square root. This should be extremely rare with random
    520          * input (except in VERIFY mode, where a lower iteration count is used). */
    521         haskellsecp256k1_v0_1_0_fe dummy;
    522         ret = haskellsecp256k1_v0_1_0_fe_sqrt(&dummy, &tmp);
    523     } else {
        /* jac == 0 was handled above, so jac >= 0 here means jac == +1 (residue). */
    524         ret = jac >= 0;
    525     }
    526     return ret;
    527 }
    528 
    529 #endif /* SECP256K1_FIELD_REPR_IMPL_H */