csecp256k1

Haskell FFI bindings to bitcoin-core/secp256k1 (docs.ppad.tech/csecp256k1).
git clone git://git.ppad.tech/csecp256k1.git

int128_struct_impl.h (8654B)


#ifndef SECP256K1_INT128_STRUCT_IMPL_H
#define SECP256K1_INT128_STRUCT_IMPL_H

#include "int128.h"
#include "util.h"

#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64)) /* MSVC */
#    include <intrin.h>
#    if defined(_M_ARM64) || defined(SECP256K1_MSVC_MULH_TEST_OVERRIDE)
/* On ARM64 MSVC, use __(u)mulh for the upper half of 64x64 multiplications.
   (Define SECP256K1_MSVC_MULH_TEST_OVERRIDE to test this code path on X64,
   which supports both __(u)mulh and _umul128.) */
#        if defined(SECP256K1_MSVC_MULH_TEST_OVERRIDE)
#            pragma message(__FILE__ ": SECP256K1_MSVC_MULH_TEST_OVERRIDE is defined, forcing use of __(u)mulh.")
#        endif
static SECP256K1_INLINE uint64_t haskellsecp256k1_v0_1_0_umul128(uint64_t a, uint64_t b, uint64_t* hi) {
    *hi = __umulh(a, b);
    return a * b;
}

static SECP256K1_INLINE int64_t haskellsecp256k1_v0_1_0_mul128(int64_t a, int64_t b, int64_t* hi) {
    *hi = __mulh(a, b);
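    /* In two's complement the low 64 bits of the signed and unsigned
       products coincide, so multiplying as uint64_t yields the correct
       low half while avoiding signed-overflow undefined behaviour. */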
    return (uint64_t)a * (uint64_t)b;
}
#    else
/* On x86_64 MSVC, use native _(u)mul128 for 64x64->128 multiplications. */
#        define haskellsecp256k1_v0_1_0_umul128 _umul128
#        define haskellsecp256k1_v0_1_0_mul128 _mul128
#    endif
#else
/* On other systems, emulate 64x64->128 multiplications using 32x32->64 multiplications. */
static SECP256K1_INLINE uint64_t haskellsecp256k1_v0_1_0_umul128(uint64_t a, uint64_t b, uint64_t* hi) {
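    /* Schoolbook multiplication on 32-bit halves: with a = 2^32*a1 + a0 and
       b = 2^32*b1 + b0, a*b = 2^64*(a1*b1) + 2^32*(a1*b0 + a0*b1) + a0*b0.
       Each 32x32->64 partial product below fits in a uint64_t. */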
    uint64_t ll = (uint64_t)(uint32_t)a * (uint32_t)b;
    uint64_t lh = (uint32_t)a * (b >> 32);
    uint64_t hl = (a >> 32) * (uint32_t)b;
    uint64_t hh = (a >> 32) * (b >> 32);
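    /* mid34 gathers bits 32..65 of the product: its three terms are each
       below 2^32, so their sum is below 3*2^32 < 2^34 and cannot wrap. */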
    uint64_t mid34 = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;
    *hi = hh + (lh >> 32) + (hl >> 32) + (mid34 >> 32);
    return (mid34 << 32) + (uint32_t)ll;
}

static SECP256K1_INLINE int64_t haskellsecp256k1_v0_1_0_mul128(int64_t a, int64_t b, int64_t* hi) {
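    /* Same schoolbook scheme as above, but a >> 32 and b >> 32 are now
       arithmetic shifts, so the cross and high partial products are
       sign-extended and the accumulation yields the signed high half;
       the low 64 bits agree with the unsigned product. */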
    uint64_t ll = (uint64_t)(uint32_t)a * (uint32_t)b;
    int64_t lh = (uint32_t)a * (b >> 32);
    int64_t hl = (a >> 32) * (uint32_t)b;
    int64_t hh = (a >> 32) * (b >> 32);
    uint64_t mid34 = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;
    *hi = hh + (lh >> 32) + (hl >> 32) + (mid34 >> 32);
    return (mid34 << 32) + (uint32_t)ll;
}
#endif

static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_u128_load(haskellsecp256k1_v0_1_0_uint128 *r, uint64_t hi, uint64_t lo) {
    r->hi = hi;
    r->lo = lo;
}

static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_u128_mul(haskellsecp256k1_v0_1_0_uint128 *r, uint64_t a, uint64_t b) {
   r->lo = haskellsecp256k1_v0_1_0_umul128(a, b, &r->hi);
}

static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_u128_accum_mul(haskellsecp256k1_v0_1_0_uint128 *r, uint64_t a, uint64_t b) {
   uint64_t lo, hi;
   lo = haskellsecp256k1_v0_1_0_umul128(a, b, &hi);
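   /* After the wrapping addition below, r->lo is smaller than lo exactly
      when the addition carried; that comparison supplies the carry into
      the high limb. */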
   r->lo += lo;
   r->hi += hi + (r->lo < lo);
}

static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_u128_accum_u64(haskellsecp256k1_v0_1_0_uint128 *r, uint64_t a) {
   r->lo += a;
   r->hi += r->lo < a;
}

/* Unsigned (logical) right shift.
 * Non-constant time in n.
 */
static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_u128_rshift(haskellsecp256k1_v0_1_0_uint128 *r, unsigned int n) {
   VERIFY_CHECK(n < 128);
   if (n >= 64) {
     r->lo = r->hi >> (n-64);
     r->hi = 0;
   } else if (n > 0) {
#if defined(_MSC_VER) && defined(_M_X64)
     VERIFY_CHECK(n < 64);
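     /* __shiftright128 shifts the 128-bit value (r->hi:r->lo) right by n
        bits and returns the low 64 bits of the result. */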
     r->lo = __shiftright128(r->lo, r->hi, n);
#else
     r->lo = ((1U * r->hi) << (64-n)) | r->lo >> n;
#endif
     r->hi >>= n;
   }
}

static SECP256K1_INLINE uint64_t haskellsecp256k1_v0_1_0_u128_to_u64(const haskellsecp256k1_v0_1_0_uint128 *a) {
   return a->lo;
}

static SECP256K1_INLINE uint64_t haskellsecp256k1_v0_1_0_u128_hi_u64(const haskellsecp256k1_v0_1_0_uint128 *a) {
   return a->hi;
}

static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_u128_from_u64(haskellsecp256k1_v0_1_0_uint128 *r, uint64_t a) {
   r->hi = 0;
   r->lo = a;
}

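/* Test whether r is in [0, 2^n), i.e. whether it fits in n bits. */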
static SECP256K1_INLINE int haskellsecp256k1_v0_1_0_u128_check_bits(const haskellsecp256k1_v0_1_0_uint128 *r, unsigned int n) {
   VERIFY_CHECK(n < 128);
   return n >= 64 ? r->hi >> (n - 64) == 0
                  : r->hi == 0 && r->lo >> n == 0;
}

static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_i128_load(haskellsecp256k1_v0_1_0_int128 *r, int64_t hi, uint64_t lo) {
    r->hi = hi;
    r->lo = lo;
}

static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_i128_mul(haskellsecp256k1_v0_1_0_int128 *r, int64_t a, int64_t b) {
   int64_t hi;
   r->lo = (uint64_t)haskellsecp256k1_v0_1_0_mul128(a, b, &hi);
   r->hi = (uint64_t)hi;
}

static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_i128_accum_mul(haskellsecp256k1_v0_1_0_int128 *r, int64_t a, int64_t b) {
   int64_t hi;
   uint64_t lo = (uint64_t)haskellsecp256k1_v0_1_0_mul128(a, b, &hi);
   r->lo += lo;
   hi += r->lo < lo;
   /* Verify no overflow.
    * If r represents a positive value (the sign bit is not set) and the value we are adding is a positive value (the sign bit is not set),
    * then we require that the resulting value also be positive (the sign bit is not set).
    * Note that (X <= Y) means (X implies Y) when X and Y are boolean values (i.e. 0 or 1).
    */
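   /* Here X is "both summands are nonnegative" and Y is "the sum is
      nonnegative"; X <= Y fails only when X holds and Y does not, i.e.
      exactly on overflow. */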
   VERIFY_CHECK((r->hi <= 0x7fffffffffffffffu && (uint64_t)hi <= 0x7fffffffffffffffu) <= (r->hi + (uint64_t)hi <= 0x7fffffffffffffffu));
   /* Verify no underflow.
    * If r represents a negative value (the sign bit is set) and the value we are adding is a negative value (the sign bit is set),
    * then we require that the resulting value also be negative (the sign bit is set).
    */
   VERIFY_CHECK((r->hi > 0x7fffffffffffffffu && (uint64_t)hi > 0x7fffffffffffffffu) <= (r->hi + (uint64_t)hi > 0x7fffffffffffffffu));
   r->hi += hi;
}

static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_i128_dissip_mul(haskellsecp256k1_v0_1_0_int128 *r, int64_t a, int64_t b) {
   int64_t hi;
   uint64_t lo = (uint64_t)haskellsecp256k1_v0_1_0_mul128(a, b, &hi);
   hi += r->lo < lo;
   /* Verify no overflow.
    * If r represents a positive value (the sign bit is not set) and the value we are subtracting is a negative value (the sign bit is set),
    * then we require that the resulting value also be positive (the sign bit is not set).
    */
   VERIFY_CHECK((r->hi <= 0x7fffffffffffffffu && (uint64_t)hi > 0x7fffffffffffffffu) <= (r->hi - (uint64_t)hi <= 0x7fffffffffffffffu));
   /* Verify no underflow.
    * If r represents a negative value (the sign bit is set) and the value we are subtracting is a positive value (the sign bit is not set),
    * then we require that the resulting value also be negative (the sign bit is set).
    */
   VERIFY_CHECK((r->hi > 0x7fffffffffffffffu && (uint64_t)hi <= 0x7fffffffffffffffu) <= (r->hi - (uint64_t)hi > 0x7fffffffffffffffu));
   r->hi -= hi;
   r->lo -= lo;
}

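/* Compute the 2x2 determinant a*d - b*c as a signed 128-bit result. */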
static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_i128_det(haskellsecp256k1_v0_1_0_int128 *r, int64_t a, int64_t b, int64_t c, int64_t d) {
   haskellsecp256k1_v0_1_0_i128_mul(r, a, d);
   haskellsecp256k1_v0_1_0_i128_dissip_mul(r, b, c);
}

/* Signed (arithmetic) right shift.
 * Non-constant time in n.
 */
static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_i128_rshift(haskellsecp256k1_v0_1_0_int128 *r, unsigned int n) {
   VERIFY_CHECK(n < 128);
   if (n >= 64) {
     r->lo = (uint64_t)((int64_t)(r->hi) >> (n-64));
     r->hi = (uint64_t)((int64_t)(r->hi) >> 63);
   } else if (n > 0) {
     r->lo = ((1U * r->hi) << (64-n)) | r->lo >> n;
     r->hi = (uint64_t)((int64_t)(r->hi) >> n);
   }
}

static SECP256K1_INLINE uint64_t haskellsecp256k1_v0_1_0_i128_to_u64(const haskellsecp256k1_v0_1_0_int128 *a) {
   return a->lo;
}

static SECP256K1_INLINE int64_t haskellsecp256k1_v0_1_0_i128_to_i64(const haskellsecp256k1_v0_1_0_int128 *a) {
   /* Verify that a represents a 64 bit signed value by checking that the high bits are a sign extension of the low bits. */
   VERIFY_CHECK(a->hi == -(a->lo >> 63));
   return (int64_t)haskellsecp256k1_v0_1_0_i128_to_u64(a);
}

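/* Sign-extend a to 128 bits: the arithmetic shift by 63 replicates the sign
   bit, so the high limb becomes 0 for nonnegative a and all ones otherwise. */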
static SECP256K1_INLINE void haskellsecp256k1_v0_1_0_i128_from_i64(haskellsecp256k1_v0_1_0_int128 *r, int64_t a) {
   r->hi = (uint64_t)(a >> 63);
   r->lo = (uint64_t)a;
}

static SECP256K1_INLINE int haskellsecp256k1_v0_1_0_i128_eq_var(const haskellsecp256k1_v0_1_0_int128 *a, const haskellsecp256k1_v0_1_0_int128 *b) {
   return a->hi == b->hi && a->lo == b->lo;
}

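/* Test whether r equals sign * 2^n (sign is +1 or -1). For n < 64 the high
   limb must be the sign extension (0 or all ones) and the low limb
   sign * 2^n truncated to 64 bits; for n >= 64 the low limb must be zero
   and the high limb sign * 2^(n-64) truncated to 64 bits. */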
static SECP256K1_INLINE int haskellsecp256k1_v0_1_0_i128_check_pow2(const haskellsecp256k1_v0_1_0_int128 *r, unsigned int n, int sign) {
    VERIFY_CHECK(n < 127);
    VERIFY_CHECK(sign == 1 || sign == -1);
    return n >= 64 ? r->hi == (uint64_t)sign << (n - 64) && r->lo == 0
                   : r->hi == (uint64_t)(sign >> 1) && r->lo == (uint64_t)sign << n;
}

#endif