ecmult_impl.h (38513B)
/******************************************************************************
 * Copyright (c) 2013, 2014, 2017 Pieter Wuille, Andrew Poelstra, Jonas Nick *
 * Distributed under the MIT software license, see the accompanying          *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.      *
 ******************************************************************************/

#ifndef SECP256K1_ECMULT_IMPL_H
#define SECP256K1_ECMULT_IMPL_H

#include <string.h>
#include <stdint.h>

#include "util.h"
#include "group.h"
#include "scalar.h"
#include "ecmult.h"
#include "precomputed_ecmult.h"

#if defined(EXHAUSTIVE_TEST_ORDER)
/* We need to lower these values for exhaustive tests because
 * the tables cannot have infinities in them (this breaks the
 * affine-isomorphism stuff which tracks z-ratios) */
#  if EXHAUSTIVE_TEST_ORDER > 128
#    define WINDOW_A 5
#  elif EXHAUSTIVE_TEST_ORDER > 8
#    define WINDOW_A 4
#  else
#    define WINDOW_A 2
#  endif
#else
/* optimal for 128-bit and 256-bit exponents. */
#  define WINDOW_A 5
/** Larger values for ECMULT_WINDOW_SIZE result in possibly better
 *  performance at the cost of an exponentially larger precomputed
 *  table. The exact table size is
 *      (1 << (WINDOW_G - 2)) * sizeof(haskellsecp256k1_v0_1_0_ge_storage)
 *  bytes, where sizeof(haskellsecp256k1_v0_1_0_ge_storage) is typically 64 bytes
 *  but can be larger due to platform-specific padding and alignment.
 *  Two tables of this size are used (due to the endomorphism
 *  optimization).
 */
#endif
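/* Illustrative arithmetic (not part of the build): taking WINDOW_G = 15 as an
 * example -- 15 is the default ECMULT_WINDOW_SIZE in upstream libsecp256k1 --
 * and the typical 64-byte haskellsecp256k1_v0_1_0_ge_storage, each table takes
 *     (1 << (15 - 2)) * 64 = 8192 * 64 bytes = 512 KiB,
 * so the two tables together occupy about 1 MiB.
 */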
#define WNAF_BITS 128
#define WNAF_SIZE_BITS(bits, w) (((bits) + (w) - 1) / (w))
#define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)

/* The number of objects allocated on the scratch space for ecmult_multi algorithms */
#define PIPPENGER_SCRATCH_OBJECTS 6
#define STRAUSS_SCRATCH_OBJECTS 5

#define PIPPENGER_MAX_BUCKET_WINDOW 12

/* Minimum number of points for which pippenger_wnaf is faster than strauss wnaf */
#define ECMULT_PIPPENGER_THRESHOLD 88

#define ECMULT_MAX_POINTS_PER_BATCH 5000000

/** Fill a table 'pre_a' with precomputed odd multiples of a.
 *  pre_a will contain [1*a,3*a,...,(2*n-1)*a], so it needs space for n group elements.
 *  zr needs space for n field elements.
 *
 *  Although pre_a is an array of _ge rather than _gej, it actually represents elements
 *  in Jacobian coordinates with their z coordinates omitted. The omitted z-coordinates
 *  can be recovered using z and zr. Using the notation z(b) to represent the omitted
 *  z coordinate of b:
 *  - z(pre_a[n-1]) = 'z'
 *  - z(pre_a[i-1]) = z(pre_a[i]) / zr[i] for n > i > 0
 *
 *  Lastly the zr[0] value, which isn't used above, is set so that:
 *  - a.z = z(pre_a[0]) / zr[0]
 */
static void haskellsecp256k1_v0_1_0_ecmult_odd_multiples_table(int n, haskellsecp256k1_v0_1_0_ge *pre_a, haskellsecp256k1_v0_1_0_fe *zr, haskellsecp256k1_v0_1_0_fe *z, const haskellsecp256k1_v0_1_0_gej *a) {
    haskellsecp256k1_v0_1_0_gej d, ai;
    haskellsecp256k1_v0_1_0_ge d_ge;
    int i;

    VERIFY_CHECK(!a->infinity);

    haskellsecp256k1_v0_1_0_gej_double_var(&d, a, NULL);

    /*
     * Perform the additions using an isomorphic curve Y^2 = X^3 + 7*C^6 where C := d.z.
     * The isomorphism, phi, maps a secp256k1 point (x, y) to the point (x*C^2, y*C^3) on the other curve.
     * In Jacobian coordinates phi maps (x, y, z) to (x*C^2, y*C^3, z) or, equivalently to (x, y, z/C).
     *
     *     phi(x, y, z) = (x*C^2, y*C^3, z) = (x, y, z/C)
     *     d_ge := phi(d) = (d.x, d.y, 1)
     *     ai   := phi(a) = (a.x*C^2, a.y*C^3, a.z)
     *
     * The group addition functions work correctly on these isomorphic curves.
     * In particular phi(d) is easy to represent in affine coordinates under this isomorphism.
     * This lets us use the faster haskellsecp256k1_v0_1_0_gej_add_ge_var group addition function that we wouldn't be able to use otherwise.
     */
    haskellsecp256k1_v0_1_0_ge_set_xy(&d_ge, &d.x, &d.y);
    haskellsecp256k1_v0_1_0_ge_set_gej_zinv(&pre_a[0], a, &d.z);
    haskellsecp256k1_v0_1_0_gej_set_ge(&ai, &pre_a[0]);
    ai.z = a->z;

    /* pre_a[0] is the point (a.x*C^2, a.y*C^3, a.z*C) which is equivalent to a.
     * Set zr[0] to C, which is the ratio between the omitted z(pre_a[0]) value and a.z.
     */
    zr[0] = d.z;

    for (i = 1; i < n; i++) {
        haskellsecp256k1_v0_1_0_gej_add_ge_var(&ai, &ai, &d_ge, &zr[i]);
        haskellsecp256k1_v0_1_0_ge_set_xy(&pre_a[i], &ai.x, &ai.y);
    }

    /* Multiply the last z-coordinate by C to undo the isomorphism.
     * Since the z-coordinates of the pre_a values are implied by the zr array of z-coordinate ratios,
     * undoing the isomorphism here undoes the isomorphism for all pre_a values.
     */
    haskellsecp256k1_v0_1_0_fe_mul(z, &ai.z, &d.z);
}
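/* Illustrative sketch (not part of the library): a caller could expand the
 * omitted z-coordinates from the ratios above, following the documented
 * recurrence z(pre_a[i-1]) = z(pre_a[i]) / zr[i]. The buffer zs and the
 * variable tmp are hypothetical:
 *
 *     haskellsecp256k1_v0_1_0_fe zs[ECMULT_TABLE_SIZE(WINDOW_A)], tmp;
 *     int i;
 *     zs[n - 1] = *z;
 *     for (i = n - 1; i > 0; i--) {
 *         haskellsecp256k1_v0_1_0_fe_inv_var(&tmp, &zr[i]);          // one inversion per step;
 *         haskellsecp256k1_v0_1_0_fe_mul(&zs[i - 1], &zs[i], &tmp);  // real code would batch these
 *     }
 *
 * This file never expands the ratios like this; strauss_wnaf below hands them
 * directly to haskellsecp256k1_v0_1_0_ge_table_set_globalz instead.
 */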
SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_ecmult_table_verify(int n, int w) {
    (void)n;
    (void)w;
    VERIFY_CHECK(((n) & 1) == 1);
    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1));
    VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1));
}

SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_ecmult_table_get_ge(haskellsecp256k1_v0_1_0_ge *r, const haskellsecp256k1_v0_1_0_ge *pre, int n, int w) {
    haskellsecp256k1_v0_1_0_ecmult_table_verify(n,w);
    if (n > 0) {
        *r = pre[(n-1)/2];
    } else {
        *r = pre[(-n-1)/2];
        haskellsecp256k1_v0_1_0_fe_negate(&(r->y), &(r->y), 1);
    }
}

SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_ecmult_table_get_ge_lambda(haskellsecp256k1_v0_1_0_ge *r, const haskellsecp256k1_v0_1_0_ge *pre, const haskellsecp256k1_v0_1_0_fe *x, int n, int w) {
    haskellsecp256k1_v0_1_0_ecmult_table_verify(n,w);
    if (n > 0) {
        haskellsecp256k1_v0_1_0_ge_set_xy(r, &x[(n-1)/2], &pre[(n-1)/2].y);
    } else {
        haskellsecp256k1_v0_1_0_ge_set_xy(r, &x[(-n-1)/2], &pre[(-n-1)/2].y);
        haskellsecp256k1_v0_1_0_fe_negate(&(r->y), &(r->y), 1);
    }
}

SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_ecmult_table_get_ge_storage(haskellsecp256k1_v0_1_0_ge *r, const haskellsecp256k1_v0_1_0_ge_storage *pre, int n, int w) {
    haskellsecp256k1_v0_1_0_ecmult_table_verify(n,w);
    if (n > 0) {
        haskellsecp256k1_v0_1_0_ge_from_storage(r, &pre[(n-1)/2]);
    } else {
        haskellsecp256k1_v0_1_0_ge_from_storage(r, &pre[(-n-1)/2]);
        haskellsecp256k1_v0_1_0_fe_negate(&(r->y), &(r->y), 1);
    }
}
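/* Worked example (illustrative): the tables store only odd multiples
 * [1*a, 3*a, 5*a, ...], so a wnaf digit n maps to index (n-1)/2. For n = 5
 * the getters read pre[2] (which holds 5*a); for n = -5 they read pre[2] as
 * well but negate its y coordinate, since -(x, y) = (x, -y) on secp256k1.
 */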
/** Convert a number to WNAF notation. The number becomes represented by sum(2^i * wnaf[i], i=0..bits),
 *  with the following guarantees:
 *  - each wnaf[i] is either 0, or an odd integer between -((1<<(w-1)) - 1) and ((1<<(w-1)) - 1)
 *  - two non-zero entries in wnaf are separated by at least w-1 zeroes.
 *  - the number of set values in wnaf is returned. This number is at most 256, and at most one more
 *    than the number of bits in the (absolute value of the) input.
 */
static int haskellsecp256k1_v0_1_0_ecmult_wnaf(int *wnaf, int len, const haskellsecp256k1_v0_1_0_scalar *a, int w) {
    haskellsecp256k1_v0_1_0_scalar s;
    int last_set_bit = -1;
    int bit = 0;
    int sign = 1;
    int carry = 0;

    VERIFY_CHECK(wnaf != NULL);
    VERIFY_CHECK(0 <= len && len <= 256);
    VERIFY_CHECK(a != NULL);
    VERIFY_CHECK(2 <= w && w <= 31);

    memset(wnaf, 0, len * sizeof(wnaf[0]));

    s = *a;
    if (haskellsecp256k1_v0_1_0_scalar_get_bits(&s, 255, 1)) {
        haskellsecp256k1_v0_1_0_scalar_negate(&s, &s);
        sign = -1;
    }

    while (bit < len) {
        int now;
        int word;
        if (haskellsecp256k1_v0_1_0_scalar_get_bits(&s, bit, 1) == (unsigned int)carry) {
            bit++;
            continue;
        }

        now = w;
        if (now > len - bit) {
            now = len - bit;
        }

        word = haskellsecp256k1_v0_1_0_scalar_get_bits_var(&s, bit, now) + carry;

        carry = (word >> (w-1)) & 1;
        word -= carry << w;

        wnaf[bit] = sign * word;
        last_set_bit = bit;

        bit += now;
    }
#ifdef VERIFY
    {
        int verify_bit = bit;

        VERIFY_CHECK(carry == 0);

        while (verify_bit < 256) {
            VERIFY_CHECK(haskellsecp256k1_v0_1_0_scalar_get_bits(&s, verify_bit, 1) == 0);
            verify_bit++;
        }
    }
#endif
    return last_set_bit + 1;
}
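/* Worked example (illustrative): for w = 3 the algorithm rewrites 7 (binary
 * 111) as 7 = -1 + 1*2^3, i.e. wnaf[0] = -1, wnaf[3] = 1, everything else 0.
 * At bit 0 the extracted word 7 has its top window bit set, so a carry is
 * generated and 2^3 is subtracted, turning the run of ones into a single
 * subtraction; this is exactly why non-zero digits end up at least w-1
 * positions apart.
 */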
struct haskellsecp256k1_v0_1_0_strauss_point_state {
    int wnaf_na_1[129];
    int wnaf_na_lam[129];
    int bits_na_1;
    int bits_na_lam;
};

struct haskellsecp256k1_v0_1_0_strauss_state {
    /* aux is used to hold z-ratios, and then used to hold pre_a[i].x * BETA values. */
    haskellsecp256k1_v0_1_0_fe* aux;
    haskellsecp256k1_v0_1_0_ge* pre_a;
    struct haskellsecp256k1_v0_1_0_strauss_point_state* ps;
};
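/* Sizing note (illustrative): aux and pre_a each hold
 * ECMULT_TABLE_SIZE(WINDOW_A) entries per input point. With WINDOW_A = 5 that
 * is 1 << (5 - 2) = 8 entries, i.e. the odd multiples 1*a through 15*a. The
 * 129-entry wnaf arrays cover the ~128-bit scalar halves produced by the
 * lambda split, plus one slot for a possible digit at bit 128.
 */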
static void haskellsecp256k1_v0_1_0_ecmult_strauss_wnaf(const struct haskellsecp256k1_v0_1_0_strauss_state *state, haskellsecp256k1_v0_1_0_gej *r, size_t num, const haskellsecp256k1_v0_1_0_gej *a, const haskellsecp256k1_v0_1_0_scalar *na, const haskellsecp256k1_v0_1_0_scalar *ng) {
    haskellsecp256k1_v0_1_0_ge tmpa;
    haskellsecp256k1_v0_1_0_fe Z;
    /* Split G factors. */
    haskellsecp256k1_v0_1_0_scalar ng_1, ng_128;
    int wnaf_ng_1[129];
    int bits_ng_1 = 0;
    int wnaf_ng_128[129];
    int bits_ng_128 = 0;
    int i;
    int bits = 0;
    size_t np;
    size_t no = 0;

    haskellsecp256k1_v0_1_0_fe_set_int(&Z, 1);
    for (np = 0; np < num; ++np) {
        haskellsecp256k1_v0_1_0_gej tmp;
        haskellsecp256k1_v0_1_0_scalar na_1, na_lam;
        if (haskellsecp256k1_v0_1_0_scalar_is_zero(&na[np]) || haskellsecp256k1_v0_1_0_gej_is_infinity(&a[np])) {
            continue;
        }
        /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
        haskellsecp256k1_v0_1_0_scalar_split_lambda(&na_1, &na_lam, &na[np]);

        /* build wnaf representation for na_1 and na_lam. */
        state->ps[no].bits_na_1 = haskellsecp256k1_v0_1_0_ecmult_wnaf(state->ps[no].wnaf_na_1, 129, &na_1, WINDOW_A);
        state->ps[no].bits_na_lam = haskellsecp256k1_v0_1_0_ecmult_wnaf(state->ps[no].wnaf_na_lam, 129, &na_lam, WINDOW_A);
        VERIFY_CHECK(state->ps[no].bits_na_1 <= 129);
        VERIFY_CHECK(state->ps[no].bits_na_lam <= 129);
        if (state->ps[no].bits_na_1 > bits) {
            bits = state->ps[no].bits_na_1;
        }
        if (state->ps[no].bits_na_lam > bits) {
            bits = state->ps[no].bits_na_lam;
        }

        /* Calculate odd multiples of a.
         * All multiples are brought to the same Z 'denominator', which is stored
         * in Z. Due to secp256k1's isomorphism we can do all operations pretending
         * that the Z coordinate was 1, use affine addition formulae, and correct
         * the Z coordinate of the result once at the end.
         * The exception is the precomputed G table points, which are actually
         * affine. Compared to the base used for other points, they have a Z ratio
         * of 1/Z, so we can use haskellsecp256k1_v0_1_0_gej_add_zinv_var, which uses the same
         * isomorphism to efficiently add with a known Z inverse.
         */
        tmp = a[np];
        if (no) {
            haskellsecp256k1_v0_1_0_gej_rescale(&tmp, &Z);
        }
        haskellsecp256k1_v0_1_0_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->pre_a + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &Z, &tmp);
        if (no) haskellsecp256k1_v0_1_0_fe_mul(state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &(a[np].z));

        ++no;
    }

    /* Bring them to the same Z denominator. */
    if (no) {
        haskellsecp256k1_v0_1_0_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, state->aux);
    }

    for (np = 0; np < no; ++np) {
        for (i = 0; i < ECMULT_TABLE_SIZE(WINDOW_A); i++) {
            haskellsecp256k1_v0_1_0_fe_mul(&state->aux[np * ECMULT_TABLE_SIZE(WINDOW_A) + i], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + i].x, &haskellsecp256k1_v0_1_0_const_beta);
        }
    }

    if (ng) {
        /* split ng into ng_1 and ng_128 (where ng = ng_1 + ng_128*2^128, and ng_1 and ng_128 are ~128 bit) */
        haskellsecp256k1_v0_1_0_scalar_split_128(&ng_1, &ng_128, ng);

        /* Build wnaf representation for ng_1 and ng_128 */
        bits_ng_1 = haskellsecp256k1_v0_1_0_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G);
        bits_ng_128 = haskellsecp256k1_v0_1_0_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G);
        if (bits_ng_1 > bits) {
            bits = bits_ng_1;
        }
        if (bits_ng_128 > bits) {
            bits = bits_ng_128;
        }
    }

    haskellsecp256k1_v0_1_0_gej_set_infinity(r);

    for (i = bits - 1; i >= 0; i--) {
        int n;
        haskellsecp256k1_v0_1_0_gej_double_var(r, r, NULL);
        for (np = 0; np < no; ++np) {
            if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) {
                haskellsecp256k1_v0_1_0_ecmult_table_get_ge(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
                haskellsecp256k1_v0_1_0_gej_add_ge_var(r, r, &tmpa, NULL);
            }
            if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) {
                haskellsecp256k1_v0_1_0_ecmult_table_get_ge_lambda(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
                haskellsecp256k1_v0_1_0_gej_add_ge_var(r, r, &tmpa, NULL);
            }
        }
        if (i < bits_ng_1 && (n = wnaf_ng_1[i])) {
            haskellsecp256k1_v0_1_0_ecmult_table_get_ge_storage(&tmpa, haskellsecp256k1_v0_1_0_pre_g, n, WINDOW_G);
            haskellsecp256k1_v0_1_0_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
        if (i < bits_ng_128 && (n = wnaf_ng_128[i])) {
            haskellsecp256k1_v0_1_0_ecmult_table_get_ge_storage(&tmpa, haskellsecp256k1_v0_1_0_pre_g_128, n, WINDOW_G);
            haskellsecp256k1_v0_1_0_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
    }

    if (!r->infinity) {
        haskellsecp256k1_v0_1_0_fe_mul(&r->z, &r->z, &Z);
    }
}
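/* What the splits buy (illustrative): instead of 256 doublings for
 * r = na*A + ng*G, the main loop above runs at most 129 iterations because
 *     na*A = na_1*A + na_lam*(lambda*A)      (na_1, na_lam ~128 bits)
 *     ng*G = ng_1*G + ng_128*(2^128 * G)     (ng_1, ng_128 ~128 bits)
 * where lambda*A is read from the pre_a table via the beta-multiplied
 * x-coordinates held in aux, and odd multiples of 2^128*G come from the
 * second precomputed table, haskellsecp256k1_v0_1_0_pre_g_128.
 */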
static void haskellsecp256k1_v0_1_0_ecmult(haskellsecp256k1_v0_1_0_gej *r, const haskellsecp256k1_v0_1_0_gej *a, const haskellsecp256k1_v0_1_0_scalar *na, const haskellsecp256k1_v0_1_0_scalar *ng) {
    haskellsecp256k1_v0_1_0_fe aux[ECMULT_TABLE_SIZE(WINDOW_A)];
    haskellsecp256k1_v0_1_0_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
    struct haskellsecp256k1_v0_1_0_strauss_point_state ps[1];
    struct haskellsecp256k1_v0_1_0_strauss_state state;

    state.aux = aux;
    state.pre_a = pre_a;
    state.ps = ps;
    haskellsecp256k1_v0_1_0_ecmult_strauss_wnaf(&state, r, 1, a, na, ng);
}
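/* Usage sketch (illustrative, not part of the library): computing
 * r = na*A + ng*G for a single point A in Jacobian coordinates:
 *
 *     haskellsecp256k1_v0_1_0_gej r, A;       // A assumed initialized elsewhere
 *     haskellsecp256k1_v0_1_0_scalar na, ng;  // likewise
 *     haskellsecp256k1_v0_1_0_ecmult(&r, &A, &na, &ng);
 *
 * Passing ng == NULL skips the G term entirely, as the `if (ng)` guard in
 * strauss_wnaf above shows.
 */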
static size_t haskellsecp256k1_v0_1_0_strauss_scratch_size(size_t n_points) {
    static const size_t point_size = (sizeof(haskellsecp256k1_v0_1_0_ge) + sizeof(haskellsecp256k1_v0_1_0_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct haskellsecp256k1_v0_1_0_strauss_point_state) + sizeof(haskellsecp256k1_v0_1_0_gej) + sizeof(haskellsecp256k1_v0_1_0_scalar);
    return n_points*point_size;
}

static int haskellsecp256k1_v0_1_0_ecmult_strauss_batch(const haskellsecp256k1_v0_1_0_callback* error_callback, haskellsecp256k1_v0_1_0_scratch *scratch, haskellsecp256k1_v0_1_0_gej *r, const haskellsecp256k1_v0_1_0_scalar *inp_g_sc, haskellsecp256k1_v0_1_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
    haskellsecp256k1_v0_1_0_gej* points;
    haskellsecp256k1_v0_1_0_scalar* scalars;
    struct haskellsecp256k1_v0_1_0_strauss_state state;
    size_t i;
    const size_t scratch_checkpoint = haskellsecp256k1_v0_1_0_scratch_checkpoint(error_callback, scratch);

    haskellsecp256k1_v0_1_0_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {
        return 1;
    }

    /* We allocate STRAUSS_SCRATCH_OBJECTS objects on the scratch space. If these
     * allocations change, make sure to update the STRAUSS_SCRATCH_OBJECTS
     * constant and strauss_scratch_size accordingly.
     */
    points = (haskellsecp256k1_v0_1_0_gej*)haskellsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, n_points * sizeof(haskellsecp256k1_v0_1_0_gej));
    scalars = (haskellsecp256k1_v0_1_0_scalar*)haskellsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, n_points * sizeof(haskellsecp256k1_v0_1_0_scalar));
    state.aux = (haskellsecp256k1_v0_1_0_fe*)haskellsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(haskellsecp256k1_v0_1_0_fe));
    state.pre_a = (haskellsecp256k1_v0_1_0_ge*)haskellsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(haskellsecp256k1_v0_1_0_ge));
    state.ps = (struct haskellsecp256k1_v0_1_0_strauss_point_state*)haskellsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, n_points * sizeof(struct haskellsecp256k1_v0_1_0_strauss_point_state));

    if (points == NULL || scalars == NULL || state.aux == NULL || state.pre_a == NULL || state.ps == NULL) {
        haskellsecp256k1_v0_1_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }

    for (i = 0; i < n_points; i++) {
        haskellsecp256k1_v0_1_0_ge point;
        if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
            haskellsecp256k1_v0_1_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
            return 0;
        }
        haskellsecp256k1_v0_1_0_gej_set_ge(&points[i], &point);
    }
    haskellsecp256k1_v0_1_0_ecmult_strauss_wnaf(&state, r, n_points, points, scalars, inp_g_sc);
    haskellsecp256k1_v0_1_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
    return 1;
}

/* Wrapper for haskellsecp256k1_v0_1_0_ecmult_multi_func interface */
static int haskellsecp256k1_v0_1_0_ecmult_strauss_batch_single(const haskellsecp256k1_v0_1_0_callback* error_callback, haskellsecp256k1_v0_1_0_scratch *scratch, haskellsecp256k1_v0_1_0_gej *r, const haskellsecp256k1_v0_1_0_scalar *inp_g_sc, haskellsecp256k1_v0_1_0_ecmult_multi_callback cb, void *cbdata, size_t n) {
    return haskellsecp256k1_v0_1_0_ecmult_strauss_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}

static size_t haskellsecp256k1_v0_1_0_strauss_max_points(const haskellsecp256k1_v0_1_0_callback* error_callback, haskellsecp256k1_v0_1_0_scratch *scratch) {
    return haskellsecp256k1_v0_1_0_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / haskellsecp256k1_v0_1_0_strauss_scratch_size(1);
}
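/* Sizing note (illustrative): strauss_max_points is a plain division, so
 * batch capacity scales linearly with the scratch space. For a hypothetical
 * scratch offering 1 MiB after the STRAUSS_SCRATCH_OBJECTS allocation
 * overheads, and supposing strauss_scratch_size(1) came to ~1 KiB on some
 * platform, batches of roughly 1000 points would fit; the exact figure
 * depends on sizeof(haskellsecp256k1_v0_1_0_ge) etc. on the target.
 */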
/** Convert a number to WNAF notation.
 *  The number becomes represented by sum(2^(w*i) * wnaf[i], i=0..WNAF_SIZE(w)-1) - return_val.
 *  It has the following guarantees:
 *  - each wnaf[i] is either 0 or an odd integer between -(1 << w) and (1 << w)
 *  - the number of words set is always WNAF_SIZE(w)
 *  - the returned skew is 0 or 1
 */
static int haskellsecp256k1_v0_1_0_wnaf_fixed(int *wnaf, const haskellsecp256k1_v0_1_0_scalar *s, int w) {
    int skew = 0;
    int pos;
    int max_pos;
    int last_w;
    const haskellsecp256k1_v0_1_0_scalar *work = s;

    if (haskellsecp256k1_v0_1_0_scalar_is_zero(s)) {
        for (pos = 0; pos < WNAF_SIZE(w); pos++) {
            wnaf[pos] = 0;
        }
        return 0;
    }

    if (haskellsecp256k1_v0_1_0_scalar_is_even(s)) {
        skew = 1;
    }

    wnaf[0] = haskellsecp256k1_v0_1_0_scalar_get_bits_var(work, 0, w) + skew;
    /* Compute last window size. Relevant when window size doesn't divide the
     * number of bits in the scalar */
    last_w = WNAF_BITS - (WNAF_SIZE(w) - 1) * w;

    /* Store the position of the first nonzero word in max_pos to allow
     * skipping leading zeros when calculating the wnaf. */
    for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) {
        int val = haskellsecp256k1_v0_1_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
        if(val != 0) {
            break;
        }
        wnaf[pos] = 0;
    }
    max_pos = pos;
    pos = 1;

    while (pos <= max_pos) {
        int val = haskellsecp256k1_v0_1_0_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
        if ((val & 1) == 0) {
            wnaf[pos - 1] -= (1 << w);
            wnaf[pos] = (val + 1);
        } else {
            wnaf[pos] = val;
        }
        /* Set a coefficient to zero if it is 1 or -1 and the preceding digit
         * is strictly negative or strictly positive respectively. Only change
         * coefficients at previous positions because above code assumes that
         * wnaf[pos - 1] is odd.
         */
        if (pos >= 2 && ((wnaf[pos - 1] == 1 && wnaf[pos - 2] < 0) || (wnaf[pos - 1] == -1 && wnaf[pos - 2] > 0))) {
            if (wnaf[pos - 1] == 1) {
                wnaf[pos - 2] += 1 << w;
            } else {
                wnaf[pos - 2] -= 1 << w;
            }
            wnaf[pos - 1] = 0;
        }
        ++pos;
    }

    return skew;
}
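/* Worked example (illustrative): for w = 2 and the even scalar s = 6, the
 * skew is 1 and the function encodes s + skew = 7: wnaf[0] = bits(6,0,2) + 1
 * = 3, wnaf[1] = 1 (bits 2..3 of 6), and all higher words 0. Indeed
 * 3*4^0 + 1*4^1 - skew = 7 - 1 = 6. Unlike ecmult_wnaf above, every word is
 * assigned a value (possibly 0), which is what makes this form usable with a
 * fixed processing schedule.
 */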
struct haskellsecp256k1_v0_1_0_pippenger_point_state {
    int skew_na;
    size_t input_pos;
};

struct haskellsecp256k1_v0_1_0_pippenger_state {
    int *wnaf_na;
    struct haskellsecp256k1_v0_1_0_pippenger_point_state* ps;
};

/*
 * pippenger_wnaf computes the result of a multi-point multiplication as
 * follows: The scalars are brought into wnaf with n_wnaf elements each. Then
 * for every i < n_wnaf, first each point is added to a "bucket" corresponding
 * to the point's wnaf[i]. Second, the buckets are added together such that
 * r += 1*bucket[0] + 3*bucket[1] + 5*bucket[2] + ...
 */
static int haskellsecp256k1_v0_1_0_ecmult_pippenger_wnaf(haskellsecp256k1_v0_1_0_gej *buckets, int bucket_window, struct haskellsecp256k1_v0_1_0_pippenger_state *state, haskellsecp256k1_v0_1_0_gej *r, const haskellsecp256k1_v0_1_0_scalar *sc, const haskellsecp256k1_v0_1_0_ge *pt, size_t num) {
    size_t n_wnaf = WNAF_SIZE(bucket_window+1);
    size_t np;
    size_t no = 0;
    int i;
    int j;

    for (np = 0; np < num; ++np) {
        if (haskellsecp256k1_v0_1_0_scalar_is_zero(&sc[np]) || haskellsecp256k1_v0_1_0_ge_is_infinity(&pt[np])) {
            continue;
        }
        state->ps[no].input_pos = np;
        state->ps[no].skew_na = haskellsecp256k1_v0_1_0_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1);
        no++;
    }
    haskellsecp256k1_v0_1_0_gej_set_infinity(r);

    if (no == 0) {
        return 1;
    }

    for (i = n_wnaf - 1; i >= 0; i--) {
        haskellsecp256k1_v0_1_0_gej running_sum;

        for(j = 0; j < ECMULT_TABLE_SIZE(bucket_window+2); j++) {
            haskellsecp256k1_v0_1_0_gej_set_infinity(&buckets[j]);
        }

        for (np = 0; np < no; ++np) {
            int n = state->wnaf_na[np*n_wnaf + i];
            struct haskellsecp256k1_v0_1_0_pippenger_point_state point_state = state->ps[np];
            haskellsecp256k1_v0_1_0_ge tmp;
            int idx;

            if (i == 0) {
                /* correct for wnaf skew */
                int skew = point_state.skew_na;
                if (skew) {
                    haskellsecp256k1_v0_1_0_ge_neg(&tmp, &pt[point_state.input_pos]);
                    haskellsecp256k1_v0_1_0_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL);
                }
            }
            if (n > 0) {
                idx = (n - 1)/2;
                haskellsecp256k1_v0_1_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &pt[point_state.input_pos], NULL);
            } else if (n < 0) {
                idx = -(n + 1)/2;
                haskellsecp256k1_v0_1_0_ge_neg(&tmp, &pt[point_state.input_pos]);
                haskellsecp256k1_v0_1_0_gej_add_ge_var(&buckets[idx], &buckets[idx], &tmp, NULL);
            }
        }

        for(j = 0; j < bucket_window; j++) {
            haskellsecp256k1_v0_1_0_gej_double_var(r, r, NULL);
        }

        haskellsecp256k1_v0_1_0_gej_set_infinity(&running_sum);
        /* Accumulate the sum: bucket[0] + 3*bucket[1] + 5*bucket[2] + 7*bucket[3] + ...
         * = bucket[0] +   bucket[1] +   bucket[2] +   bucket[3] + ...
         *   + 2 *       (bucket[1] + 2*bucket[2] + 3*bucket[3] + ...)
         * using an intermediate running sum:
         * running_sum = bucket[0] + bucket[1] + bucket[2] + ...
         *
         * The doubling is done implicitly by deferring the final window doubling (of 'r').
         */
        for(j = ECMULT_TABLE_SIZE(bucket_window+2) - 1; j > 0; j--) {
            haskellsecp256k1_v0_1_0_gej_add_var(&running_sum, &running_sum, &buckets[j], NULL);
            haskellsecp256k1_v0_1_0_gej_add_var(r, r, &running_sum, NULL);
        }

        haskellsecp256k1_v0_1_0_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL);
        haskellsecp256k1_v0_1_0_gej_double_var(r, r, NULL);
        haskellsecp256k1_v0_1_0_gej_add_var(r, r, &running_sum, NULL);
    }
    return 1;
}
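/* Worked example (illustrative): with three buckets b0, b1, b2 the inner loop
 * above produces, step by step (j = 2, then j = 1):
 *     running_sum = b2;          r += b2
 *     running_sum = b2 + b1;     r += b1 + b2
 * then r is doubled and running_sum + b0 is added:
 *     r = 2*(b1 + 2*b2) + (b0 + b1 + b2) = b0 + 3*b1 + 5*b2
 * which is exactly the weighted bucket sum described in the comment above,
 * computed with only additions plus a single doubling.
 */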
/**
 * Returns optimal bucket_window (number of bits of a scalar represented by a
 * set of buckets) for a given number of points.
 */
static int haskellsecp256k1_v0_1_0_pippenger_bucket_window(size_t n) {
    if (n <= 1) {
        return 1;
    } else if (n <= 4) {
        return 2;
    } else if (n <= 20) {
        return 3;
    } else if (n <= 57) {
        return 4;
    } else if (n <= 136) {
        return 5;
    } else if (n <= 235) {
        return 6;
    } else if (n <= 1260) {
        return 7;
    } else if (n <= 4420) {
        return 9;
    } else if (n <= 7880) {
        return 10;
    } else if (n <= 16050) {
        return 11;
    } else {
        return PIPPENGER_MAX_BUCKET_WINDOW;
    }
}

/**
 * Returns the maximum optimal number of points for a bucket_window.
 */
static size_t haskellsecp256k1_v0_1_0_pippenger_bucket_window_inv(int bucket_window) {
    switch(bucket_window) {
        case 1: return 1;
        case 2: return 4;
        case 3: return 20;
        case 4: return 57;
        case 5: return 136;
        case 6: return 235;
        case 7: return 1260;
        case 8: return 1260;
        case 9: return 4420;
        case 10: return 7880;
        case 11: return 16050;
        case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
    }
    return 0;
}


SECP256K1_INLINE static void haskellsecp256k1_v0_1_0_ecmult_endo_split(haskellsecp256k1_v0_1_0_scalar *s1, haskellsecp256k1_v0_1_0_scalar *s2, haskellsecp256k1_v0_1_0_ge *p1, haskellsecp256k1_v0_1_0_ge *p2) {
    haskellsecp256k1_v0_1_0_scalar tmp = *s1;
    haskellsecp256k1_v0_1_0_scalar_split_lambda(s1, s2, &tmp);
    haskellsecp256k1_v0_1_0_ge_mul_lambda(p2, p1);

    if (haskellsecp256k1_v0_1_0_scalar_is_high(s1)) {
        haskellsecp256k1_v0_1_0_scalar_negate(s1, s1);
        haskellsecp256k1_v0_1_0_ge_neg(p1, p1);
    }
    if (haskellsecp256k1_v0_1_0_scalar_is_high(s2)) {
        haskellsecp256k1_v0_1_0_scalar_negate(s2, s2);
        haskellsecp256k1_v0_1_0_ge_neg(p2, p2);
    }
}

/**
 * Returns the scratch size required for a given number of points (excluding
 * base point G) without considering alignment.
 */
static size_t haskellsecp256k1_v0_1_0_pippenger_scratch_size(size_t n_points, int bucket_window) {
    size_t entries = 2*n_points + 2;
    size_t entry_size = sizeof(haskellsecp256k1_v0_1_0_ge) + sizeof(haskellsecp256k1_v0_1_0_scalar) + sizeof(struct haskellsecp256k1_v0_1_0_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
    return (sizeof(haskellsecp256k1_v0_1_0_gej) << bucket_window) + sizeof(struct haskellsecp256k1_v0_1_0_pippenger_state) + entries * entry_size;
}
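/* Worked identity (illustrative): endo_split rewrites s*P as s1*P1 + s2*P2
 * where s = s1 + lambda*s2 (mod n), P1 = P and P2 = lambda*P. On secp256k1,
 * lambda*(x, y) = (beta*x, y) for the cube root of unity beta, so P2 costs
 * only one field multiplication. Negating any "high" half keeps both scalars
 * short (~128 bits), which halves the number of wnaf windows each input point
 * contributes.
 */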
static int haskellsecp256k1_v0_1_0_ecmult_pippenger_batch(const haskellsecp256k1_v0_1_0_callback* error_callback, haskellsecp256k1_v0_1_0_scratch *scratch, haskellsecp256k1_v0_1_0_gej *r, const haskellsecp256k1_v0_1_0_scalar *inp_g_sc, haskellsecp256k1_v0_1_0_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
    const size_t scratch_checkpoint = haskellsecp256k1_v0_1_0_scratch_checkpoint(error_callback, scratch);
    /* Use 2(n+1) with the endomorphism, when calculating batch
     * sizes. The reason for +1 is that we add the G scalar to the list of
     * other scalars.
     */
    size_t entries = 2*n_points + 2;
    haskellsecp256k1_v0_1_0_ge *points;
    haskellsecp256k1_v0_1_0_scalar *scalars;
    haskellsecp256k1_v0_1_0_gej *buckets;
    struct haskellsecp256k1_v0_1_0_pippenger_state *state_space;
    size_t idx = 0;
    size_t point_idx = 0;
    int i, j;
    int bucket_window;

    haskellsecp256k1_v0_1_0_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {
        return 1;
    }
    bucket_window = haskellsecp256k1_v0_1_0_pippenger_bucket_window(n_points);

    /* We allocate PIPPENGER_SCRATCH_OBJECTS objects on the scratch space. If
     * these allocations change, make sure to update the
     * PIPPENGER_SCRATCH_OBJECTS constant and pippenger_scratch_size
     * accordingly. */
    points = (haskellsecp256k1_v0_1_0_ge *) haskellsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
    scalars = (haskellsecp256k1_v0_1_0_scalar *) haskellsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
    state_space = (struct haskellsecp256k1_v0_1_0_pippenger_state *) haskellsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, sizeof(*state_space));
    if (points == NULL || scalars == NULL || state_space == NULL) {
        haskellsecp256k1_v0_1_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }
    state_space->ps = (struct haskellsecp256k1_v0_1_0_pippenger_point_state *) haskellsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps));
    state_space->wnaf_na = (int *) haskellsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
    buckets = (haskellsecp256k1_v0_1_0_gej *) haskellsecp256k1_v0_1_0_scratch_alloc(error_callback, scratch, ((size_t)1 << bucket_window) * sizeof(*buckets));
    if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) {
        haskellsecp256k1_v0_1_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }

    if (inp_g_sc != NULL) {
        scalars[0] = *inp_g_sc;
        points[0] = haskellsecp256k1_v0_1_0_ge_const_g;
        idx++;
        haskellsecp256k1_v0_1_0_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
        idx++;
    }

    while (point_idx < n_points) {
        if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
            haskellsecp256k1_v0_1_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
            return 0;
        }
        idx++;
        haskellsecp256k1_v0_1_0_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
        idx++;
        point_idx++;
    }

    haskellsecp256k1_v0_1_0_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx);

    /* Clear data */
    for(i = 0; (size_t)i < idx; i++) {
        haskellsecp256k1_v0_1_0_scalar_clear(&scalars[i]);
        state_space->ps[i].skew_na = 0;
        for(j = 0; j < WNAF_SIZE(bucket_window+1); j++) {
            state_space->wnaf_na[i * WNAF_SIZE(bucket_window+1) + j] = 0;
        }
    }
    for(i = 0; i < 1<<bucket_window; i++) {
        haskellsecp256k1_v0_1_0_gej_clear(&buckets[i]);
    }
    haskellsecp256k1_v0_1_0_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
    return 1;
}

/* Wrapper for haskellsecp256k1_v0_1_0_ecmult_multi_func interface */
static int haskellsecp256k1_v0_1_0_ecmult_pippenger_batch_single(const haskellsecp256k1_v0_1_0_callback* error_callback, haskellsecp256k1_v0_1_0_scratch *scratch, haskellsecp256k1_v0_1_0_gej *r, const haskellsecp256k1_v0_1_0_scalar *inp_g_sc, haskellsecp256k1_v0_1_0_ecmult_multi_callback cb, void *cbdata, size_t n) {
    return haskellsecp256k1_v0_1_0_ecmult_pippenger_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}

/**
 * Returns the maximum number of points in addition to G that can be used with
 * a given scratch space. Any number of points less than or equal to the
 * returned value can safely be used with the same scratch space.
 */
static size_t haskellsecp256k1_v0_1_0_pippenger_max_points(const haskellsecp256k1_v0_1_0_callback* error_callback, haskellsecp256k1_v0_1_0_scratch *scratch) {
    size_t max_alloc = haskellsecp256k1_v0_1_0_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS);
    int bucket_window;
    size_t res = 0;

    for (bucket_window = 1; bucket_window <= PIPPENGER_MAX_BUCKET_WINDOW; bucket_window++) {
        size_t n_points;
        size_t max_points = haskellsecp256k1_v0_1_0_pippenger_bucket_window_inv(bucket_window);
        size_t space_for_points;
        size_t space_overhead;
        size_t entry_size = sizeof(haskellsecp256k1_v0_1_0_ge) + sizeof(haskellsecp256k1_v0_1_0_scalar) + sizeof(struct haskellsecp256k1_v0_1_0_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);

        entry_size = 2*entry_size;
        space_overhead = (sizeof(haskellsecp256k1_v0_1_0_gej) << bucket_window) + entry_size + sizeof(struct haskellsecp256k1_v0_1_0_pippenger_state);
        if (space_overhead > max_alloc) {
            break;
        }
        space_for_points = max_alloc - space_overhead;

        n_points = space_for_points/entry_size;
        n_points = n_points > max_points ? max_points : n_points;
        if (n_points > res) {
            res = n_points;
        }
        if (n_points < max_points) {
            /* A larger bucket_window may support even more points. But if we
             * would choose that then the caller couldn't safely use any number
             * smaller than what this function returns */
            break;
        }
    }
    return res;
}

/* Computes ecmult_multi by simply multiplying and adding each point. Does not
 * require a scratch space */
static int haskellsecp256k1_v0_1_0_ecmult_multi_simple_var(haskellsecp256k1_v0_1_0_gej *r, const haskellsecp256k1_v0_1_0_scalar *inp_g_sc, haskellsecp256k1_v0_1_0_ecmult_multi_callback cb, void *cbdata, size_t n_points) {
    size_t point_idx;
    haskellsecp256k1_v0_1_0_gej tmpj;

    haskellsecp256k1_v0_1_0_gej_set_infinity(r);
    haskellsecp256k1_v0_1_0_gej_set_infinity(&tmpj);
    /* r = inp_g_sc*G */
    haskellsecp256k1_v0_1_0_ecmult(r, &tmpj, &haskellsecp256k1_v0_1_0_scalar_zero, inp_g_sc);
    for (point_idx = 0; point_idx < n_points; point_idx++) {
        haskellsecp256k1_v0_1_0_ge point;
        haskellsecp256k1_v0_1_0_gej pointj;
        haskellsecp256k1_v0_1_0_scalar scalar;
        if (!cb(&scalar, &point, point_idx, cbdata)) {
            return 0;
        }
        /* r += scalar*point */
        haskellsecp256k1_v0_1_0_gej_set_ge(&pointj, &point);
        haskellsecp256k1_v0_1_0_ecmult(&tmpj, &pointj, &scalar, NULL);
        haskellsecp256k1_v0_1_0_gej_add_var(r, r, &tmpj, NULL);
    }
    return 1;
}

/* Compute the number of batches and the batch size given the maximum batch size and the
 * total number of points */
static int haskellsecp256k1_v0_1_0_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) {
    if (max_n_batch_points == 0) {
        return 0;
    }
    if (max_n_batch_points > ECMULT_MAX_POINTS_PER_BATCH) {
        max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH;
    }
    if (n == 0) {
        *n_batches = 0;
        *n_batch_points = 0;
        return 1;
    }
    /* Compute ceil(n/max_n_batch_points) and ceil(n/n_batches) */
    *n_batches = 1 + (n - 1) / max_n_batch_points;
    *n_batch_points = 1 + (n - 1) / *n_batches;
    return 1;
}
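/* Worked example (illustrative): for n = 100 points and
 * max_n_batch_points = 60, the helper computes n_batches = ceil(100/60) = 2
 * and n_batch_points = ceil(100/2) = 50, i.e. two evenly sized batches of 50
 * rather than a batch of 60 followed by a ragged batch of 40.
 */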
typedef int (*haskellsecp256k1_v0_1_0_ecmult_multi_func)(const haskellsecp256k1_v0_1_0_callback* error_callback, haskellsecp256k1_v0_1_0_scratch*, haskellsecp256k1_v0_1_0_gej*, const haskellsecp256k1_v0_1_0_scalar*, haskellsecp256k1_v0_1_0_ecmult_multi_callback cb, void*, size_t);
static int haskellsecp256k1_v0_1_0_ecmult_multi_var(const haskellsecp256k1_v0_1_0_callback* error_callback, haskellsecp256k1_v0_1_0_scratch *scratch, haskellsecp256k1_v0_1_0_gej *r, const haskellsecp256k1_v0_1_0_scalar *inp_g_sc, haskellsecp256k1_v0_1_0_ecmult_multi_callback cb, void *cbdata, size_t n) {
    size_t i;

    int (*f)(const haskellsecp256k1_v0_1_0_callback* error_callback, haskellsecp256k1_v0_1_0_scratch*, haskellsecp256k1_v0_1_0_gej*, const haskellsecp256k1_v0_1_0_scalar*, haskellsecp256k1_v0_1_0_ecmult_multi_callback cb, void*, size_t, size_t);
    size_t n_batches;
    size_t n_batch_points;

    haskellsecp256k1_v0_1_0_gej_set_infinity(r);
    if (inp_g_sc == NULL && n == 0) {
        return 1;
    } else if (n == 0) {
        haskellsecp256k1_v0_1_0_ecmult(r, r, &haskellsecp256k1_v0_1_0_scalar_zero, inp_g_sc);
        return 1;
    }
    if (scratch == NULL) {
        return haskellsecp256k1_v0_1_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
    }

    /* Compute the batch sizes for Pippenger's algorithm given a scratch space. If it's greater than
     * a threshold use Pippenger's algorithm. Otherwise use Strauss' algorithm.
     * As a first step check if there's enough space for Pippenger's algo (which requires less space
     * than Strauss' algo) and if not, use the simple algorithm.
     */
    if (!haskellsecp256k1_v0_1_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, haskellsecp256k1_v0_1_0_pippenger_max_points(error_callback, scratch), n)) {
        return haskellsecp256k1_v0_1_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
    }
    if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
        f = haskellsecp256k1_v0_1_0_ecmult_pippenger_batch;
    } else {
        if (!haskellsecp256k1_v0_1_0_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, haskellsecp256k1_v0_1_0_strauss_max_points(error_callback, scratch), n)) {
            return haskellsecp256k1_v0_1_0_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
        }
        f = haskellsecp256k1_v0_1_0_ecmult_strauss_batch;
    }
    for(i = 0; i < n_batches; i++) {
        size_t nbp = n < n_batch_points ? n : n_batch_points;
        size_t offset = n_batch_points*i;
        haskellsecp256k1_v0_1_0_gej tmp;
        if (!f(error_callback, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
            return 0;
        }
        haskellsecp256k1_v0_1_0_gej_add_var(r, r, &tmp, NULL);
        n -= nbp;
    }
    return 1;
}

#endif /* SECP256K1_ECMULT_IMPL_H */