scalar_4x64_impl.h
/***********************************************************************
 * Copyright (c) 2013, 2014 Pieter Wuille                              *
 * Distributed under the MIT software license, see the accompanying    *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 ***********************************************************************/

#ifndef SECP256K1_SCALAR_REPR_IMPL_H
#define SECP256K1_SCALAR_REPR_IMPL_H

#include "checkmem.h"
#include "int128.h"
#include "modinv64_impl.h"
#include "util.h"

/* Limbs of the secp256k1 order. */
#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)

/* Limbs of 2^256 minus the secp256k1 order. */
#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
#define SECP256K1_N_C_1 (~SECP256K1_N_1)
#define SECP256K1_N_C_2 (1)
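
/* Note: since the order n = 2^256 - SECP256K1_N_C, we have
 * 2^256 == SECP256K1_N_C (mod n), where SECP256K1_N_C =
 * (SECP256K1_N_C_2, SECP256K1_N_C_1, SECP256K1_N_C_0) is only 129 bits wide.
 * The reduction code below exploits this: instead of subtracting multiples
 * of n, it folds high limbs back in by multiplying them with SECP256K1_N_C. */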

/* Limbs of half the secp256k1 order. */
#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
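
/* Note: SECP256K1_N_H equals (n - 1) / 2; secp256k1_scalar_is_high below
 * reports whether a scalar exceeds this half-order threshold. */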

SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
    r->d[0] = 0;
    r->d[1] = 0;
    r->d[2] = 0;
    r->d[3] = 0;
}

SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
    r->d[0] = v;
    r->d[1] = 0;
    r->d[2] = 0;
    r->d[3] = 0;
}

SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
    return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
}

SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
    VERIFY_CHECK(count < 32);
    VERIFY_CHECK(offset + count <= 256);
    if ((offset + count - 1) >> 6 == offset >> 6) {
        return secp256k1_scalar_get_bits(a, offset, count);
    } else {
        VERIFY_CHECK((offset >> 6) + 1 < 4);
        return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
    }
}

SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
    int yes = 0;
    int no = 0;
    no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */
    no |= (a->d[2] < SECP256K1_N_2);
    yes |= (a->d[2] > SECP256K1_N_2) & ~no;
    no |= (a->d[1] < SECP256K1_N_1);
    yes |= (a->d[1] > SECP256K1_N_1) & ~no;
    yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
    return yes;
}

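/* Note: reducing here means adding overflow * SECP256K1_N_C modulo 2^256,
 * which is equivalent to subtracting overflow * n, since
 * 2^256 == SECP256K1_N_C (mod n). */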
SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
    secp256k1_uint128 t;
    VERIFY_CHECK(overflow <= 1);
    secp256k1_u128_from_u64(&t, r->d[0]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_0);
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_1);
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2]);
    secp256k1_u128_accum_u64(&t, overflow * SECP256K1_N_C_2);
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3]);
    r->d[3] = secp256k1_u128_to_u64(&t);
    return overflow;
}

static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    int overflow;
    secp256k1_uint128 t;
    secp256k1_u128_from_u64(&t, a->d[0]);
    secp256k1_u128_accum_u64(&t, b->d[0]);
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[1]);
    secp256k1_u128_accum_u64(&t, b->d[1]);
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[2]);
    secp256k1_u128_accum_u64(&t, b->d[2]);
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, a->d[3]);
    secp256k1_u128_accum_u64(&t, b->d[3]);
    r->d[3] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    overflow = secp256k1_u128_to_u64(&t) + secp256k1_scalar_check_overflow(r);
    VERIFY_CHECK(overflow == 0 || overflow == 1);
    secp256k1_scalar_reduce(r, overflow);
    return overflow;
}

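/* Note: this conditionally adds 2^bit to r, in constant time with respect to
 * flag. When flag is 0, bit is bumped past 255 so every (bit >> 6) == k test
 * below is false and nothing is added; the volatile read of flag keeps the
 * compiler from turning this back into a branch. */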
static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
    secp256k1_uint128 t;
    volatile int vflag = flag;
    VERIFY_CHECK(bit < 256);
    bit += ((uint32_t) vflag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */
    secp256k1_u128_from_u64(&t, r->d[0]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
    r->d[0] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
    r->d[1] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
    r->d[2] = secp256k1_u128_to_u64(&t); secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3]);
    secp256k1_u128_accum_u64(&t, ((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
    r->d[3] = secp256k1_u128_to_u64(&t);
#ifdef VERIFY
    VERIFY_CHECK(secp256k1_u128_hi_u64(&t) == 0);
#endif
}

static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
    int over;
    r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56;
    r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56;
    r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56;
    r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56;
    over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
    if (overflow) {
        *overflow = over;
    }
}

static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
    bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3];
    bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2];
    bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1];
    bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
}
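
/* Illustrative sketch (not part of the original file): a value below the
 * group order round-trips through the 32-byte big-endian form unchanged.
 *
 *     secp256k1_scalar s;
 *     int over;
 *     unsigned char buf[32] = {0};
 *     buf[31] = 7;
 *     secp256k1_scalar_set_b32(&s, buf, &over);  // s = 7, over == 0
 *     secp256k1_scalar_get_b32(buf, &s);         // writes back the same bytes
 */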

SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
    return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
}

static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
    uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
    secp256k1_uint128 t;
    secp256k1_u128_from_u64(&t, ~a->d[0]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_0 + 1);
    r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[1]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_1);
    r->d[1] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[2]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_2);
    r->d[2] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, ~a->d[3]);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_3);
    r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
}

SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
    return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
}

static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
    int yes = 0;
    int no = 0;
    no |= (a->d[3] < SECP256K1_N_H_3);
    yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
    no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */
    no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
    yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
    yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
    return yes;
}

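/* Note: conditionally negates r modulo n, in constant time with respect to
 * flag. Returns -1 if r was negated (flag = 1), +1 otherwise (flag = 0). */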
static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
    /* If flag = 0, mask = 00...00 and this is a no-op;
     * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
    volatile int vflag = flag;
    uint64_t mask = -vflag;
    uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
    secp256k1_uint128 t;
    secp256k1_u128_from_u64(&t, r->d[0] ^ mask);
    secp256k1_u128_accum_u64(&t, (SECP256K1_N_0 + 1) & mask);
    r->d[0] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[1] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_1 & mask);
    r->d[1] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[2] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_2 & mask);
    r->d[2] = secp256k1_u128_to_u64(&t) & nonzero; secp256k1_u128_rshift(&t, 64);
    secp256k1_u128_accum_u64(&t, r->d[3] ^ mask);
    secp256k1_u128_accum_u64(&t, SECP256K1_N_3 & mask);
    r->d[3] = secp256k1_u128_to_u64(&t) & nonzero;
    return 2 * (mask == 0) - 1;
}

/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */

/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
#define muladd(a,b) { \
    uint64_t tl, th; \
    { \
        secp256k1_uint128 t; \
        secp256k1_u128_mul(&t, a, b); \
        th = secp256k1_u128_hi_u64(&t); /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = secp256k1_u128_to_u64(&t); \
    } \
    c0 += tl; /* overflow is handled on the next line */ \
    th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th; /* overflow is handled on the next line */ \
    c2 += (c1 < th); /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
}

/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
#define muladd_fast(a,b) { \
    uint64_t tl, th; \
    { \
        secp256k1_uint128 t; \
        secp256k1_u128_mul(&t, a, b); \
        th = secp256k1_u128_hi_u64(&t); /* at most 0xFFFFFFFFFFFFFFFE */ \
        tl = secp256k1_u128_to_u64(&t); \
    } \
    c0 += tl; /* overflow is handled on the next line */ \
    th += (c0 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \
    c1 += th; /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK(c1 >= th); \
}

/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
#define sumadd(a) { \
    unsigned int over; \
    c0 += (a); /* overflow is handled on the next line */ \
    over = (c0 < (a)); \
    c1 += over; /* overflow is handled on the next line */ \
    c2 += (c1 < over); /* never overflows by contract */ \
}

/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
#define sumadd_fast(a) { \
    c0 += (a); /* overflow is handled on the next line */ \
    c1 += (c0 < (a)); /* never overflows by contract (verified in the next line) */ \
    VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
    VERIFY_CHECK(c2 == 0); \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. */
#define extract(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = c2; \
    c2 = 0; \
}

/** Extract the lowest 64 bits of (c0,c1,c2) into n, and left shift the number 64 bits. c2 is required to be zero. */
#define extract_fast(n) { \
    (n) = c0; \
    c0 = c1; \
    c1 = 0; \
    VERIFY_CHECK(c2 == 0); \
}

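/* Note: given the 512-bit product l = l_lo + 2^256 * l_hi, and using
 * 2^256 == SECP256K1_N_C (mod n), each pass below folds the high limbs back
 * in as hi * SECP256K1_N_C, shrinking 512 -> 385 -> 258 -> 256 bits before a
 * final conditional reduction. */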
static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
#ifdef USE_ASM_X86_64
    /* Reduce 512 bits into 385. */
    uint64_t m0, m1, m2, m3, m4, m5, m6;
    uint64_t p0, p1, p2, p3, p4;
    uint64_t c;

    __asm__ __volatile__(
    /* Preload. */
    "movq 32(%%rsi), %%r11\n"
    "movq 40(%%rsi), %%r12\n"
    "movq 48(%%rsi), %%r13\n"
    "movq 56(%%rsi), %%r14\n"
    /* Initialize r8,r9,r10 */
    "movq 0(%%rsi), %%r8\n"
    "xorq %%r9, %%r9\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9) += n0 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* extract m0 */
    "movq %%r8, %q0\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10) += l1 */
    "addq 8(%%rsi), %%r9\n"
    "adcq $0, %%r10\n"
    /* (r9,r10,r8) += n1 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += n0 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract m1 */
    "movq %%r9, %q1\n"
    "xorq %%r9, %%r9\n"
    /* (r10,r8,r9) += l2 */
    "addq 16(%%rsi), %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += n2 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += n1 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += n0 */
    "addq %%r11, %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* extract m2 */
    "movq %%r10, %q2\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += l3 */
    "addq 24(%%rsi), %%r8\n"
    "adcq $0, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += n3 * c0 */
    "movq %8, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += n2 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += n1 */
    "addq %%r12, %%r8\n"
    "adcq $0, %%r9\n"
    "adcq $0, %%r10\n"
    /* extract m3 */
    "movq %%r8, %q3\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += n3 * c1 */
    "movq %9, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += n2 */
    "addq %%r13, %%r9\n"
    "adcq $0, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract m4 */
    "movq %%r9, %q4\n"
    /* (r10,r8) += n3 */
    "addq %%r14, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract m5 */
    "movq %%r10, %q5\n"
    /* extract m6 */
    "movq %%r8, %q6\n"
    : "=&g"(m0), "=&g"(m1), "=&g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
    : "S"(l), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");

    /* Reduce 385 bits into 258. */
    __asm__ __volatile__(
    /* Preload */
    "movq %q9, %%r11\n"
    "movq %q10, %%r12\n"
    "movq %q11, %%r13\n"
    /* Initialize (r8,r9,r10) */
    "movq %q5, %%r8\n"
    "xorq %%r9, %%r9\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9) += m4 * c0 */
    "movq %12, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* extract p0 */
    "movq %%r8, %q0\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10) += m1 */
    "addq %q6, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r9,r10,r8) += m5 * c0 */
    "movq %12, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += m4 * c1 */
    "movq %13, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* extract p1 */
    "movq %%r9, %q1\n"
    "xorq %%r9, %%r9\n"
    /* (r10,r8,r9) += m2 */
    "addq %q7, %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += m6 * c0 */
    "movq %12, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += m5 * c1 */
    "movq %13, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += m4 */
    "addq %%r11, %%r10\n"
    "adcq $0, %%r8\n"
    "adcq $0, %%r9\n"
    /* extract p2 */
    "movq %%r10, %q2\n"
    /* (r8,r9) += m3 */
    "addq %q8, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r8,r9) += m6 * c1 */
    "movq %13, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* (r8,r9) += m5 */
    "addq %%r12, %%r8\n"
    "adcq $0, %%r9\n"
    /* extract p3 */
    "movq %%r8, %q3\n"
    /* (r9) += m6 */
    "addq %%r13, %%r9\n"
    /* extract p4 */
    "movq %%r9, %q4\n"
    : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
    : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");

    /* Reduce 258 bits into 256. */
    __asm__ __volatile__(
    /* Preload */
    "movq %q5, %%r10\n"
    /* (rax,rdx) = p4 * c0 */
    "movq %7, %%rax\n"
    "mulq %%r10\n"
    /* (rax,rdx) += p0 */
    "addq %q1, %%rax\n"
    "adcq $0, %%rdx\n"
    /* extract r0 */
    "movq %%rax, 0(%q6)\n"
    /* Move to (r8,r9) */
    "movq %%rdx, %%r8\n"
    "xorq %%r9, %%r9\n"
    /* (r8,r9) += p1 */
    "addq %q2, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r8,r9) += p4 * c1 */
    "movq %8, %%rax\n"
    "mulq %%r10\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    /* Extract r1 */
    "movq %%r8, 8(%q6)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r8) += p4 */
    "addq %%r10, %%r9\n"
    "adcq $0, %%r8\n"
    /* (r9,r8) += p2 */
    "addq %q3, %%r9\n"
    "adcq $0, %%r8\n"
    /* Extract r2 */
    "movq %%r9, 16(%q6)\n"
    "xorq %%r9, %%r9\n"
    /* (r8,r9) += p3 */
    "addq %q4, %%r8\n"
    "adcq $0, %%r9\n"
    /* Extract r3 */
    "movq %%r8, 24(%q6)\n"
    /* Extract c */
    "movq %%r9, %q0\n"
    : "=g"(c)
    : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1)
    : "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
#else
    secp256k1_uint128 c128;
    uint64_t c, c0, c1, c2;
    uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
    uint64_t m0, m1, m2, m3, m4, m5;
    uint32_t m6;
    uint64_t p0, p1, p2, p3;
    uint32_t p4;

    /* Reduce 512 bits into 385. */
    /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
    c0 = l[0]; c1 = 0; c2 = 0;
    muladd_fast(n0, SECP256K1_N_C_0);
    extract_fast(m0);
    sumadd_fast(l[1]);
    muladd(n1, SECP256K1_N_C_0);
    muladd(n0, SECP256K1_N_C_1);
    extract(m1);
    sumadd(l[2]);
    muladd(n2, SECP256K1_N_C_0);
    muladd(n1, SECP256K1_N_C_1);
    sumadd(n0);
    extract(m2);
    sumadd(l[3]);
    muladd(n3, SECP256K1_N_C_0);
    muladd(n2, SECP256K1_N_C_1);
    sumadd(n1);
    extract(m3);
    muladd(n3, SECP256K1_N_C_1);
    sumadd(n2);
    extract(m4);
    sumadd_fast(n3);
    extract_fast(m5);
    VERIFY_CHECK(c0 <= 1);
    m6 = c0;

    /* Reduce 385 bits into 258. */
    /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
    c0 = m0; c1 = 0; c2 = 0;
    muladd_fast(m4, SECP256K1_N_C_0);
    extract_fast(p0);
    sumadd_fast(m1);
    muladd(m5, SECP256K1_N_C_0);
    muladd(m4, SECP256K1_N_C_1);
    extract(p1);
    sumadd(m2);
    muladd(m6, SECP256K1_N_C_0);
    muladd(m5, SECP256K1_N_C_1);
    sumadd(m4);
    extract(p2);
    sumadd_fast(m3);
    muladd_fast(m6, SECP256K1_N_C_1);
    sumadd_fast(m5);
    extract_fast(p3);
    p4 = c0 + m6;
    VERIFY_CHECK(p4 <= 2);

    /* Reduce 258 bits into 256. */
    /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */
    secp256k1_u128_from_u64(&c128, p0);
    secp256k1_u128_accum_mul(&c128, SECP256K1_N_C_0, p4);
    r->d[0] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p1);
    secp256k1_u128_accum_mul(&c128, SECP256K1_N_C_1, p4);
    r->d[1] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p2);
    secp256k1_u128_accum_u64(&c128, p4);
    r->d[2] = secp256k1_u128_to_u64(&c128); secp256k1_u128_rshift(&c128, 64);
    secp256k1_u128_accum_u64(&c128, p3);
    r->d[3] = secp256k1_u128_to_u64(&c128);
    c = secp256k1_u128_hi_u64(&c128);
#endif

    /* Final reduction of r. */
    secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
}

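/* Note: schoolbook 4x4-limb multiplication into the 512-bit product l. The C
 * fallback accumulates the 16 partial products column by column (Comba style)
 * in the (c0,c1,c2) accumulator defined by the macros above. */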
static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) {
#ifdef USE_ASM_X86_64
    const uint64_t *pb = b->d;
    __asm__ __volatile__(
    /* Preload */
    "movq 0(%%rdi), %%r15\n"
    "movq 8(%%rdi), %%rbx\n"
    "movq 16(%%rdi), %%rcx\n"
    "movq 0(%%rdx), %%r11\n"
    "movq 8(%%rdx), %%r12\n"
    "movq 16(%%rdx), %%r13\n"
    "movq 24(%%rdx), %%r14\n"
    /* (rax,rdx) = a0 * b0 */
    "movq %%r15, %%rax\n"
    "mulq %%r11\n"
    /* Extract l0 */
    "movq %%rax, 0(%%rsi)\n"
    /* (r8,r9,r10) = (rdx) */
    "movq %%rdx, %%r8\n"
    "xorq %%r9, %%r9\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += a0 * b1 */
    "movq %%r15, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += a1 * b0 */
    "movq %%rbx, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* Extract l1 */
    "movq %%r8, 8(%%rsi)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += a0 * b2 */
    "movq %%r15, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += a1 * b1 */
    "movq %%rbx, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += a2 * b0 */
    "movq %%rcx, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* Extract l2 */
    "movq %%r9, 16(%%rsi)\n"
    "xorq %%r9, %%r9\n"
    /* (r10,r8,r9) += a0 * b3 */
    "movq %%r15, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* Preload a3 */
    "movq 24(%%rdi), %%r15\n"
    /* (r10,r8,r9) += a1 * b2 */
    "movq %%rbx, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += a2 * b1 */
    "movq %%rcx, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* (r10,r8,r9) += a3 * b0 */
    "movq %%r15, %%rax\n"
    "mulq %%r11\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    "adcq $0, %%r9\n"
    /* Extract l3 */
    "movq %%r10, 24(%%rsi)\n"
    "xorq %%r10, %%r10\n"
    /* (r8,r9,r10) += a1 * b3 */
    "movq %%rbx, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += a2 * b2 */
    "movq %%rcx, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* (r8,r9,r10) += a3 * b1 */
    "movq %%r15, %%rax\n"
    "mulq %%r12\n"
    "addq %%rax, %%r8\n"
    "adcq %%rdx, %%r9\n"
    "adcq $0, %%r10\n"
    /* Extract l4 */
    "movq %%r8, 32(%%rsi)\n"
    "xorq %%r8, %%r8\n"
    /* (r9,r10,r8) += a2 * b3 */
    "movq %%rcx, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* (r9,r10,r8) += a3 * b2 */
    "movq %%r15, %%rax\n"
    "mulq %%r13\n"
    "addq %%rax, %%r9\n"
    "adcq %%rdx, %%r10\n"
    "adcq $0, %%r8\n"
    /* Extract l5 */
    "movq %%r9, 40(%%rsi)\n"
    /* (r10,r8) += a3 * b3 */
    "movq %%r15, %%rax\n"
    "mulq %%r14\n"
    "addq %%rax, %%r10\n"
    "adcq %%rdx, %%r8\n"
    /* Extract l6 */
    "movq %%r10, 48(%%rsi)\n"
    /* Extract l7 */
    "movq %%r8, 56(%%rsi)\n"
    : "+d"(pb)
    : "S"(l), "D"(a->d)
    : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory");
#else
    /* 160 bit accumulator. */
    uint64_t c0 = 0, c1 = 0;
    uint32_t c2 = 0;

    /* l[0..7] = a[0..3] * b[0..3]. */
    muladd_fast(a->d[0], b->d[0]);
    extract_fast(l[0]);
    muladd(a->d[0], b->d[1]);
    muladd(a->d[1], b->d[0]);
    extract(l[1]);
    muladd(a->d[0], b->d[2]);
    muladd(a->d[1], b->d[1]);
    muladd(a->d[2], b->d[0]);
    extract(l[2]);
    muladd(a->d[0], b->d[3]);
    muladd(a->d[1], b->d[2]);
    muladd(a->d[2], b->d[1]);
    muladd(a->d[3], b->d[0]);
    extract(l[3]);
    muladd(a->d[1], b->d[3]);
    muladd(a->d[2], b->d[2]);
    muladd(a->d[3], b->d[1]);
    extract(l[4]);
    muladd(a->d[2], b->d[3]);
    muladd(a->d[3], b->d[2]);
    extract(l[5]);
    muladd_fast(a->d[3], b->d[3]);
    extract_fast(l[6]);
    VERIFY_CHECK(c1 == 0);
    l[7] = c0;
#endif
}

#undef sumadd
#undef sumadd_fast
#undef muladd
#undef muladd_fast
#undef extract
#undef extract_fast

static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
    uint64_t l[8];
    secp256k1_scalar_mul_512(l, a, b);
    secp256k1_scalar_reduce_512(r, l);
}

static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
    int ret;
    VERIFY_CHECK(n > 0);
    VERIFY_CHECK(n < 16);
    ret = r->d[0] & ((1 << n) - 1);
    r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
    r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
    r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
    r->d[3] = (r->d[3] >> n);
    return ret;
}

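/* Note: decomposes k as r1 + 2^128 * r2, where r1 and r2 are the low and high
 * 128 bits of k respectively. */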
static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *k) {
    r1->d[0] = k->d[0];
    r1->d[1] = k->d[1];
    r1->d[2] = 0;
    r1->d[3] = 0;
    r2->d[0] = k->d[2];
    r2->d[1] = k->d[3];
    r2->d[2] = 0;
    r2->d[3] = 0;
}

SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
    return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
}

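/* Note: computes r = round(a * b / 2^shift) for shift >= 256. The 512-bit
 * product is shifted right, and the highest bit shifted out is added back via
 * secp256k1_scalar_cadd_bit, rounding the result to nearest. */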
static SECP256K1_INLINE void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
    uint64_t l[8];
    unsigned int shiftlimbs;
    unsigned int shiftlow;
    unsigned int shifthigh;
    VERIFY_CHECK(shift >= 256);
    secp256k1_scalar_mul_512(l, a, b);
    shiftlimbs = shift >> 6;
    shiftlow = shift & 0x3F;
    shifthigh = 64 - shiftlow;
    r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
    r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
    secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
}

static SECP256K1_INLINE void secp256k1_scalar_cmov(secp256k1_scalar *r, const secp256k1_scalar *a, int flag) {
    uint64_t mask0, mask1;
    volatile int vflag = flag;
    SECP256K1_CHECKMEM_CHECK_VERIFY(r->d, sizeof(r->d));
    mask0 = vflag + ~((uint64_t)0);
    mask1 = ~mask0;
    r->d[0] = (r->d[0] & mask0) | (a->d[0] & mask1);
    r->d[1] = (r->d[1] & mask0) | (a->d[1] & mask1);
    r->d[2] = (r->d[2] & mask0) | (a->d[2] & mask1);
    r->d[3] = (r->d[3] & mask0) | (a->d[3] & mask1);
}

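/* Note: the two helpers below convert between this 4x64 representation and
 * the signed62 form (five limbs of 62 bits) used by the modular-inverse code
 * in modinv64_impl.h. */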
static void secp256k1_scalar_from_signed62(secp256k1_scalar *r, const secp256k1_modinv64_signed62 *a) {
    const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];

    /* The output from secp256k1_modinv64{_var} should be normalized to range [0,modulus), and
     * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4).
     */
    VERIFY_CHECK(a0 >> 62 == 0);
    VERIFY_CHECK(a1 >> 62 == 0);
    VERIFY_CHECK(a2 >> 62 == 0);
    VERIFY_CHECK(a3 >> 62 == 0);
    VERIFY_CHECK(a4 >> 8 == 0);

    r->d[0] = a0 | a1 << 62;
    r->d[1] = a1 >> 2 | a2 << 60;
    r->d[2] = a2 >> 4 | a3 << 58;
    r->d[3] = a3 >> 6 | a4 << 56;

#ifdef VERIFY
    VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
#endif
}

static void secp256k1_scalar_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_scalar *a) {
    const uint64_t M62 = UINT64_MAX >> 2;
    const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3];

#ifdef VERIFY
    VERIFY_CHECK(secp256k1_scalar_check_overflow(a) == 0);
#endif

    r->v[0] = a0 & M62;
    r->v[1] = (a0 >> 62 | a1 << 2) & M62;
    r->v[2] = (a1 >> 60 | a2 << 4) & M62;
    r->v[3] = (a2 >> 58 | a3 << 6) & M62;
    r->v[4] = a3 >> 56;
}

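/* Note: modulus information consumed by secp256k1_modinv64{_var}: the group
 * order n in signed62 notation, followed by n^-1 mod 2^62. */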
static const secp256k1_modinv64_modinfo secp256k1_const_modinfo_scalar = {
    {{0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256}},
    0x34F20099AA774EC1LL
};

static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
    secp256k1_modinv64_signed62 s;
#ifdef VERIFY
    int zero_in = secp256k1_scalar_is_zero(x);
#endif
    secp256k1_scalar_to_signed62(&s, x);
    secp256k1_modinv64(&s, &secp256k1_const_modinfo_scalar);
    secp256k1_scalar_from_signed62(r, &s);

#ifdef VERIFY
    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}

static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
    secp256k1_modinv64_signed62 s;
#ifdef VERIFY
    int zero_in = secp256k1_scalar_is_zero(x);
#endif
    secp256k1_scalar_to_signed62(&s, x);
    secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_scalar);
    secp256k1_scalar_from_signed62(r, &s);

#ifdef VERIFY
    VERIFY_CHECK(secp256k1_scalar_is_zero(r) == zero_in);
#endif
}

SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
    return !(a->d[0] & 1);
}

#endif /* SECP256K1_SCALAR_REPR_IMPL_H */