7#ifndef SECP256K1_FIELD_REPR_IMPL_H
8#define SECP256K1_FIELD_REPR_IMPL_H
/* NOTE(review): fragment of secp256k1_fe_verify (10x26 representation).
 * The signature, the VERIFY-only scaffolding and the closing braces are
 * not visible in this chunk; only comments are added here. */
25 const uint32_t *d = a->
n;
/* Bound every limb by magnitude * 2^26 (normalized elements have an
 * effective bound of 1). The top limb d[9] holds only 22 bits, hence the
 * smaller 0x03FFFFF mask below. */
26 int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
27 r &= (d[0] <= 0x3FFFFFFUL * m);
28 r &= (d[1] <= 0x3FFFFFFUL * m);
29 r &= (d[2] <= 0x3FFFFFFUL * m);
30 r &= (d[3] <= 0x3FFFFFFUL * m);
31 r &= (d[4] <= 0x3FFFFFFUL * m);
32 r &= (d[5] <= 0x3FFFFFFUL * m);
33 r &= (d[6] <= 0x3FFFFFFUL * m);
34 r &= (d[7] <= 0x3FFFFFFUL * m);
35 r &= (d[8] <= 0x3FFFFFFUL * m);
36 r &= (d[9] <= 0x03FFFFFUL * m);
37 r &= (a->magnitude >= 0);
38 r &= (a->magnitude <= 32);
/* NOTE(review): a line is missing between original lines 38 and 40 —
 * presumably the `if (a->normalized)` guard for the stricter bound. */
40 r &= (a->magnitude <= 1);
/* A normalized value must also be strictly below the field prime
 * p = 2^256 - 2^32 - 977: if the top limb and the middle limbs are all
 * saturated, check that the low two limbs do not reach p as well
 * (0x3D1 and 0x40 encode p's low bits). */
41 if (r && (d[9] == 0x03FFFFFUL)) {
42 uint32_t mid = d[8] & d[7] & d[6] & d[5] & d[4] & d[3] & d[2];
43 if (mid == 0x3FFFFFFUL) {
44 r &= ((d[1] + 0x40UL + ((d[0] + 0x3D1UL) >> 26)) <= 0x3FFFFFFUL);
53 uint32_t t0 = r->
n[0], t1 = r->
n[1], t2 = r->
n[2], t3 = r->
n[3], t4 = r->
n[4],
54 t5 = r->
n[5], t6 = r->
n[6], t7 = r->
n[7], t8 = r->
n[8], t9 = r->
n[9];
58 uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;
61 t0 += x * 0x3D1UL; t1 += (x << 6);
62 t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
63 t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
64 t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2;
65 t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3;
66 t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4;
67 t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5;
68 t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6;
69 t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7;
70 t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8;
76 x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL)
77 & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));
80 t0 += x * 0x3D1UL; t1 += (x << 6);
81 t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
82 t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
83 t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
84 t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
85 t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
86 t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
87 t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
88 t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
89 t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;
97 r->
n[0] = t0; r->
n[1] = t1; r->
n[2] = t2; r->
n[3] = t3; r->
n[4] = t4;
98 r->
n[5] = t5; r->
n[6] = t6; r->
n[7] = t7; r->
n[8] = t8; r->
n[9] = t9;
103 secp256k1_fe_verify(r);
108 uint32_t t0 = r->
n[0], t1 = r->
n[1], t2 = r->
n[2], t3 = r->
n[3], t4 = r->
n[4],
109 t5 = r->
n[5], t6 = r->
n[6], t7 = r->
n[7], t8 = r->
n[8], t9 = r->
n[9];
112 uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;
115 t0 += x * 0x3D1UL; t1 += (x << 6);
116 t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
117 t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
118 t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
119 t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
120 t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
121 t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
122 t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
123 t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
124 t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;
129 r->
n[0] = t0; r->
n[1] = t1; r->
n[2] = t2; r->
n[3] = t3; r->
n[4] = t4;
130 r->
n[5] = t5; r->
n[6] = t6; r->
n[7] = t7; r->
n[8] = t8; r->
n[9] = t9;
134 secp256k1_fe_verify(r);
139 uint32_t t0 = r->
n[0], t1 = r->
n[1], t2 = r->
n[2], t3 = r->
n[3], t4 = r->
n[4],
140 t5 = r->
n[5], t6 = r->
n[6], t7 = r->
n[7], t8 = r->
n[8], t9 = r->
n[9];
144 uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;
147 t0 += x * 0x3D1UL; t1 += (x << 6);
148 t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
149 t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
150 t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; m = t2;
151 t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; m &= t3;
152 t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; m &= t4;
153 t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; m &= t5;
154 t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; m &= t6;
155 t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; m &= t7;
156 t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; m &= t8;
162 x = (t9 >> 22) | ((t9 == 0x03FFFFFUL) & (m == 0x3FFFFFFUL)
163 & ((t1 + 0x40UL + ((t0 + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));
166 t0 += 0x3D1UL; t1 += (x << 6);
167 t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL;
168 t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL;
169 t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL;
170 t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL;
171 t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL;
172 t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL;
173 t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL;
174 t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL;
175 t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL;
184 r->
n[0] = t0; r->
n[1] = t1; r->
n[2] = t2; r->
n[3] = t3; r->
n[4] = t4;
185 r->
n[5] = t5; r->
n[6] = t6; r->
n[7] = t7; r->
n[8] = t8; r->
n[9] = t9;
190 secp256k1_fe_verify(r);
/* NOTE(review): fragment of secp256k1_fe_normalizes_to_zero; the
 * signature and the declarations of z0/z1 are outside this chunk. */
195 uint32_t t0 = r->
n[0], t1 = r->
n[1], t2 = r->
n[2], t3 = r->
n[3], t4 = r->
n[4],
196 t5 = r->
n[5], t6 = r->
n[6], t7 = r->
n[7], t8 = r->
n[8], t9 = r->
n[9];
/* Fold the overflow bits above 2^256 back in via the field's reduction
 * constant (p = 2^256 - 2^32 - 977; 0x3D1 and the <<6 term encode
 * 2^32 + 977 split across the 26-bit limbs). */
202 uint32_t x = t9 >> 22; t9 &= 0x03FFFFFUL;
205 t0 += x * 0x3D1UL; t1 += (x << 6);
/* One carry pass. Simultaneously accumulate:
 *   z0 — OR of all limbs, zero iff the value reduced to 0;
 *   z1 — AND of limbs XORed against p's limb pattern, all-ones iff the
 *        value equals p (which also normalizes to zero). */
206 t1 += (t0 >> 26); t0 &= 0x3FFFFFFUL; z0 = t0; z1 = t0 ^ 0x3D0UL;
207 t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL;
208 t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2;
209 t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3;
210 t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4;
211 t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5;
212 t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6;
213 t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7;
214 t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8;
215 z0 |= t9; z1 &= t9 ^ 0x3C00000UL;
/* Result: value is 0 or p. */
220 return (z0 == 0) | (z1 == 0x3FFFFFFUL);
224 uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
238 z0 = t0 & 0x3FFFFFFUL;
242 if ((z0 != 0UL) & (z1 != 0x3FFFFFFUL)) {
259 t2 += (t1 >> 26); t1 &= 0x3FFFFFFUL; z0 |= t1; z1 &= t1 ^ 0x40UL;
260 t3 += (t2 >> 26); t2 &= 0x3FFFFFFUL; z0 |= t2; z1 &= t2;
261 t4 += (t3 >> 26); t3 &= 0x3FFFFFFUL; z0 |= t3; z1 &= t3;
262 t5 += (t4 >> 26); t4 &= 0x3FFFFFFUL; z0 |= t4; z1 &= t4;
263 t6 += (t5 >> 26); t5 &= 0x3FFFFFFUL; z0 |= t5; z1 &= t5;
264 t7 += (t6 >> 26); t6 &= 0x3FFFFFFUL; z0 |= t6; z1 &= t6;
265 t8 += (t7 >> 26); t7 &= 0x3FFFFFFUL; z0 |= t7; z1 &= t7;
266 t9 += (t8 >> 26); t8 &= 0x3FFFFFFUL; z0 |= t8; z1 &= t8;
267 z0 |= t9; z1 &= t9 ^ 0x3C00000UL;
272 return (z0 == 0) | (z1 == 0x3FFFFFFUL);
/* NOTE(review): fragment of secp256k1_fe_set_int. The assignment of
 * n[0] = a (original line 277) is not visible in this chunk; the lines
 * below zero the remaining limbs. */
278 r->
n[1] = r->
n[2] = r->
n[3] = r->
n[4] = r->
n[5] = r->
n[6] = r->
n[7] = r->
n[8] = r->
n[9] = 0;
/* VERIFY-only bookkeeping: a small nonzero constant has magnitude 1,
 * zero has magnitude 0. */
280 r->magnitude = (a != 0);
282 secp256k1_fe_verify(r);
/* NOTE(review): fragment of secp256k1_fe_is_zero; requires the input to
 * be normalized (the VERIFY_CHECK asserting this is outside the chunk).
 * OR-ing all ten limbs is zero iff the element is zero. */
287 const uint32_t *t = a->
n;
290 secp256k1_fe_verify(a);
292 return (t[0] | t[1] | t[2] | t[3] | t[4] | t[5] | t[6] | t[7] | t[8] | t[9]) == 0;
298 secp256k1_fe_verify(a);
309 for (i=0; i<10; i++) {
319 secp256k1_fe_verify(a);
320 secp256k1_fe_verify(b);
322 for (i = 9; i >= 0; i--) {
323 if (a->
n[i] > b->
n[i]) {
326 if (a->
n[i] < b->
n[i]) {
335 r->
n[0] = (uint32_t)a[31] | ((uint32_t)a[30] << 8) | ((uint32_t)a[29] << 16) | ((uint32_t)(a[28] & 0x3) << 24);
336 r->
n[1] = (uint32_t)((a[28] >> 2) & 0x3f) | ((uint32_t)a[27] << 6) | ((uint32_t)a[26] << 14) | ((uint32_t)(a[25] & 0xf) << 22);
337 r->
n[2] = (uint32_t)((a[25] >> 4) & 0xf) | ((uint32_t)a[24] << 4) | ((uint32_t)a[23] << 12) | ((uint32_t)(a[22] & 0x3f) << 20);
338 r->
n[3] = (uint32_t)((a[22] >> 6) & 0x3) | ((uint32_t)a[21] << 2) | ((uint32_t)a[20] << 10) | ((uint32_t)a[19] << 18);
339 r->
n[4] = (uint32_t)a[18] | ((uint32_t)a[17] << 8) | ((uint32_t)a[16] << 16) | ((uint32_t)(a[15] & 0x3) << 24);
340 r->
n[5] = (uint32_t)((a[15] >> 2) & 0x3f) | ((uint32_t)a[14] << 6) | ((uint32_t)a[13] << 14) | ((uint32_t)(a[12] & 0xf) << 22);
341 r->
n[6] = (uint32_t)((a[12] >> 4) & 0xf) | ((uint32_t)a[11] << 4) | ((uint32_t)a[10] << 12) | ((uint32_t)(a[9] & 0x3f) << 20);
342 r->
n[7] = (uint32_t)((a[9] >> 6) & 0x3) | ((uint32_t)a[8] << 2) | ((uint32_t)a[7] << 10) | ((uint32_t)a[6] << 18);
343 r->
n[8] = (uint32_t)a[5] | ((uint32_t)a[4] << 8) | ((uint32_t)a[3] << 16) | ((uint32_t)(a[2] & 0x3) << 24);
344 r->
n[9] = (uint32_t)((a[2] >> 2) & 0x3f) | ((uint32_t)a[1] << 6) | ((uint32_t)a[0] << 14);
346 ret = !((r->
n[9] == 0x3FFFFFUL) & ((r->
n[8] & r->
n[7] & r->
n[6] & r->
n[5] & r->
n[4] & r->
n[3] & r->
n[2]) == 0x3FFFFFFUL) & ((r->
n[1] + 0x40UL + ((r->
n[0] + 0x3D1UL) >> 26)) > 0x3FFFFFFUL));
351 secp256k1_fe_verify(r);
363 secp256k1_fe_verify(a);
365 r[0] = (a->
n[9] >> 14) & 0xff;
366 r[1] = (a->
n[9] >> 6) & 0xff;
367 r[2] = ((a->
n[9] & 0x3F) << 2) | ((a->
n[8] >> 24) & 0x3);
368 r[3] = (a->
n[8] >> 16) & 0xff;
369 r[4] = (a->
n[8] >> 8) & 0xff;
370 r[5] = a->
n[8] & 0xff;
371 r[6] = (a->
n[7] >> 18) & 0xff;
372 r[7] = (a->
n[7] >> 10) & 0xff;
373 r[8] = (a->
n[7] >> 2) & 0xff;
374 r[9] = ((a->
n[7] & 0x3) << 6) | ((a->
n[6] >> 20) & 0x3f);
375 r[10] = (a->
n[6] >> 12) & 0xff;
376 r[11] = (a->
n[6] >> 4) & 0xff;
377 r[12] = ((a->
n[6] & 0xf) << 4) | ((a->
n[5] >> 22) & 0xf);
378 r[13] = (a->
n[5] >> 14) & 0xff;
379 r[14] = (a->
n[5] >> 6) & 0xff;
380 r[15] = ((a->
n[5] & 0x3f) << 2) | ((a->
n[4] >> 24) & 0x3);
381 r[16] = (a->
n[4] >> 16) & 0xff;
382 r[17] = (a->
n[4] >> 8) & 0xff;
383 r[18] = a->
n[4] & 0xff;
384 r[19] = (a->
n[3] >> 18) & 0xff;
385 r[20] = (a->
n[3] >> 10) & 0xff;
386 r[21] = (a->
n[3] >> 2) & 0xff;
387 r[22] = ((a->
n[3] & 0x3) << 6) | ((a->
n[2] >> 20) & 0x3f);
388 r[23] = (a->
n[2] >> 12) & 0xff;
389 r[24] = (a->
n[2] >> 4) & 0xff;
390 r[25] = ((a->
n[2] & 0xf) << 4) | ((a->
n[1] >> 22) & 0xf);
391 r[26] = (a->
n[1] >> 14) & 0xff;
392 r[27] = (a->
n[1] >> 6) & 0xff;
393 r[28] = ((a->
n[1] & 0x3f) << 2) | ((a->
n[0] >> 24) & 0x3);
394 r[29] = (a->
n[0] >> 16) & 0xff;
395 r[30] = (a->
n[0] >> 8) & 0xff;
396 r[31] = a->
n[0] & 0xff;
/* NOTE(review): fragment of secp256k1_fe_negate(r, a, m). Computes
 * r = (2*(m+1))*p - a limb-wise, which represents -a with magnitude
 * m + 1 and never underflows as long as a's magnitude is at most m.
 * The limb constants 0x3FFFC2F / 0x3FFFFBF encode p's low limbs
 * (p = 2^256 - 2^32 - 977). */
402 secp256k1_fe_verify(a);
/* VERIFY-only: each minuend limb dominates the maximum possible limb of
 * a at magnitude m, so the per-limb subtraction cannot go negative. */
403 VERIFY_CHECK(0x3FFFC2FUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m);
404 VERIFY_CHECK(0x3FFFFBFUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m);
405 VERIFY_CHECK(0x3FFFFFFUL * 2 * (m + 1) >= 0x3FFFFFFUL * 2 * m);
406 VERIFY_CHECK(0x03FFFFFUL * 2 * (m + 1) >= 0x03FFFFFUL * 2 * m);
408 r->
n[0] = 0x3FFFC2FUL * 2 * (m + 1) - a->
n[0];
409 r->
n[1] = 0x3FFFFBFUL * 2 * (m + 1) - a->
n[1];
410 r->
n[2] = 0x3FFFFFFUL * 2 * (m + 1) - a->
n[2];
411 r->
n[3] = 0x3FFFFFFUL * 2 * (m + 1) - a->
n[3];
412 r->
n[4] = 0x3FFFFFFUL * 2 * (m + 1) - a->
n[4];
413 r->
n[5] = 0x3FFFFFFUL * 2 * (m + 1) - a->
n[5];
414 r->
n[6] = 0x3FFFFFFUL * 2 * (m + 1) - a->
n[6];
415 r->
n[7] = 0x3FFFFFFUL * 2 * (m + 1) - a->
n[7];
416 r->
n[8] = 0x3FFFFFFUL * 2 * (m + 1) - a->
n[8];
417 r->
n[9] = 0x03FFFFFUL * 2 * (m + 1) - a->
n[9];
/* VERIFY-only: the result's magnitude grows to m + 1 and it is no
 * longer normalized (the normalized = 0 line is outside this chunk). */
419 r->magnitude = m + 1;
421 secp256k1_fe_verify(r);
439 secp256k1_fe_verify(r);
445 secp256k1_fe_verify(a);
458 r->magnitude += a->magnitude;
460 secp256k1_fe_verify(r);
464#if defined(USE_EXTERNAL_ASM)
473#define VERIFY_BITS(x, n) VERIFY_CHECK(((x) >> (n)) == 0)
475#define VERIFY_BITS(x, n) do { } while(0)
480 uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
481 uint32_t t9, t1, t0, t2, t3, t4, t5, t6, t7;
482 const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL;
511 d = (uint64_t)a[0] * b[9]
512 + (uint64_t)a[1] * b[8]
513 + (uint64_t)a[2] * b[7]
514 + (uint64_t)a[3] * b[6]
515 + (uint64_t)a[4] * b[5]
516 + (uint64_t)a[5] * b[4]
517 + (uint64_t)a[6] * b[3]
518 + (uint64_t)a[7] * b[2]
519 + (uint64_t)a[8] * b[1]
520 + (uint64_t)a[9] * b[0];
523 t9 = d & M; d >>= 26;
528 c = (uint64_t)a[0] * b[0];
531 d += (uint64_t)a[1] * b[9]
532 + (uint64_t)a[2] * b[8]
533 + (uint64_t)a[3] * b[7]
534 + (uint64_t)a[4] * b[6]
535 + (uint64_t)a[5] * b[5]
536 + (uint64_t)a[6] * b[4]
537 + (uint64_t)a[7] * b[3]
538 + (uint64_t)a[8] * b[2]
539 + (uint64_t)a[9] * b[1];
542 u0 = d & M; d >>= 26; c += u0 * R0;
547 t0 = c & M; c >>= 26; c += u0 * R1;
553 c += (uint64_t)a[0] * b[1]
554 + (uint64_t)a[1] * b[0];
557 d += (uint64_t)a[2] * b[9]
558 + (uint64_t)a[3] * b[8]
559 + (uint64_t)a[4] * b[7]
560 + (uint64_t)a[5] * b[6]
561 + (uint64_t)a[6] * b[5]
562 + (uint64_t)a[7] * b[4]
563 + (uint64_t)a[8] * b[3]
564 + (uint64_t)a[9] * b[2];
567 u1 = d & M; d >>= 26; c += u1 * R0;
572 t1 = c & M; c >>= 26; c += u1 * R1;
578 c += (uint64_t)a[0] * b[2]
579 + (uint64_t)a[1] * b[1]
580 + (uint64_t)a[2] * b[0];
583 d += (uint64_t)a[3] * b[9]
584 + (uint64_t)a[4] * b[8]
585 + (uint64_t)a[5] * b[7]
586 + (uint64_t)a[6] * b[6]
587 + (uint64_t)a[7] * b[5]
588 + (uint64_t)a[8] * b[4]
589 + (uint64_t)a[9] * b[3];
592 u2 = d & M; d >>= 26; c += u2 * R0;
597 t2 = c & M; c >>= 26; c += u2 * R1;
603 c += (uint64_t)a[0] * b[3]
604 + (uint64_t)a[1] * b[2]
605 + (uint64_t)a[2] * b[1]
606 + (uint64_t)a[3] * b[0];
609 d += (uint64_t)a[4] * b[9]
610 + (uint64_t)a[5] * b[8]
611 + (uint64_t)a[6] * b[7]
612 + (uint64_t)a[7] * b[6]
613 + (uint64_t)a[8] * b[5]
614 + (uint64_t)a[9] * b[4];
617 u3 = d & M; d >>= 26; c += u3 * R0;
622 t3 = c & M; c >>= 26; c += u3 * R1;
628 c += (uint64_t)a[0] * b[4]
629 + (uint64_t)a[1] * b[3]
630 + (uint64_t)a[2] * b[2]
631 + (uint64_t)a[3] * b[1]
632 + (uint64_t)a[4] * b[0];
635 d += (uint64_t)a[5] * b[9]
636 + (uint64_t)a[6] * b[8]
637 + (uint64_t)a[7] * b[7]
638 + (uint64_t)a[8] * b[6]
639 + (uint64_t)a[9] * b[5];
642 u4 = d & M; d >>= 26; c += u4 * R0;
647 t4 = c & M; c >>= 26; c += u4 * R1;
653 c += (uint64_t)a[0] * b[5]
654 + (uint64_t)a[1] * b[4]
655 + (uint64_t)a[2] * b[3]
656 + (uint64_t)a[3] * b[2]
657 + (uint64_t)a[4] * b[1]
658 + (uint64_t)a[5] * b[0];
661 d += (uint64_t)a[6] * b[9]
662 + (uint64_t)a[7] * b[8]
663 + (uint64_t)a[8] * b[7]
664 + (uint64_t)a[9] * b[6];
667 u5 = d & M; d >>= 26; c += u5 * R0;
672 t5 = c & M; c >>= 26; c += u5 * R1;
678 c += (uint64_t)a[0] * b[6]
679 + (uint64_t)a[1] * b[5]
680 + (uint64_t)a[2] * b[4]
681 + (uint64_t)a[3] * b[3]
682 + (uint64_t)a[4] * b[2]
683 + (uint64_t)a[5] * b[1]
684 + (uint64_t)a[6] * b[0];
687 d += (uint64_t)a[7] * b[9]
688 + (uint64_t)a[8] * b[8]
689 + (uint64_t)a[9] * b[7];
692 u6 = d & M; d >>= 26; c += u6 * R0;
697 t6 = c & M; c >>= 26; c += u6 * R1;
703 c += (uint64_t)a[0] * b[7]
704 + (uint64_t)a[1] * b[6]
705 + (uint64_t)a[2] * b[5]
706 + (uint64_t)a[3] * b[4]
707 + (uint64_t)a[4] * b[3]
708 + (uint64_t)a[5] * b[2]
709 + (uint64_t)a[6] * b[1]
710 + (uint64_t)a[7] * b[0];
714 d += (uint64_t)a[8] * b[9]
715 + (uint64_t)a[9] * b[8];
718 u7 = d & M; d >>= 26; c += u7 * R0;
724 t7 = c & M; c >>= 26; c += u7 * R1;
730 c += (uint64_t)a[0] * b[8]
731 + (uint64_t)a[1] * b[7]
732 + (uint64_t)a[2] * b[6]
733 + (uint64_t)a[3] * b[5]
734 + (uint64_t)a[4] * b[4]
735 + (uint64_t)a[5] * b[3]
736 + (uint64_t)a[6] * b[2]
737 + (uint64_t)a[7] * b[1]
738 + (uint64_t)a[8] * b[0];
742 d += (uint64_t)a[9] * b[9];
745 u8 = d & M; d >>= 26; c += u8 * R0;
768 r[8] = c & M; c >>= 26; c += u8 * R1;
776 r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4);
783 d = c * (R0 >> 4) + t0;
786 r[0] = d & M; d >>= 26;
790 d += c * (R1 >> 4) + t1;
795 r[1] = d & M; d >>= 26;
810 uint64_t u0, u1, u2, u3, u4, u5, u6, u7, u8;
811 uint32_t t9, t0, t1, t2, t3, t4, t5, t6, t7;
812 const uint32_t M = 0x3FFFFFFUL, R0 = 0x3D10UL, R1 = 0x400UL;
830 d = (uint64_t)(a[0]*2) * a[9]
831 + (uint64_t)(a[1]*2) * a[8]
832 + (uint64_t)(a[2]*2) * a[7]
833 + (uint64_t)(a[3]*2) * a[6]
834 + (uint64_t)(a[4]*2) * a[5];
837 t9 = d & M; d >>= 26;
842 c = (uint64_t)a[0] * a[0];
845 d += (uint64_t)(a[1]*2) * a[9]
846 + (uint64_t)(a[2]*2) * a[8]
847 + (uint64_t)(a[3]*2) * a[7]
848 + (uint64_t)(a[4]*2) * a[6]
849 + (uint64_t)a[5] * a[5];
852 u0 = d & M; d >>= 26; c += u0 * R0;
857 t0 = c & M; c >>= 26; c += u0 * R1;
863 c += (uint64_t)(a[0]*2) * a[1];
866 d += (uint64_t)(a[2]*2) * a[9]
867 + (uint64_t)(a[3]*2) * a[8]
868 + (uint64_t)(a[4]*2) * a[7]
869 + (uint64_t)(a[5]*2) * a[6];
872 u1 = d & M; d >>= 26; c += u1 * R0;
877 t1 = c & M; c >>= 26; c += u1 * R1;
883 c += (uint64_t)(a[0]*2) * a[2]
884 + (uint64_t)a[1] * a[1];
887 d += (uint64_t)(a[3]*2) * a[9]
888 + (uint64_t)(a[4]*2) * a[8]
889 + (uint64_t)(a[5]*2) * a[7]
890 + (uint64_t)a[6] * a[6];
893 u2 = d & M; d >>= 26; c += u2 * R0;
898 t2 = c & M; c >>= 26; c += u2 * R1;
904 c += (uint64_t)(a[0]*2) * a[3]
905 + (uint64_t)(a[1]*2) * a[2];
908 d += (uint64_t)(a[4]*2) * a[9]
909 + (uint64_t)(a[5]*2) * a[8]
910 + (uint64_t)(a[6]*2) * a[7];
913 u3 = d & M; d >>= 26; c += u3 * R0;
918 t3 = c & M; c >>= 26; c += u3 * R1;
924 c += (uint64_t)(a[0]*2) * a[4]
925 + (uint64_t)(a[1]*2) * a[3]
926 + (uint64_t)a[2] * a[2];
929 d += (uint64_t)(a[5]*2) * a[9]
930 + (uint64_t)(a[6]*2) * a[8]
931 + (uint64_t)a[7] * a[7];
934 u4 = d & M; d >>= 26; c += u4 * R0;
939 t4 = c & M; c >>= 26; c += u4 * R1;
945 c += (uint64_t)(a[0]*2) * a[5]
946 + (uint64_t)(a[1]*2) * a[4]
947 + (uint64_t)(a[2]*2) * a[3];
950 d += (uint64_t)(a[6]*2) * a[9]
951 + (uint64_t)(a[7]*2) * a[8];
954 u5 = d & M; d >>= 26; c += u5 * R0;
959 t5 = c & M; c >>= 26; c += u5 * R1;
965 c += (uint64_t)(a[0]*2) * a[6]
966 + (uint64_t)(a[1]*2) * a[5]
967 + (uint64_t)(a[2]*2) * a[4]
968 + (uint64_t)a[3] * a[3];
971 d += (uint64_t)(a[7]*2) * a[9]
972 + (uint64_t)a[8] * a[8];
975 u6 = d & M; d >>= 26; c += u6 * R0;
980 t6 = c & M; c >>= 26; c += u6 * R1;
986 c += (uint64_t)(a[0]*2) * a[7]
987 + (uint64_t)(a[1]*2) * a[6]
988 + (uint64_t)(a[2]*2) * a[5]
989 + (uint64_t)(a[3]*2) * a[4];
993 d += (uint64_t)(a[8]*2) * a[9];
996 u7 = d & M; d >>= 26; c += u7 * R0;
1002 t7 = c & M; c >>= 26; c += u7 * R1;
1008 c += (uint64_t)(a[0]*2) * a[8]
1009 + (uint64_t)(a[1]*2) * a[7]
1010 + (uint64_t)(a[2]*2) * a[6]
1011 + (uint64_t)(a[3]*2) * a[5]
1012 + (uint64_t)a[4] * a[4];
1016 d += (uint64_t)a[9] * a[9];
1019 u8 = d & M; d >>= 26; c += u8 * R0;
1042 r[8] = c & M; c >>= 26; c += u8 * R1;
1050 r[9] = c & (M >> 4); c >>= 22; c += d * (R1 << 4);
1057 d = c * (R0 >> 4) + t0;
1060 r[0] = d & M; d >>= 26;
1064 d += c * (R1 >> 4) + t1;
1069 r[1] = d & M; d >>= 26;
1087 secp256k1_fe_verify(a);
1088 secp256k1_fe_verify(b);
1096 secp256k1_fe_verify(r);
1103 secp256k1_fe_verify(a);
1109 secp256k1_fe_verify(r);
/* NOTE(review): fragment of secp256k1_fe_cmov(r, a, flag) — constant-time
 * conditional move: r = flag ? *a : *r, without branching on flag.
 * `volatile` discourages the compiler from turning the masking back into
 * a branch. */
1114 uint32_t mask0, mask1;
1115 volatile int vflag = flag;
/* mask0 = flag ? 0 : 0xFFFFFFFF. NOTE(review): the companion line
 * computing mask1 = ~mask0 (original line ~1118) is missing from this
 * extraction — TODO confirm against upstream. */
1117 mask0 = vflag + ~((uint32_t)0);
/* Select each limb: keep r's limb where mask0 is all-ones, take a's
 * limb where mask1 is all-ones. */
1119 r->
n[0] = (r->
n[0] & mask0) | (a->
n[0] & mask1);
1120 r->
n[1] = (r->
n[1] & mask0) | (a->
n[1] & mask1);
1121 r->
n[2] = (r->
n[2] & mask0) | (a->
n[2] & mask1);
1122 r->
n[3] = (r->
n[3] & mask0) | (a->
n[3] & mask1);
1123 r->
n[4] = (r->
n[4] & mask0) | (a->
n[4] & mask1);
1124 r->
n[5] = (r->
n[5] & mask0) | (a->
n[5] & mask1);
1125 r->
n[6] = (r->
n[6] & mask0) | (a->
n[6] & mask1);
1126 r->
n[7] = (r->
n[7] & mask0) | (a->
n[7] & mask1);
1127 r->
n[8] = (r->
n[8] & mask0) | (a->
n[8] & mask1);
1128 r->
n[9] = (r->
n[9] & mask0) | (a->
n[9] & mask1);
/* VERIFY-only bookkeeping; NOTE(review): upstream guards these with
 * `if (flag)` — the guard is not visible in this fragment, so as shown
 * this would overwrite r's tracking unconditionally. TODO confirm. */
1131 r->magnitude = a->magnitude;
1132 r->normalized = a->normalized;
1138 uint32_t mask0, mask1;
1139 volatile int vflag = flag;
1141 mask0 = vflag + ~((uint32_t)0);
1143 r->
n[0] = (r->
n[0] & mask0) | (a->
n[0] & mask1);
1144 r->
n[1] = (r->
n[1] & mask0) | (a->
n[1] & mask1);
1145 r->
n[2] = (r->
n[2] & mask0) | (a->
n[2] & mask1);
1146 r->
n[3] = (r->
n[3] & mask0) | (a->
n[3] & mask1);
1147 r->
n[4] = (r->
n[4] & mask0) | (a->
n[4] & mask1);
1148 r->
n[5] = (r->
n[5] & mask0) | (a->
n[5] & mask1);
1149 r->
n[6] = (r->
n[6] & mask0) | (a->
n[6] & mask1);
1150 r->
n[7] = (r->
n[7] & mask0) | (a->
n[7] & mask1);
1157 r->
n[0] = a->
n[0] | a->
n[1] << 26;
1158 r->
n[1] = a->
n[1] >> 6 | a->
n[2] << 20;
1159 r->
n[2] = a->
n[2] >> 12 | a->
n[3] << 14;
1160 r->
n[3] = a->
n[3] >> 18 | a->
n[4] << 8;
1161 r->
n[4] = a->
n[4] >> 24 | a->
n[5] << 2 | a->
n[6] << 28;
1162 r->
n[5] = a->
n[6] >> 4 | a->
n[7] << 22;
1163 r->
n[6] = a->
n[7] >> 10 | a->
n[8] << 16;
1164 r->
n[7] = a->
n[8] >> 16 | a->
n[9] << 10;
1168 r->
n[0] = a->
n[0] & 0x3FFFFFFUL;
1169 r->
n[1] = a->
n[0] >> 26 | ((a->
n[1] << 6) & 0x3FFFFFFUL);
1170 r->
n[2] = a->
n[1] >> 20 | ((a->
n[2] << 12) & 0x3FFFFFFUL);
1171 r->
n[3] = a->
n[2] >> 14 | ((a->
n[3] << 18) & 0x3FFFFFFUL);
1172 r->
n[4] = a->
n[3] >> 8 | ((a->
n[4] << 24) & 0x3FFFFFFUL);
1173 r->
n[5] = (a->
n[4] >> 2) & 0x3FFFFFFUL;
1174 r->
n[6] = a->
n[4] >> 28 | ((a->
n[5] << 4) & 0x3FFFFFFUL);
1175 r->
n[7] = a->
n[5] >> 22 | ((a->
n[6] << 10) & 0x3FFFFFFUL);
1176 r->
n[8] = a->
n[6] >> 16 | ((a->
n[7] << 16) & 0x3FFFFFFUL);
1177 r->
n[9] = a->
n[7] >> 10;
1181 secp256k1_fe_verify(r);
1186 const uint32_t M26 = UINT32_MAX >> 6;
1187 const uint32_t a0 = a->
v[0], a1 = a->
v[1], a2 = a->
v[2], a3 = a->
v[3], a4 = a->
v[4],
1188 a5 = a->
v[5], a6 = a->
v[6], a7 = a->
v[7], a8 = a->
v[8];
1204 r->
n[1] = (a0 >> 26 | a1 << 4) & M26;
1205 r->
n[2] = (a1 >> 22 | a2 << 8) & M26;
1206 r->
n[3] = (a2 >> 18 | a3 << 12) & M26;
1207 r->
n[4] = (a3 >> 14 | a4 << 16) & M26;
1208 r->
n[5] = (a4 >> 10 | a5 << 20) & M26;
1209 r->
n[6] = (a5 >> 6 | a6 << 24) & M26;
1210 r->
n[7] = (a6 >> 2 ) & M26;
1211 r->
n[8] = (a6 >> 28 | a7 << 2) & M26;
1212 r->
n[9] = (a7 >> 24 | a8 << 6);
1217 secp256k1_fe_verify(r);
/* NOTE(review): fragment of secp256k1_fe_to_signed30 — repacks ten
 * 26-bit limbs (n[0..9]) into nine 30-bit limbs (v[0..8]) for the
 * modular-inverse code. Signature and surrounding VERIFY lines are not
 * visible in this chunk. */
1222 const uint32_t M30 = UINT32_MAX >> 2;
1223 const uint64_t a0 = a->
n[0], a1 = a->
n[1], a2 = a->
n[2], a3 = a->
n[3], a4 = a->
n[4],
1224 a5 = a->
n[5], a6 = a->
n[6], a7 = a->
n[7], a8 = a->
n[8], a9 = a->
n[9];
/* Each v[i] takes bits [30*i, 30*i+30) of the 256-bit value, stitched
 * from the two or three 26-bit limbs that overlap that window. */
1230 r->
v[0] = (a0 | a1 << 26) & M30;
1231 r->
v[1] = (a1 >> 4 | a2 << 22) & M30;
1232 r->
v[2] = (a2 >> 8 | a3 << 18) & M30;
1233 r->
v[3] = (a3 >> 12 | a4 << 14) & M30;
1234 r->
v[4] = (a4 >> 16 | a5 << 10) & M30;
1235 r->
v[5] = (a5 >> 20 | a6 << 6) & M30;
/* NOTE(review): the next statement is truncated in this extraction —
 * the window needs a contribution from a8 and the closing `) & M30;`
 * (upstream: `(a6 >> 24 | a7 << 2 | a8 << 28) & M30`) — TODO confirm
 * and restore from the canonical source. Line 1237 (v[7]'s upstream
 * numbering) is also missing here. */
1236 r->
v[6] = (a6 >> 24 | a7 << 2
1238 r->
v[7] = (a8 >> 2 | a9 << 24) & M30;
1243 {{-0x3D1, -4, 0, 0, 0, 0, 0, 0, 65536}},
static int secp256k1_fe_normalizes_to_zero_var(const secp256k1_fe *r)
static SECP256K1_INLINE void secp256k1_fe_set_int(secp256k1_fe *r, int a)
static void secp256k1_fe_normalize_weak(secp256k1_fe *r)
static SECP256K1_INLINE void secp256k1_fe_sqr_inner(uint32_t *r, const uint32_t *a)
static const secp256k1_modinv32_modinfo secp256k1_const_modinfo_fe
static SECP256K1_INLINE int secp256k1_fe_is_zero(const secp256k1_fe *a)
static void secp256k1_fe_normalize_var(secp256k1_fe *r)
static SECP256K1_INLINE void secp256k1_fe_mul_int(secp256k1_fe *r, int a)
static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe *SECP256K1_RESTRICT b)
static SECP256K1_INLINE void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t *a, const uint32_t *SECP256K1_RESTRICT b)
static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a)
static void secp256k1_fe_from_signed30(secp256k1_fe *r, const secp256k1_modinv32_signed30 *a)
static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag)
static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a)
static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag)
static int secp256k1_fe_normalizes_to_zero(const secp256k1_fe *r)
static SECP256K1_INLINE void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m)
static void secp256k1_fe_normalize(secp256k1_fe *r)
See the comment at the top of field_5x52_impl.h for more details.
#define VERIFY_BITS(x, n)
static SECP256K1_INLINE int secp256k1_fe_is_odd(const secp256k1_fe *a)
static SECP256K1_INLINE void secp256k1_fe_clear(secp256k1_fe *a)
static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a)
static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a)
static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a)
Convert a field element to a 32-byte big endian value.
static SECP256K1_INLINE void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a)
static void secp256k1_fe_to_signed30(secp256k1_modinv32_signed30 *r, const secp256k1_fe *a)
static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b)
static void secp256k1_fe_inv(secp256k1_fe *r, const secp256k1_fe *x)
static void secp256k1_fe_inv_var(secp256k1_fe *r, const secp256k1_fe *x)
static void secp256k1_modinv32_var(secp256k1_modinv32_signed30 *x, const secp256k1_modinv32_modinfo *modinfo)
static void secp256k1_modinv32(secp256k1_modinv32_signed30 *x, const secp256k1_modinv32_modinfo *modinfo)
#define VG_CHECK_VERIFY(x, y)
#define VERIFY_CHECK(cond)
#define SECP256K1_RESTRICT