Diffstat (limited to 'crypto/secp256k1/libsecp256k1/src/field_5x52_impl.h')
-rw-r--r--  crypto/secp256k1/libsecp256k1/src/field_5x52_impl.h  451
1 files changed, 451 insertions, 0 deletions
diff --git a/crypto/secp256k1/libsecp256k1/src/field_5x52_impl.h b/crypto/secp256k1/libsecp256k1/src/field_5x52_impl.h
new file mode 100644
index 000000000..dd88f38c7
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/field_5x52_impl.h
@@ -0,0 +1,451 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_FIELD_REPR_IMPL_H_
+#define _SECP256K1_FIELD_REPR_IMPL_H_
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include "util.h"
+#include "num.h"
+#include "field.h"
+
+#if defined(USE_ASM_X86_64)
+#include "field_5x52_asm_impl.h"
+#else
+#include "field_5x52_int128_impl.h"
+#endif
+
+/** Implements arithmetic modulo FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE FFFFFC2F,
+ * represented as 5 uint64_t's in base 2^52. The values are allowed to contain >52 bits each. In particular,
+ * each field element has a 'magnitude' associated with it. Internally, a magnitude M means each element
+ * is at most M*(2^53-1), except the most significant one, which is limited to M*(2^49-1). All operations
+ * accept any input with magnitude at most M, and have different rules for propagating magnitude to their
+ * output.
+ */
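+
+/* Editorial note (illustrative, not part of the upstream source): a fully reduced element x is
+ * stored as x = n[0] + n[1]*2^52 + n[2]*2^104 + n[3]*2^156 + n[4]*2^208, with n[0..3] < 2^52 and
+ * n[4] < 2^48; e.g. the value 1 is {1,0,0,0,0} and 2^52 is {0,1,0,0,0}. The reductions below rely
+ * on the identity 2^256 == 0x1000003D1 (mod p), since p = 2^256 - 0x1000003D1.
+ */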
+
+#ifdef VERIFY
+static void secp256k1_fe_verify(const secp256k1_fe *a) {
+ const uint64_t *d = a->n;
+ int m = a->normalized ? 1 : 2 * a->magnitude, r = 1;
+ /* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
+ r &= (d[0] <= 0xFFFFFFFFFFFFFULL * m);
+ r &= (d[1] <= 0xFFFFFFFFFFFFFULL * m);
+ r &= (d[2] <= 0xFFFFFFFFFFFFFULL * m);
+ r &= (d[3] <= 0xFFFFFFFFFFFFFULL * m);
+ r &= (d[4] <= 0x0FFFFFFFFFFFFULL * m);
+ r &= (a->magnitude >= 0);
+ r &= (a->magnitude <= 2048);
+ if (a->normalized) {
+ r &= (a->magnitude <= 1);
+ if (r && (d[4] == 0x0FFFFFFFFFFFFULL) && ((d[3] & d[2] & d[1]) == 0xFFFFFFFFFFFFFULL)) {
+ r &= (d[0] < 0xFFFFEFFFFFC2FULL);
+ }
+ }
+ VERIFY_CHECK(r == 1);
+}
+#endif
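+
+/* Editorial note (illustrative, not part of the upstream source): the factor 2 in
+ * m = 2 * a->magnitude reflects the bound stated in the header comment: a non-normalized limb of
+ * magnitude M may hold up to 2*M times the 52-bit limb mask (48-bit mask for the top limb).
+ */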
+
+static void secp256k1_fe_normalize(secp256k1_fe *r) {
+ uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
+
+ /* Reduce t4 at the start so there will be at most a single carry from the first pass */
+ uint64_t m;
+ uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x1000003D1ULL;
+ t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;
+
+ /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t4 >> 49 == 0);
+
+ /* At most a single final reduction is needed; check if the value is >= the field characteristic */
+ x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
+ & (t0 >= 0xFFFFEFFFFFC2FULL));
+
+ /* Apply the final reduction (for constant-time behaviour, we do it always) */
+ t0 += x * 0x1000003D1ULL;
+ t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
+
+ /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
+ VERIFY_CHECK(t4 >> 48 == x);
+
+ /* Mask off the possible multiple of 2^256 from the final reduction */
+ t4 &= 0x0FFFFFFFFFFFFULL;
+
+ r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
+
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+static void secp256k1_fe_normalize_weak(secp256k1_fe *r) {
+ uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
+
+ /* Reduce t4 at the start so there will be at most a single carry from the first pass */
+ uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x1000003D1ULL;
+ t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
+
+ /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t4 >> 49 == 0);
+
+ r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
+
+#ifdef VERIFY
+ r->magnitude = 1;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+static void secp256k1_fe_normalize_var(secp256k1_fe *r) {
+ uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
+
+ /* Reduce t4 at the start so there will be at most a single carry from the first pass */
+ uint64_t m;
+ uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x1000003D1ULL;
+ t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;
+
+ /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t4 >> 49 == 0);
+
+ /* At most a single final reduction is needed; check if the value is >= the field characteristic */
+ x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
+ & (t0 >= 0xFFFFEFFFFFC2FULL));
+
+ if (x) {
+ t0 += 0x1000003D1ULL;
+ t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;
+
+ /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
+ VERIFY_CHECK(t4 >> 48 == x);
+
+ /* Mask off the possible multiple of 2^256 from the final reduction */
+ t4 &= 0x0FFFFFFFFFFFFULL;
+ }
+
+ r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
+
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+ secp256k1_fe_verify(r);
+#endif
+}
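+
+/* Editorial note (illustrative, not part of the upstream source): the three normalization variants
+ * above differ only in how the final subtraction of p is handled. secp256k1_fe_normalize performs
+ * it unconditionally so its timing is independent of the value, secp256k1_fe_normalize_weak skips
+ * it entirely and only guarantees magnitude 1, and secp256k1_fe_normalize_var branches on the
+ * range check, so it is faster on average but not constant time.
+ */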
+
+static int secp256k1_fe_normalizes_to_zero(secp256k1_fe *r) {
+ uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
+
+ /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
+ uint64_t z0, z1;
+
+ /* Reduce t4 at the start so there will be at most a single carry from the first pass */
+ uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x1000003D1ULL;
+ t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; z0 = t0; z1 = t0 ^ 0x1000003D0ULL;
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
+ z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;
+
+ /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t4 >> 49 == 0);
+
+ return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
+}
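+
+/* Editorial note (illustrative, not part of the upstream source): z1 starts as t0 ^ 0x1000003D0
+ * because the lowest limb of p is 0xFFFFEFFFFFC2F = 0xFFFFFFFFFFFFF ^ 0x1000003D0. After AND-ing
+ * in the remaining limbs (with the top limb XORed against 0xF000000000000), z1 == 0xFFFFFFFFFFFFF
+ * exactly when the weakly normalized value equals p, while z0 == 0 exactly when it equals 0;
+ * either case means the element is congruent to zero.
+ */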
+
+static int secp256k1_fe_normalizes_to_zero_var(secp256k1_fe *r) {
+ uint64_t t0, t1, t2, t3, t4;
+ uint64_t z0, z1;
+ uint64_t x;
+
+ t0 = r->n[0];
+ t4 = r->n[4];
+
+ /* Reduce t4 at the start so there will be at most a single carry from the first pass */
+ x = t4 >> 48;
+
+ /* The first pass ensures the magnitude is 1, ... */
+ t0 += x * 0x1000003D1ULL;
+
+ /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
+ z0 = t0 & 0xFFFFFFFFFFFFFULL;
+ z1 = z0 ^ 0x1000003D0ULL;
+
+ /* Fast return path should catch the majority of cases */
+ if ((z0 != 0ULL) & (z1 != 0xFFFFFFFFFFFFFULL)) {
+ return 0;
+ }
+
+ t1 = r->n[1];
+ t2 = r->n[2];
+ t3 = r->n[3];
+
+ t4 &= 0x0FFFFFFFFFFFFULL;
+
+ t1 += (t0 >> 52);
+ t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
+ t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
+ t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
+ z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;
+
+ /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
+ VERIFY_CHECK(t4 >> 49 == 0);
+
+ return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
+}
+
+SECP256K1_INLINE static void secp256k1_fe_set_int(secp256k1_fe *r, int a) {
+ r->n[0] = a;
+ r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+SECP256K1_INLINE static int secp256k1_fe_is_zero(const secp256k1_fe *a) {
+ const uint64_t *t = a->n;
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+ secp256k1_fe_verify(a);
+#endif
+ return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0;
+}
+
+SECP256K1_INLINE static int secp256k1_fe_is_odd(const secp256k1_fe *a) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+ secp256k1_fe_verify(a);
+#endif
+ return a->n[0] & 1;
+}
+
+SECP256K1_INLINE static void secp256k1_fe_clear(secp256k1_fe *a) {
+ int i;
+#ifdef VERIFY
+ a->magnitude = 0;
+ a->normalized = 1;
+#endif
+ for (i=0; i<5; i++) {
+ a->n[i] = 0;
+ }
+}
+
+static int secp256k1_fe_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
+ int i;
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+ VERIFY_CHECK(b->normalized);
+ secp256k1_fe_verify(a);
+ secp256k1_fe_verify(b);
+#endif
+ for (i = 4; i >= 0; i--) {
+ if (a->n[i] > b->n[i]) {
+ return 1;
+ }
+ if (a->n[i] < b->n[i]) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int secp256k1_fe_set_b32(secp256k1_fe *r, const unsigned char *a) {
+ int i;
+ r->n[0] = r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
+ for (i=0; i<32; i++) {
+ int j;
+ for (j=0; j<2; j++) {
+ int limb = (8*i+4*j)/52;
+ int shift = (8*i+4*j)%52;
+ r->n[limb] |= (uint64_t)((a[31-i] >> (4*j)) & 0xF) << shift;
+ }
+ }
+ if (r->n[4] == 0x0FFFFFFFFFFFFULL && (r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL && r->n[0] >= 0xFFFFEFFFFFC2FULL) {
+ return 0;
+ }
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+ secp256k1_fe_verify(r);
+#endif
+ return 1;
+}
+
+/** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
+static void secp256k1_fe_get_b32(unsigned char *r, const secp256k1_fe *a) {
+ int i;
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+ secp256k1_fe_verify(a);
+#endif
+ for (i=0; i<32; i++) {
+ int j;
+ int c = 0;
+ for (j=0; j<2; j++) {
+ int limb = (8*i+4*j)/52;
+ int shift = (8*i+4*j)%52;
+ c |= ((a->n[limb] >> shift) & 0xF) << (4 * j);
+ }
+ r[31-i] = c;
+ }
+}
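+
+/* Editorial note (illustrative, not part of the upstream source): both conversions above walk the
+ * value one nibble (4 bits) at a time because 52 is a multiple of 4, so a nibble never straddles a
+ * limb boundary. For example, value bits 48..55 correspond to byte index 25 (31-i with i == 6);
+ * the low nibble lands in limb 0 at shift 48 and the high nibble in limb 1 at shift 0.
+ */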
+
+SECP256K1_INLINE static void secp256k1_fe_negate(secp256k1_fe *r, const secp256k1_fe *a, int m) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->magnitude <= m);
+ secp256k1_fe_verify(a);
+#endif
+ r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0];
+ r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1];
+ r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[2];
+ r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[3];
+ r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * (m + 1) - a->n[4];
+#ifdef VERIFY
+ r->magnitude = m + 1;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
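+
+/* Editorial note (illustrative, not part of the upstream source): the constants above are the five
+ * limbs of p, each multiplied by 2, so the function computes 2*(m+1)*p - a limb by limb. For the
+ * magnitudes the VERIFY bounds allow, every limb of a is no larger than the corresponding limb of
+ * 2*(m+1)*p, so no limb underflows, and the result is congruent to -a mod p with magnitude m+1.
+ */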
+
+SECP256K1_INLINE static void secp256k1_fe_mul_int(secp256k1_fe *r, int a) {
+ r->n[0] *= a;
+ r->n[1] *= a;
+ r->n[2] *= a;
+ r->n[3] *= a;
+ r->n[4] *= a;
+#ifdef VERIFY
+ r->magnitude *= a;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+SECP256K1_INLINE static void secp256k1_fe_add(secp256k1_fe *r, const secp256k1_fe *a) {
+#ifdef VERIFY
+ secp256k1_fe_verify(a);
+#endif
+ r->n[0] += a->n[0];
+ r->n[1] += a->n[1];
+ r->n[2] += a->n[2];
+ r->n[3] += a->n[3];
+ r->n[4] += a->n[4];
+#ifdef VERIFY
+ r->magnitude += a->magnitude;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
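+
+/* Editorial note (illustrative, not part of the upstream source): neither secp256k1_fe_mul_int nor
+ * secp256k1_fe_add reduces its result; as the VERIFY bookkeeping above shows, magnitudes multiply
+ * and add respectively, and callers are expected to keep the accumulated magnitude within the
+ * limits required by secp256k1_fe_negate, secp256k1_fe_mul and secp256k1_fe_sqr.
+ */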
+
+static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->magnitude <= 8);
+ VERIFY_CHECK(b->magnitude <= 8);
+ secp256k1_fe_verify(a);
+ secp256k1_fe_verify(b);
+ VERIFY_CHECK(r != b);
+#endif
+ secp256k1_fe_mul_inner(r->n, a->n, b->n);
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
+
+static void secp256k1_fe_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->magnitude <= 8);
+ secp256k1_fe_verify(a);
+#endif
+ secp256k1_fe_sqr_inner(r->n, a->n);
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 0;
+ secp256k1_fe_verify(r);
+#endif
+}
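+
+/* Editorial note (illustrative, not part of the upstream source): the magnitude <= 8 preconditions
+ * keep the limb products within the 128-bit accumulators used by secp256k1_fe_mul_inner /
+ * secp256k1_fe_sqr_inner (or their assembly equivalents); both return a result of magnitude 1 that
+ * is not necessarily normalized.
+ */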
+
+static SECP256K1_INLINE void secp256k1_fe_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
+ uint64_t mask0, mask1;
+ mask0 = flag + ~((uint64_t)0);
+ mask1 = ~mask0;
+ r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
+ r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
+ r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
+ r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
+ r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
+#ifdef VERIFY
+ if (a->magnitude > r->magnitude) {
+ r->magnitude = a->magnitude;
+ }
+ r->normalized &= a->normalized;
+#endif
+}
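+
+/* Editorial note (illustrative, not part of the upstream source): mask0 = flag + ~0 is flag - 1 in
+ * two's complement, i.e. all ones when flag == 0 and all zeros when flag == 1, and mask1 is its
+ * complement. Each limb is therefore taken from r when flag is 0 and from a when flag is 1,
+ * without a data-dependent branch.
+ */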
+
+static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
+ uint64_t mask0, mask1;
+ mask0 = flag + ~((uint64_t)0);
+ mask1 = ~mask0;
+ r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
+ r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
+ r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
+ r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
+}
+
+static void secp256k1_fe_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
+#ifdef VERIFY
+ VERIFY_CHECK(a->normalized);
+#endif
+ r->n[0] = a->n[0] | a->n[1] << 52;
+ r->n[1] = a->n[1] >> 12 | a->n[2] << 40;
+ r->n[2] = a->n[2] >> 24 | a->n[3] << 28;
+ r->n[3] = a->n[3] >> 36 | a->n[4] << 16;
+}
+
+static SECP256K1_INLINE void secp256k1_fe_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) {
+ r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL;
+ r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL);
+ r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL);
+ r->n[3] = a->n[2] >> 28 | ((a->n[3] << 36) & 0xFFFFFFFFFFFFFULL);
+ r->n[4] = a->n[3] >> 16;
+#ifdef VERIFY
+ r->magnitude = 1;
+ r->normalized = 1;
+#endif
+}
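+
+/* Editorial note (illustrative, not part of the upstream source): the storage form packs the value
+ * into four 64-bit words, where word k holds value bits 64*k .. 64*k+63; e.g. the low 12 bits of
+ * n[1] complete word 0 and its upper 40 bits start word 1. secp256k1_fe_to_storage and
+ * secp256k1_fe_from_storage are exact inverses on normalized inputs.
+ */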
+
+#endif