diff --git a/src/include.am b/src/include.am index 50eb9260d..9c42dadad 100644 --- a/src/include.am +++ b/src/include.am @@ -180,9 +180,12 @@ endif endif if BUILD_FASTMATH +if BUILD_FIPS src_libwolfssl_la_SOURCES += ctaocrypt/src/tfm.c +else src_libwolfssl_la_SOURCES += wolfcrypt/src/tfm.c endif +endif if BUILD_SLOWMATH if BUILD_FIPS diff --git a/wolfcrypt/src/integer.c b/wolfcrypt/src/integer.c index e69de29bb..e46aa7007 100644 --- a/wolfcrypt/src/integer.c +++ b/wolfcrypt/src/integer.c @@ -0,0 +1,4493 @@ +/* integer.c + * + * Copyright (C) 2006-2014 wolfSSL Inc. + * + * This file is part of wolfSSL. (formerly known as CyaSSL) + * + * wolfSSL is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * wolfSSL is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + + +/* + * Based on public domain LibTomMath 0.38 by Tom St Denis, tomstdenis@iahu.ca, + * http://math.libtomcrypt.com + */ + + +#ifdef HAVE_CONFIG_H + #include +#endif + +/* in case user set USE_FAST_MATH there */ +#include + +#ifndef NO_BIG_INT + +#ifndef USE_FAST_MATH + +#include + +#ifndef NO_WOLFSSL_SMALL_STACK + #ifndef WOLFSSL_SMALL_STACK + #define WOLFSSL_SMALL_STACK + #endif +#endif + +static void bn_reverse (unsigned char *s, int len); + +/* math settings check */ +word32 CheckRunTimeSettings(void) +{ + return CTC_SETTINGS; +} + + +/* handle up to 6 inits */ +int mp_init_multi(mp_int* a, mp_int* b, mp_int* c, mp_int* d, mp_int* e, + mp_int* f) +{ + int res = MP_OKAY; + + if (a && ((res = mp_init(a)) != MP_OKAY)) + return res; + + if (b && ((res = mp_init(b)) != MP_OKAY)) { + mp_clear(a); + return res; + } + + if (c && ((res = mp_init(c)) != MP_OKAY)) { + mp_clear(a); mp_clear(b); + return res; + } + + if (d && ((res = mp_init(d)) != MP_OKAY)) { + mp_clear(a); mp_clear(b); mp_clear(c); + return res; + } + + if (e && ((res = mp_init(e)) != MP_OKAY)) { + mp_clear(a); mp_clear(b); mp_clear(c); mp_clear(d); + return res; + } + + if (f && ((res = mp_init(f)) != MP_OKAY)) { + mp_clear(a); mp_clear(b); mp_clear(c); mp_clear(d); mp_clear(e); + return res; + } + + return res; +} + + +/* init a new mp_int */ +int mp_init (mp_int * a) +{ + int i; + + /* allocate memory required and clear it */ + a->dp = OPT_CAST(mp_digit) XMALLOC (sizeof (mp_digit) * MP_PREC, 0, + DYNAMIC_TYPE_BIGINT); + if (a->dp == NULL) { + return MP_MEM; + } + + /* set the digits to zero */ + for (i = 0; i < MP_PREC; i++) { + a->dp[i] = 0; + } + + /* set the used to zero, allocated digits to the default precision + * and sign to positive */ + a->used = 0; + a->alloc = MP_PREC; + a->sign = MP_ZPOS; + + return MP_OKAY; +} + + +/* clear one (frees) */ +void +mp_clear (mp_int * a) +{ + int i; + + if (a == NULL) + return; + + /* only do anything if a hasn't been freed previously */ + if (a->dp != NULL) { + /* first zero the digits */ + for (i = 0; i < a->used; i++) { + a->dp[i] = 0; + } + + /* free ram */ + XFREE(a->dp, 0, DYNAMIC_TYPE_BIGINT); + + /* reset members to make debugging 
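/* Illustrative sketch (hypothetical caller code, not part of this patch):
 * the usual lifecycle for the API added in this file -- group-initialize
 * with mp_init_multi(), pass NULL for unused slots, and mp_clear() every
 * int on every exit path. The header path is an assumption; the later
 * caller sketches in this review assume the same include. */
#include <wolfssl/wolfcrypt/integer.h>   /* assumed public header */

static int example_lifecycle(void)
{
    mp_int a, b, c;
    int ret;

    /* unused trailing slots of mp_init_multi() may be NULL */
    if ((ret = mp_init_multi(&a, &b, &c, NULL, NULL, NULL)) != MP_OKAY)
        return ret;

    mp_set(&a, 7);                    /* a = 7 (a single digit)  */
    ret = mp_add(&a, &a, &b);         /* b = a + a               */
    if (ret == MP_OKAY)
        ret = mp_mul(&a, &b, &c);     /* c = a * b               */

    mp_clear(&a);                     /* clearing twice is harmless */
    mp_clear(&b);
    mp_clear(&c);
    return ret;
}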
easier */ + a->dp = NULL; + a->alloc = a->used = 0; + a->sign = MP_ZPOS; + } +} + + +/* get the size for an unsigned equivalent */ +int mp_unsigned_bin_size (mp_int * a) +{ + int size = mp_count_bits (a); + return (size / 8 + ((size & 7) != 0 ? 1 : 0)); +} + + +/* returns the number of bits in an int */ +int +mp_count_bits (mp_int * a) +{ + int r; + mp_digit q; + + /* shortcut */ + if (a->used == 0) { + return 0; + } + + /* get number of digits and add that */ + r = (a->used - 1) * DIGIT_BIT; + + /* take the last digit and count the bits in it */ + q = a->dp[a->used - 1]; + while (q > ((mp_digit) 0)) { + ++r; + q >>= ((mp_digit) 1); + } + return r; +} + + +int mp_leading_bit (mp_int * a) +{ + int bit = 0; + mp_int t; + + if (mp_init_copy(&t, a) != MP_OKAY) + return 0; + + while (mp_iszero(&t) == 0) { +#ifndef MP_8BIT + bit = (t.dp[0] & 0x80) != 0; +#else + bit = (t.dp[0] | ((t.dp[1] & 0x01) << 7)) & 0x80 != 0; +#endif + if (mp_div_2d (&t, 8, &t, NULL) != MP_OKAY) + break; + } + mp_clear(&t); + return bit; +} + + +/* store in unsigned [big endian] format */ +int mp_to_unsigned_bin (mp_int * a, unsigned char *b) +{ + int x, res; + mp_int t; + + if ((res = mp_init_copy (&t, a)) != MP_OKAY) { + return res; + } + + x = 0; + while (mp_iszero (&t) == 0) { +#ifndef MP_8BIT + b[x++] = (unsigned char) (t.dp[0] & 255); +#else + b[x++] = (unsigned char) (t.dp[0] | ((t.dp[1] & 0x01) << 7)); +#endif + if ((res = mp_div_2d (&t, 8, &t, NULL)) != MP_OKAY) { + mp_clear (&t); + return res; + } + } + bn_reverse (b, x); + mp_clear (&t); + return MP_OKAY; +} + + +/* creates "a" then copies b into it */ +int mp_init_copy (mp_int * a, mp_int * b) +{ + int res; + + if ((res = mp_init (a)) != MP_OKAY) { + return res; + } + return mp_copy (b, a); +} + + +/* copy, b = a */ +int +mp_copy (mp_int * a, mp_int * b) +{ + int res, n; + + /* if dst == src do nothing */ + if (a == b) { + return MP_OKAY; + } + + /* grow dest */ + if (b->alloc < a->used) { + if ((res = mp_grow (b, a->used)) != MP_OKAY) { + return res; + } + } + + /* zero b and copy the parameters over */ + { + register mp_digit *tmpa, *tmpb; + + /* pointer aliases */ + + /* source */ + tmpa = a->dp; + + /* destination */ + tmpb = b->dp; + + /* copy all the digits */ + for (n = 0; n < a->used; n++) { + *tmpb++ = *tmpa++; + } + + /* clear high digits */ + for (; n < b->used; n++) { + *tmpb++ = 0; + } + } + + /* copy used count and sign */ + b->used = a->used; + b->sign = a->sign; + return MP_OKAY; +} + + +/* grow as required */ +int mp_grow (mp_int * a, int size) +{ + int i; + mp_digit *tmp; + + /* if the alloc size is smaller alloc more ram */ + if (a->alloc < size) { + /* ensure there are always at least MP_PREC digits extra on top */ + size += (MP_PREC * 2) - (size % MP_PREC); + + /* reallocate the array a->dp + * + * We store the return in a temporary variable + * in case the operation failed we don't want + * to overwrite the dp member of a. 
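/* Illustrative sketch (hypothetical caller code, not part of this patch):
 * exporting a non-negative mp_int as a big-endian byte string with
 * mp_unsigned_bin_size()/mp_to_unsigned_bin() above, then reading it back
 * with mp_read_unsigned_bin(), which is defined further down in this file.
 * Only the magnitude is encoded; the sign is not stored. */
#include <stdlib.h>

static int example_bin_round_trip(mp_int* x)       /* assumes x >= 0 */
{
    int            len = mp_unsigned_bin_size(x);
    unsigned char* buf = (unsigned char*)malloc(len ? len : 1);
    mp_int         y;
    int            ret;

    if (buf == NULL)
        return MP_MEM;

    if ((ret = mp_to_unsigned_bin(x, buf)) == MP_OKAY &&   /* msb first */
        (ret = mp_init(&y)) == MP_OKAY) {
        ret = mp_read_unsigned_bin(&y, buf, len);
        if (ret == MP_OKAY && mp_cmp(x, &y) != MP_EQ)
            ret = MP_VAL;               /* round trip must give x back */
        mp_clear(&y);
    }
    free(buf);
    return ret;
}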
+ */ + tmp = OPT_CAST(mp_digit) XREALLOC (a->dp, sizeof (mp_digit) * size, 0, + DYNAMIC_TYPE_BIGINT); + if (tmp == NULL) { + /* reallocation failed but "a" is still valid [can be freed] */ + return MP_MEM; + } + + /* reallocation succeeded so set a->dp */ + a->dp = tmp; + + /* zero excess digits */ + i = a->alloc; + a->alloc = size; + for (; i < a->alloc; i++) { + a->dp[i] = 0; + } + } + return MP_OKAY; +} + + +/* reverse an array, used for radix code */ +void +bn_reverse (unsigned char *s, int len) +{ + int ix, iy; + unsigned char t; + + ix = 0; + iy = len - 1; + while (ix < iy) { + t = s[ix]; + s[ix] = s[iy]; + s[iy] = t; + ++ix; + --iy; + } +} + + +/* shift right by a certain bit count (store quotient in c, optional + remainder in d) */ +int mp_div_2d (mp_int * a, int b, mp_int * c, mp_int * d) +{ + int D, res; + mp_int t; + + + /* if the shift count is <= 0 then we do no work */ + if (b <= 0) { + res = mp_copy (a, c); + if (d != NULL) { + mp_zero (d); + } + return res; + } + + if ((res = mp_init (&t)) != MP_OKAY) { + return res; + } + + /* get the remainder */ + if (d != NULL) { + if ((res = mp_mod_2d (a, b, &t)) != MP_OKAY) { + mp_clear (&t); + return res; + } + } + + /* copy */ + if ((res = mp_copy (a, c)) != MP_OKAY) { + mp_clear (&t); + return res; + } + + /* shift by as many digits in the bit count */ + if (b >= (int)DIGIT_BIT) { + mp_rshd (c, b / DIGIT_BIT); + } + + /* shift any bit count < DIGIT_BIT */ + D = (b % DIGIT_BIT); + if (D != 0) { + mp_rshb(c, D); + } + mp_clamp (c); + if (d != NULL) { + mp_exch (&t, d); + } + mp_clear (&t); + return MP_OKAY; +} + + +/* set to zero */ +void mp_zero (mp_int * a) +{ + int n; + mp_digit *tmp; + + a->sign = MP_ZPOS; + a->used = 0; + + tmp = a->dp; + for (n = 0; n < a->alloc; n++) { + *tmp++ = 0; + } +} + + +/* trim unused digits + * + * This is used to ensure that leading zero digits are + * trimed and the leading "used" digit will be non-zero + * Typically very fast. Also fixes the sign if there + * are no more leading digits + */ +void +mp_clamp (mp_int * a) +{ + /* decrease used while the most significant digit is + * zero. 
+ */ + while (a->used > 0 && a->dp[a->used - 1] == 0) { + --(a->used); + } + + /* reset the sign flag if used == 0 */ + if (a->used == 0) { + a->sign = MP_ZPOS; + } +} + + +/* swap the elements of two integers, for cases where you can't simply swap the + * mp_int pointers around + */ +void +mp_exch (mp_int * a, mp_int * b) +{ + mp_int t; + + t = *a; + *a = *b; + *b = t; +} + + +/* shift right a certain number of bits */ +void mp_rshb (mp_int *c, int x) +{ + register mp_digit *tmpc, mask, shift; + mp_digit r, rr; + mp_digit D = x; + + /* mask */ + mask = (((mp_digit)1) << D) - 1; + + /* shift for lsb */ + shift = DIGIT_BIT - D; + + /* alias */ + tmpc = c->dp + (c->used - 1); + + /* carry */ + r = 0; + for (x = c->used - 1; x >= 0; x--) { + /* get the lower bits of this word in a temp */ + rr = *tmpc & mask; + + /* shift the current word and mix in the carry bits from previous word */ + *tmpc = (*tmpc >> D) | (r << shift); + --tmpc; + + /* set the carry to the carry bits of the current word found above */ + r = rr; + } +} + + +/* shift right a certain amount of digits */ +void mp_rshd (mp_int * a, int b) +{ + int x; + + /* if b <= 0 then ignore it */ + if (b <= 0) { + return; + } + + /* if b > used then simply zero it and return */ + if (a->used <= b) { + mp_zero (a); + return; + } + + { + register mp_digit *bottom, *top; + + /* shift the digits down */ + + /* bottom */ + bottom = a->dp; + + /* top [offset into digits] */ + top = a->dp + b; + + /* this is implemented as a sliding window where + * the window is b-digits long and digits from + * the top of the window are copied to the bottom + * + * e.g. + + b-2 | b-1 | b0 | b1 | b2 | ... | bb | ----> + /\ | ----> + \-------------------/ ----> + */ + for (x = 0; x < (a->used - b); x++) { + *bottom++ = *top++; + } + + /* zero the top digits */ + for (; x < a->used; x++) { + *bottom++ = 0; + } + } + + /* remove excess digits */ + a->used -= b; +} + + +/* calc a value mod 2**b */ +int +mp_mod_2d (mp_int * a, int b, mp_int * c) +{ + int x, res; + + /* if b is <= 0 then zero the int */ + if (b <= 0) { + mp_zero (c); + return MP_OKAY; + } + + /* if the modulus is larger than the value than return */ + if (b >= (int) (a->used * DIGIT_BIT)) { + res = mp_copy (a, c); + return res; + } + + /* copy */ + if ((res = mp_copy (a, c)) != MP_OKAY) { + return res; + } + + /* zero digits above the last digit of the modulus */ + for (x = (b / DIGIT_BIT) + ((b % DIGIT_BIT) == 0 ? 
0 : 1); x < c->used; x++) { + c->dp[x] = 0; + } + /* clear the digit that is not completely outside/inside the modulus */ + c->dp[b / DIGIT_BIT] &= (mp_digit) ((((mp_digit) 1) << + (((mp_digit) b) % DIGIT_BIT)) - ((mp_digit) 1)); + mp_clamp (c); + return MP_OKAY; +} + + +/* reads a unsigned char array, assumes the msb is stored first [big endian] */ +int mp_read_unsigned_bin (mp_int * a, const unsigned char *b, int c) +{ + int res; + + /* make sure there are at least two digits */ + if (a->alloc < 2) { + if ((res = mp_grow(a, 2)) != MP_OKAY) { + return res; + } + } + + /* zero the int */ + mp_zero (a); + + /* read the bytes in */ + while (c-- > 0) { + if ((res = mp_mul_2d (a, 8, a)) != MP_OKAY) { + return res; + } + +#ifndef MP_8BIT + a->dp[0] |= *b++; + a->used += 1; +#else + a->dp[0] = (*b & MP_MASK); + a->dp[1] |= ((*b++ >> 7U) & 1); + a->used += 2; +#endif + } + mp_clamp (a); + return MP_OKAY; +} + + +/* shift left by a certain bit count */ +int mp_mul_2d (mp_int * a, int b, mp_int * c) +{ + mp_digit d; + int res; + + /* copy */ + if (a != c) { + if ((res = mp_copy (a, c)) != MP_OKAY) { + return res; + } + } + + if (c->alloc < (int)(c->used + b/DIGIT_BIT + 1)) { + if ((res = mp_grow (c, c->used + b / DIGIT_BIT + 1)) != MP_OKAY) { + return res; + } + } + + /* shift by as many digits in the bit count */ + if (b >= (int)DIGIT_BIT) { + if ((res = mp_lshd (c, b / DIGIT_BIT)) != MP_OKAY) { + return res; + } + } + + /* shift any bit count < DIGIT_BIT */ + d = (mp_digit) (b % DIGIT_BIT); + if (d != 0) { + register mp_digit *tmpc, shift, mask, r, rr; + register int x; + + /* bitmask for carries */ + mask = (((mp_digit)1) << d) - 1; + + /* shift for msbs */ + shift = DIGIT_BIT - d; + + /* alias */ + tmpc = c->dp; + + /* carry */ + r = 0; + for (x = 0; x < c->used; x++) { + /* get the higher bits of the current word */ + rr = (*tmpc >> shift) & mask; + + /* shift the current word and OR in the carry */ + *tmpc = ((*tmpc << d) | r) & MP_MASK; + ++tmpc; + + /* set the carry to the carry bits of the current word */ + r = rr; + } + + /* set final carry */ + if (r != 0) { + c->dp[(c->used)++] = r; + } + } + mp_clamp (c); + return MP_OKAY; +} + + +/* shift left a certain amount of digits */ +int mp_lshd (mp_int * a, int b) +{ + int x, res; + + /* if its less than zero return */ + if (b <= 0) { + return MP_OKAY; + } + + /* grow to fit the new digits */ + if (a->alloc < a->used + b) { + if ((res = mp_grow (a, a->used + b)) != MP_OKAY) { + return res; + } + } + + { + register mp_digit *top, *bottom; + + /* increment the used by the shift amount then copy upwards */ + a->used += b; + + /* top */ + top = a->dp + a->used - 1; + + /* base */ + bottom = a->dp + a->used - 1 - b; + + /* much like mp_rshd this is implemented using a sliding window + * except the window goes the otherway around. Copying from + * the bottom to the top. see bn_mp_rshd.c for more info. + */ + for (x = a->used - 1; x >= b; x--) { + *top-- = *bottom--; + } + + /* zero the lower digits */ + top = a->dp; + for (x = 0; x < b; x++) { + *top++ = 0; + } + } + return MP_OKAY; +} + + +/* this is a shell function that calls either the normal or Montgomery + * exptmod functions. 
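/* Illustrative sketch (hypothetical caller code, not part of this patch):
 * mp_mul_2d() and mp_div_2d() above behave like "<< b" and ">> b" on big
 * integers, and mp_div_2d() can hand back the low b bits (the value mod
 * 2**b) in the same call. */
static int example_shifts(void)
{
    mp_int x, q, r;
    int ret;

    if ((ret = mp_init_multi(&x, &q, &r, NULL, NULL, NULL)) != MP_OKAY)
        return ret;

    mp_set(&x, 1);
    if ((ret = mp_mul_2d(&x, 100, &x)) == MP_OKAY)   /* x = 2**100        */
        ret = mp_div_2d(&x, 64, &q, &r);             /* q = 2**36, r = 0  */

    if (ret == MP_OKAY &&
        (mp_count_bits(&q) != 37 || mp_iszero(&r) != MP_YES))
        ret = MP_VAL;

    mp_clear(&x); mp_clear(&q); mp_clear(&r);
    return ret;
}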
Originally the call to the montgomery code was + * embedded in the normal function but that wasted alot of stack space + * for nothing (since 99% of the time the Montgomery code would be called) + */ +int mp_exptmod (mp_int * G, mp_int * X, mp_int * P, mp_int * Y) +{ + int dr; + + /* modulus P must be positive */ + if (P->sign == MP_NEG) { + return MP_VAL; + } + + /* if exponent X is negative we have to recurse */ + if (X->sign == MP_NEG) { +#ifdef BN_MP_INVMOD_C + mp_int tmpG, tmpX; + int err; + + /* first compute 1/G mod P */ + if ((err = mp_init(&tmpG)) != MP_OKAY) { + return err; + } + if ((err = mp_invmod(G, P, &tmpG)) != MP_OKAY) { + mp_clear(&tmpG); + return err; + } + + /* now get |X| */ + if ((err = mp_init(&tmpX)) != MP_OKAY) { + mp_clear(&tmpG); + return err; + } + if ((err = mp_abs(X, &tmpX)) != MP_OKAY) { + mp_clear(&tmpG); + mp_clear(&tmpX); + return err; + } + + /* and now compute (1/G)**|X| instead of G**X [X < 0] */ + err = mp_exptmod(&tmpG, &tmpX, P, Y); + mp_clear(&tmpG); + mp_clear(&tmpX); + return err; +#else + /* no invmod */ + return MP_VAL; +#endif + } + +/* modified diminished radix reduction */ +#if defined(BN_MP_REDUCE_IS_2K_L_C) && defined(BN_MP_REDUCE_2K_L_C) && \ + defined(BN_S_MP_EXPTMOD_C) + if (mp_reduce_is_2k_l(P) == MP_YES) { + return s_mp_exptmod(G, X, P, Y, 1); + } +#endif + +#ifdef BN_MP_DR_IS_MODULUS_C + /* is it a DR modulus? */ + dr = mp_dr_is_modulus(P); +#else + /* default to no */ + dr = 0; +#endif + +#ifdef BN_MP_REDUCE_IS_2K_C + /* if not, is it a unrestricted DR modulus? */ + if (dr == 0) { + dr = mp_reduce_is_2k(P) << 1; + } +#endif + + /* if the modulus is odd or dr != 0 use the montgomery method */ +#ifdef BN_MP_EXPTMOD_FAST_C + if (mp_isodd (P) == 1 || dr != 0) { + return mp_exptmod_fast (G, X, P, Y, dr); + } else { +#endif +#ifdef BN_S_MP_EXPTMOD_C + /* otherwise use the generic Barrett reduction technique */ + return s_mp_exptmod (G, X, P, Y, 0); +#else + /* no exptmod for evens */ + return MP_VAL; +#endif +#ifdef BN_MP_EXPTMOD_FAST_C + } +#endif +} + + +/* b = |a| + * + * Simple function copies the input and fixes the sign to positive + */ +int +mp_abs (mp_int * a, mp_int * b) +{ + int res; + + /* copy a to b */ + if (a != b) { + if ((res = mp_copy (a, b)) != MP_OKAY) { + return res; + } + } + + /* force the sign of b to positive */ + b->sign = MP_ZPOS; + + return MP_OKAY; +} + + +/* hac 14.61, pp608 */ +int mp_invmod (mp_int * a, mp_int * b, mp_int * c) +{ + /* b cannot be negative */ + if (b->sign == MP_NEG || mp_iszero(b) == 1) { + return MP_VAL; + } + +#ifdef BN_FAST_MP_INVMOD_C + /* if the modulus is odd we can use a faster routine instead */ + if (mp_isodd (b) == 1) { + return fast_mp_invmod (a, b, c); + } +#endif + +#ifdef BN_MP_INVMOD_SLOW_C + return mp_invmod_slow(a, b, c); +#endif +} + + +/* computes the modular inverse via binary extended euclidean algorithm, + * that is c = 1/a mod b + * + * Based on slow invmod except this is optimized for the case where b is + * odd as per HAC Note 14.64 on pp. 610 + */ +int fast_mp_invmod (mp_int * a, mp_int * b, mp_int * c) +{ + mp_int x, y, u, v, B, D; + int res, neg; + + /* 2. [modified] b must be odd */ + if (mp_iseven (b) == 1) { + return MP_VAL; + } + + /* init all our temps */ + if ((res = mp_init_multi(&x, &y, &u, &v, &B, &D)) != MP_OKAY) { + return res; + } + + /* x == modulus, y == value to invert */ + if ((res = mp_copy (b, &x)) != MP_OKAY) { + goto LBL_ERR; + } + + /* we need y = |a| */ + if ((res = mp_mod (a, b, &y)) != MP_OKAY) { + goto LBL_ERR; + } + + /* 3. 
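/* Illustrative sketch (hypothetical caller code, not part of this patch):
 * the typical call into the mp_exptmod() dispatcher above, y = g**x mod p.
 * Small constants via mp_set() keep the example short; real callers load
 * operands with mp_read_unsigned_bin() or similar. */
static int example_exptmod(void)
{
    mp_int g, x, p, y;
    int ret;

    if ((ret = mp_init_multi(&g, &x, &p, &y, NULL, NULL)) != MP_OKAY)
        return ret;

    mp_set(&g, 7);                        /* base                        */
    mp_set(&x, 60);                       /* exponent                    */
    mp_set(&p, 101);                      /* odd modulus -> Montgomery   */

    ret = mp_exptmod(&g, &x, &p, &y);     /* y = 7**60 mod 101 = 36      */

    if (ret == MP_OKAY && mp_cmp_d(&y, 36) != MP_EQ)
        ret = MP_VAL;

    mp_clear(&g); mp_clear(&x); mp_clear(&p); mp_clear(&y);
    return ret;
}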
u=x, v=y, A=1, B=0, C=0,D=1 */ + if ((res = mp_copy (&x, &u)) != MP_OKAY) { + goto LBL_ERR; + } + if ((res = mp_copy (&y, &v)) != MP_OKAY) { + goto LBL_ERR; + } + mp_set (&D, 1); + +top: + /* 4. while u is even do */ + while (mp_iseven (&u) == 1) { + /* 4.1 u = u/2 */ + if ((res = mp_div_2 (&u, &u)) != MP_OKAY) { + goto LBL_ERR; + } + /* 4.2 if B is odd then */ + if (mp_isodd (&B) == 1) { + if ((res = mp_sub (&B, &x, &B)) != MP_OKAY) { + goto LBL_ERR; + } + } + /* B = B/2 */ + if ((res = mp_div_2 (&B, &B)) != MP_OKAY) { + goto LBL_ERR; + } + } + + /* 5. while v is even do */ + while (mp_iseven (&v) == 1) { + /* 5.1 v = v/2 */ + if ((res = mp_div_2 (&v, &v)) != MP_OKAY) { + goto LBL_ERR; + } + /* 5.2 if D is odd then */ + if (mp_isodd (&D) == 1) { + /* D = (D-x)/2 */ + if ((res = mp_sub (&D, &x, &D)) != MP_OKAY) { + goto LBL_ERR; + } + } + /* D = D/2 */ + if ((res = mp_div_2 (&D, &D)) != MP_OKAY) { + goto LBL_ERR; + } + } + + /* 6. if u >= v then */ + if (mp_cmp (&u, &v) != MP_LT) { + /* u = u - v, B = B - D */ + if ((res = mp_sub (&u, &v, &u)) != MP_OKAY) { + goto LBL_ERR; + } + + if ((res = mp_sub (&B, &D, &B)) != MP_OKAY) { + goto LBL_ERR; + } + } else { + /* v - v - u, D = D - B */ + if ((res = mp_sub (&v, &u, &v)) != MP_OKAY) { + goto LBL_ERR; + } + + if ((res = mp_sub (&D, &B, &D)) != MP_OKAY) { + goto LBL_ERR; + } + } + + /* if not zero goto step 4 */ + if (mp_iszero (&u) == 0) { + goto top; + } + + /* now a = C, b = D, gcd == g*v */ + + /* if v != 1 then there is no inverse */ + if (mp_cmp_d (&v, 1) != MP_EQ) { + res = MP_VAL; + goto LBL_ERR; + } + + /* b is now the inverse */ + neg = a->sign; + while (D.sign == MP_NEG) { + if ((res = mp_add (&D, b, &D)) != MP_OKAY) { + goto LBL_ERR; + } + } + mp_exch (&D, c); + c->sign = neg; + res = MP_OKAY; + +LBL_ERR:mp_clear(&x); + mp_clear(&y); + mp_clear(&u); + mp_clear(&v); + mp_clear(&B); + mp_clear(&D); + return res; +} + + +/* hac 14.61, pp608 */ +int mp_invmod_slow (mp_int * a, mp_int * b, mp_int * c) +{ + mp_int x, y, u, v, A, B, C, D; + int res; + + /* b cannot be negative */ + if (b->sign == MP_NEG || mp_iszero(b) == 1) { + return MP_VAL; + } + + /* init temps */ + if ((res = mp_init_multi(&x, &y, &u, &v, + &A, &B)) != MP_OKAY) { + return res; + } + + /* init rest of tmps temps */ + if ((res = mp_init_multi(&C, &D, 0, 0, 0, 0)) != MP_OKAY) { + return res; + } + + /* x = a, y = b */ + if ((res = mp_mod(a, b, &x)) != MP_OKAY) { + goto LBL_ERR; + } + if ((res = mp_copy (b, &y)) != MP_OKAY) { + goto LBL_ERR; + } + + /* 2. [modified] if x,y are both even then return an error! */ + if (mp_iseven (&x) == 1 && mp_iseven (&y) == 1) { + res = MP_VAL; + goto LBL_ERR; + } + + /* 3. u=x, v=y, A=1, B=0, C=0,D=1 */ + if ((res = mp_copy (&x, &u)) != MP_OKAY) { + goto LBL_ERR; + } + if ((res = mp_copy (&y, &v)) != MP_OKAY) { + goto LBL_ERR; + } + mp_set (&A, 1); + mp_set (&D, 1); + +top: + /* 4. while u is even do */ + while (mp_iseven (&u) == 1) { + /* 4.1 u = u/2 */ + if ((res = mp_div_2 (&u, &u)) != MP_OKAY) { + goto LBL_ERR; + } + /* 4.2 if A or B is odd then */ + if (mp_isodd (&A) == 1 || mp_isodd (&B) == 1) { + /* A = (A+y)/2, B = (B-x)/2 */ + if ((res = mp_add (&A, &y, &A)) != MP_OKAY) { + goto LBL_ERR; + } + if ((res = mp_sub (&B, &x, &B)) != MP_OKAY) { + goto LBL_ERR; + } + } + /* A = A/2, B = B/2 */ + if ((res = mp_div_2 (&A, &A)) != MP_OKAY) { + goto LBL_ERR; + } + if ((res = mp_div_2 (&B, &B)) != MP_OKAY) { + goto LBL_ERR; + } + } + + /* 5. 
while v is even do */ + while (mp_iseven (&v) == 1) { + /* 5.1 v = v/2 */ + if ((res = mp_div_2 (&v, &v)) != MP_OKAY) { + goto LBL_ERR; + } + /* 5.2 if C or D is odd then */ + if (mp_isodd (&C) == 1 || mp_isodd (&D) == 1) { + /* C = (C+y)/2, D = (D-x)/2 */ + if ((res = mp_add (&C, &y, &C)) != MP_OKAY) { + goto LBL_ERR; + } + if ((res = mp_sub (&D, &x, &D)) != MP_OKAY) { + goto LBL_ERR; + } + } + /* C = C/2, D = D/2 */ + if ((res = mp_div_2 (&C, &C)) != MP_OKAY) { + goto LBL_ERR; + } + if ((res = mp_div_2 (&D, &D)) != MP_OKAY) { + goto LBL_ERR; + } + } + + /* 6. if u >= v then */ + if (mp_cmp (&u, &v) != MP_LT) { + /* u = u - v, A = A - C, B = B - D */ + if ((res = mp_sub (&u, &v, &u)) != MP_OKAY) { + goto LBL_ERR; + } + + if ((res = mp_sub (&A, &C, &A)) != MP_OKAY) { + goto LBL_ERR; + } + + if ((res = mp_sub (&B, &D, &B)) != MP_OKAY) { + goto LBL_ERR; + } + } else { + /* v - v - u, C = C - A, D = D - B */ + if ((res = mp_sub (&v, &u, &v)) != MP_OKAY) { + goto LBL_ERR; + } + + if ((res = mp_sub (&C, &A, &C)) != MP_OKAY) { + goto LBL_ERR; + } + + if ((res = mp_sub (&D, &B, &D)) != MP_OKAY) { + goto LBL_ERR; + } + } + + /* if not zero goto step 4 */ + if (mp_iszero (&u) == 0) + goto top; + + /* now a = C, b = D, gcd == g*v */ + + /* if v != 1 then there is no inverse */ + if (mp_cmp_d (&v, 1) != MP_EQ) { + res = MP_VAL; + goto LBL_ERR; + } + + /* if its too low */ + while (mp_cmp_d(&C, 0) == MP_LT) { + if ((res = mp_add(&C, b, &C)) != MP_OKAY) { + goto LBL_ERR; + } + } + + /* too big */ + while (mp_cmp_mag(&C, b) != MP_LT) { + if ((res = mp_sub(&C, b, &C)) != MP_OKAY) { + goto LBL_ERR; + } + } + + /* C is now the inverse */ + mp_exch (&C, c); + res = MP_OKAY; +LBL_ERR:mp_clear(&x); + mp_clear(&y); + mp_clear(&u); + mp_clear(&v); + mp_clear(&A); + mp_clear(&B); + mp_clear(&C); + mp_clear(&D); + return res; +} + + +/* compare maginitude of two ints (unsigned) */ +int mp_cmp_mag (mp_int * a, mp_int * b) +{ + int n; + mp_digit *tmpa, *tmpb; + + /* compare based on # of non-zero digits */ + if (a->used > b->used) { + return MP_GT; + } + + if (a->used < b->used) { + return MP_LT; + } + + /* alias for a */ + tmpa = a->dp + (a->used - 1); + + /* alias for b */ + tmpb = b->dp + (a->used - 1); + + /* compare based on digits */ + for (n = 0; n < a->used; ++n, --tmpa, --tmpb) { + if (*tmpa > *tmpb) { + return MP_GT; + } + + if (*tmpa < *tmpb) { + return MP_LT; + } + } + return MP_EQ; +} + + +/* compare two ints (signed)*/ +int +mp_cmp (mp_int * a, mp_int * b) +{ + /* compare based on sign */ + if (a->sign != b->sign) { + if (a->sign == MP_NEG) { + return MP_LT; + } else { + return MP_GT; + } + } + + /* compare digits */ + if (a->sign == MP_NEG) { + /* if negative compare opposite direction */ + return mp_cmp_mag(b, a); + } else { + return mp_cmp_mag(a, b); + } +} + + +/* compare a digit */ +int mp_cmp_d(mp_int * a, mp_digit b) +{ + /* compare based on sign */ + if (a->sign == MP_NEG) { + return MP_LT; + } + + /* compare based on magnitude */ + if (a->used > 1) { + return MP_GT; + } + + /* compare the only digit of a to b */ + if (a->dp[0] > b) { + return MP_GT; + } else if (a->dp[0] < b) { + return MP_LT; + } else { + return MP_EQ; + } +} + + +/* set to a digit */ +void mp_set (mp_int * a, mp_digit b) +{ + mp_zero (a); + a->dp[0] = b & MP_MASK; + a->used = (a->dp[0] != 0) ? 
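/* Illustrative sketch (hypothetical caller code, not part of this patch):
 * checking an inverse produced by mp_invmod() above -- (a * a^-1) mod b
 * must compare equal to 1. mp_mulmod() is defined later in this file. */
static int example_invmod(void)
{
    mp_int a, b, inv, chk;
    int ret;

    if ((ret = mp_init_multi(&a, &b, &inv, &chk, NULL, NULL)) != MP_OKAY)
        return ret;

    mp_set(&a, 3);
    mp_set(&b, 40);                                   /* gcd(3, 40) == 1  */

    if ((ret = mp_invmod(&a, &b, &inv)) == MP_OKAY)   /* inv = 27         */
        ret = mp_mulmod(&a, &inv, &b, &chk);          /* chk = 81 mod 40  */

    if (ret == MP_OKAY && mp_cmp_d(&chk, 1) != MP_EQ)
        ret = MP_VAL;

    mp_clear(&a); mp_clear(&b); mp_clear(&inv); mp_clear(&chk);
    return ret;
}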
1 : 0; +} + + +/* c = a mod b, 0 <= c < b */ +int +mp_mod (mp_int * a, mp_int * b, mp_int * c) +{ + mp_int t; + int res; + + if ((res = mp_init (&t)) != MP_OKAY) { + return res; + } + + if ((res = mp_div (a, b, NULL, &t)) != MP_OKAY) { + mp_clear (&t); + return res; + } + + if (t.sign != b->sign) { + res = mp_add (b, &t, c); + } else { + res = MP_OKAY; + mp_exch (&t, c); + } + + mp_clear (&t); + return res; +} + + +/* slower bit-bang division... also smaller */ +int mp_div(mp_int * a, mp_int * b, mp_int * c, mp_int * d) +{ + mp_int ta, tb, tq, q; + int res, n, n2; + + /* is divisor zero ? */ + if (mp_iszero (b) == 1) { + return MP_VAL; + } + + /* if a < b then q=0, r = a */ + if (mp_cmp_mag (a, b) == MP_LT) { + if (d != NULL) { + res = mp_copy (a, d); + } else { + res = MP_OKAY; + } + if (c != NULL) { + mp_zero (c); + } + return res; + } + + /* init our temps */ + if ((res = mp_init_multi(&ta, &tb, &tq, &q, 0, 0)) != MP_OKAY) { + return res; + } + + + mp_set(&tq, 1); + n = mp_count_bits(a) - mp_count_bits(b); + if (((res = mp_abs(a, &ta)) != MP_OKAY) || + ((res = mp_abs(b, &tb)) != MP_OKAY) || + ((res = mp_mul_2d(&tb, n, &tb)) != MP_OKAY) || + ((res = mp_mul_2d(&tq, n, &tq)) != MP_OKAY)) { + goto LBL_ERR; + } + + while (n-- >= 0) { + if (mp_cmp(&tb, &ta) != MP_GT) { + if (((res = mp_sub(&ta, &tb, &ta)) != MP_OKAY) || + ((res = mp_add(&q, &tq, &q)) != MP_OKAY)) { + goto LBL_ERR; + } + } + if (((res = mp_div_2d(&tb, 1, &tb, NULL)) != MP_OKAY) || + ((res = mp_div_2d(&tq, 1, &tq, NULL)) != MP_OKAY)) { + goto LBL_ERR; + } + } + + /* now q == quotient and ta == remainder */ + n = a->sign; + n2 = (a->sign == b->sign ? MP_ZPOS : MP_NEG); + if (c != NULL) { + mp_exch(c, &q); + c->sign = (mp_iszero(c) == MP_YES) ? MP_ZPOS : n2; + } + if (d != NULL) { + mp_exch(d, &ta); + d->sign = (mp_iszero(d) == MP_YES) ? MP_ZPOS : n; + } +LBL_ERR: + mp_clear(&ta); + mp_clear(&tb); + mp_clear(&tq); + mp_clear(&q); + return res; +} + + +/* b = a/2 */ +int mp_div_2(mp_int * a, mp_int * b) +{ + int x, res, oldused; + + /* copy */ + if (b->alloc < a->used) { + if ((res = mp_grow (b, a->used)) != MP_OKAY) { + return res; + } + } + + oldused = b->used; + b->used = a->used; + { + register mp_digit r, rr, *tmpa, *tmpb; + + /* source alias */ + tmpa = a->dp + b->used - 1; + + /* dest alias */ + tmpb = b->dp + b->used - 1; + + /* carry */ + r = 0; + for (x = b->used - 1; x >= 0; x--) { + /* get the carry for the next iteration */ + rr = *tmpa & 1; + + /* shift the current digit, add in carry and store */ + *tmpb-- = (*tmpa-- >> 1) | (r << (DIGIT_BIT - 1)); + + /* forward carry to next iteration */ + r = rr; + } + + /* zero excess digits */ + tmpb = b->dp + b->used; + for (x = b->used; x < oldused; x++) { + *tmpb++ = 0; + } + } + b->sign = a->sign; + mp_clamp (b); + return MP_OKAY; +} + + +/* high level addition (handles signs) */ +int mp_add (mp_int * a, mp_int * b, mp_int * c) +{ + int sa, sb, res; + + /* get sign of both inputs */ + sa = a->sign; + sb = b->sign; + + /* handle two cases, not four */ + if (sa == sb) { + /* both positive or both negative */ + /* add their magnitudes, copy the sign */ + c->sign = sa; + res = s_mp_add (a, b, c); + } else { + /* one positive, the other negative */ + /* subtract the one with the greater magnitude from */ + /* the one of the lesser magnitude. The result gets */ + /* the sign of the one with the greater magnitude. 
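/* Illustrative sketch (hypothetical caller code, not part of this patch):
 * mp_mod() above follows the "0 <= c < b" convention for positive b, so a
 * negative dividend still yields a non-negative residue. The sign field is
 * set directly here only because this file defines no negation helper. */
static int example_mod_sign(void)
{
    mp_int a, b, r;
    int ret;

    if ((ret = mp_init_multi(&a, &b, &r, NULL, NULL, NULL)) != MP_OKAY)
        return ret;

    mp_set(&a, 7);
    a.sign = MP_NEG;                                  /* a = -7          */
    mp_set(&b, 5);

    if ((ret = mp_mod(&a, &b, &r)) == MP_OKAY &&      /* r = -7 mod 5    */
        mp_cmp_d(&r, 3) != MP_EQ)                     /* ... which is 3  */
        ret = MP_VAL;

    mp_clear(&a); mp_clear(&b); mp_clear(&r);
    return ret;
}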
*/ + if (mp_cmp_mag (a, b) == MP_LT) { + c->sign = sb; + res = s_mp_sub (b, a, c); + } else { + c->sign = sa; + res = s_mp_sub (a, b, c); + } + } + return res; +} + + +/* low level addition, based on HAC pp.594, Algorithm 14.7 */ +int +s_mp_add (mp_int * a, mp_int * b, mp_int * c) +{ + mp_int *x; + int olduse, res, min, max; + + /* find sizes, we let |a| <= |b| which means we have to sort + * them. "x" will point to the input with the most digits + */ + if (a->used > b->used) { + min = b->used; + max = a->used; + x = a; + } else { + min = a->used; + max = b->used; + x = b; + } + + /* init result */ + if (c->alloc < max + 1) { + if ((res = mp_grow (c, max + 1)) != MP_OKAY) { + return res; + } + } + + /* get old used digit count and set new one */ + olduse = c->used; + c->used = max + 1; + + { + register mp_digit u, *tmpa, *tmpb, *tmpc; + register int i; + + /* alias for digit pointers */ + + /* first input */ + tmpa = a->dp; + + /* second input */ + tmpb = b->dp; + + /* destination */ + tmpc = c->dp; + + /* zero the carry */ + u = 0; + for (i = 0; i < min; i++) { + /* Compute the sum at one digit, T[i] = A[i] + B[i] + U */ + *tmpc = *tmpa++ + *tmpb++ + u; + + /* U = carry bit of T[i] */ + u = *tmpc >> ((mp_digit)DIGIT_BIT); + + /* take away carry bit from T[i] */ + *tmpc++ &= MP_MASK; + } + + /* now copy higher words if any, that is in A+B + * if A or B has more digits add those in + */ + if (min != max) { + for (; i < max; i++) { + /* T[i] = X[i] + U */ + *tmpc = x->dp[i] + u; + + /* U = carry bit of T[i] */ + u = *tmpc >> ((mp_digit)DIGIT_BIT); + + /* take away carry bit from T[i] */ + *tmpc++ &= MP_MASK; + } + } + + /* add carry */ + *tmpc++ = u; + + /* clear digits above oldused */ + for (i = c->used; i < olduse; i++) { + *tmpc++ = 0; + } + } + + mp_clamp (c); + return MP_OKAY; +} + + +/* low level subtraction (assumes |a| > |b|), HAC pp.595 Algorithm 14.9 */ +int +s_mp_sub (mp_int * a, mp_int * b, mp_int * c) +{ + int olduse, res, min, max; + + /* find sizes */ + min = b->used; + max = a->used; + + /* init result */ + if (c->alloc < max) { + if ((res = mp_grow (c, max)) != MP_OKAY) { + return res; + } + } + olduse = c->used; + c->used = max; + + { + register mp_digit u, *tmpa, *tmpb, *tmpc; + register int i; + + /* alias for digit pointers */ + tmpa = a->dp; + tmpb = b->dp; + tmpc = c->dp; + + /* set carry to zero */ + u = 0; + for (i = 0; i < min; i++) { + /* T[i] = A[i] - B[i] - U */ + *tmpc = *tmpa++ - *tmpb++ - u; + + /* U = carry bit of T[i] + * Note this saves performing an AND operation since + * if a carry does occur it will propagate all the way to the + * MSB. As a result a single shift is enough to get the carry + */ + u = *tmpc >> ((mp_digit)(CHAR_BIT * sizeof (mp_digit) - 1)); + + /* Clear carry from T[i] */ + *tmpc++ &= MP_MASK; + } + + /* now copy higher words if any, e.g. if A has more digits than B */ + for (; i < max; i++) { + /* T[i] = A[i] - U */ + *tmpc = *tmpa++ - u; + + /* U = carry bit of T[i] */ + u = *tmpc >> ((mp_digit)(CHAR_BIT * sizeof (mp_digit) - 1)); + + /* Clear carry from T[i] */ + *tmpc++ &= MP_MASK; + } + + /* clear digits above used (since we may not have grown result above) */ + for (i = c->used; i < olduse; i++) { + *tmpc++ = 0; + } + } + + mp_clamp (c); + return MP_OKAY; +} + + +/* high level subtraction (handles signs) */ +int +mp_sub (mp_int * a, mp_int * b, mp_int * c) +{ + int sa, sb, res; + + sa = a->sign; + sb = b->sign; + + if (sa != sb) { + /* subtract a negative from a positive, OR */ + /* subtract a positive from a negative. 
*/ + /* In either case, ADD their magnitudes, */ + /* and use the sign of the first number. */ + c->sign = sa; + res = s_mp_add (a, b, c); + } else { + /* subtract a positive from a positive, OR */ + /* subtract a negative from a negative. */ + /* First, take the difference between their */ + /* magnitudes, then... */ + if (mp_cmp_mag (a, b) != MP_LT) { + /* Copy the sign from the first */ + c->sign = sa; + /* The first has a larger or equal magnitude */ + res = s_mp_sub (a, b, c); + } else { + /* The result has the *opposite* sign from */ + /* the first number. */ + c->sign = (sa == MP_ZPOS) ? MP_NEG : MP_ZPOS; + /* The second has a larger magnitude */ + res = s_mp_sub (b, a, c); + } + } + return res; +} + + +/* determines if reduce_2k_l can be used */ +int mp_reduce_is_2k_l(mp_int *a) +{ + int ix, iy; + + if (a->used == 0) { + return MP_NO; + } else if (a->used == 1) { + return MP_YES; + } else if (a->used > 1) { + /* if more than half of the digits are -1 we're sold */ + for (iy = ix = 0; ix < a->used; ix++) { + if (a->dp[ix] == MP_MASK) { + ++iy; + } + } + return (iy >= (a->used/2)) ? MP_YES : MP_NO; + + } + return MP_NO; +} + + +/* determines if mp_reduce_2k can be used */ +int mp_reduce_is_2k(mp_int *a) +{ + int ix, iy, iw; + mp_digit iz; + + if (a->used == 0) { + return MP_NO; + } else if (a->used == 1) { + return MP_YES; + } else if (a->used > 1) { + iy = mp_count_bits(a); + iz = 1; + iw = 1; + + /* Test every bit from the second digit up, must be 1 */ + for (ix = DIGIT_BIT; ix < iy; ix++) { + if ((a->dp[iw] & iz) == 0) { + return MP_NO; + } + iz <<= 1; + if (iz > (mp_digit)MP_MASK) { + ++iw; + iz = 1; + } + } + } + return MP_YES; +} + + +/* determines if a number is a valid DR modulus */ +int mp_dr_is_modulus(mp_int *a) +{ + int ix; + + /* must be at least two digits */ + if (a->used < 2) { + return 0; + } + + /* must be of the form b**k - a [a <= b] so all + * but the first digit must be equal to -1 (mod b). + */ + for (ix = 1; ix < a->used; ix++) { + if (a->dp[ix] != MP_MASK) { + return 0; + } + } + return 1; +} + + +/* computes Y == G**X mod P, HAC pp.616, Algorithm 14.85 + * + * Uses a left-to-right k-ary sliding window to compute the modular + * exponentiation. + * The value of k changes based on the size of the exponent. + * + * Uses Montgomery or Diminished Radix reduction [whichever appropriate] + */ + +#ifdef MP_LOW_MEM + #define TAB_SIZE 32 +#else + #define TAB_SIZE 256 +#endif + +int mp_exptmod_fast (mp_int * G, mp_int * X, mp_int * P, mp_int * Y, + int redmode) +{ + mp_int M[TAB_SIZE], res; + mp_digit buf, mp; + int err, bitbuf, bitcpy, bitcnt, mode, digidx, x, y, winsize; + + /* use a pointer to the reduction algorithm. This allows us to use + * one of many reduction algorithms without modding the guts of + * the code with if statements everywhere. 
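/* Illustrative sketch (hypothetical caller code, not part of this patch):
 * building a modulus of the special 2**k - d shape and confirming that the
 * detection helper above recognizes it, so mp_exptmod() will choose the
 * unrestricted diminished-radix path. mp_2expt() is defined later on. */
static int example_special_modulus(void)
{
    mp_int n, d;
    int ret;

    if ((ret = mp_init_multi(&n, &d, NULL, NULL, NULL, NULL)) != MP_OKAY)
        return ret;

    mp_set(&d, 1);
    if ((ret = mp_2expt(&n, 521)) == MP_OKAY)     /* n = 2**521       */
        ret = mp_sub(&n, &d, &n);                 /* n = 2**521 - 1   */

    /* every bit of n is set, so the 2**k - d test must say yes */
    if (ret == MP_OKAY && mp_reduce_is_2k(&n) != MP_YES)
        ret = MP_VAL;

    mp_clear(&n); mp_clear(&d);
    return ret;
}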
+ */ + int (*redux)(mp_int*,mp_int*,mp_digit); + + /* find window size */ + x = mp_count_bits (X); + if (x <= 7) { + winsize = 2; + } else if (x <= 36) { + winsize = 3; + } else if (x <= 140) { + winsize = 4; + } else if (x <= 450) { + winsize = 5; + } else if (x <= 1303) { + winsize = 6; + } else if (x <= 3529) { + winsize = 7; + } else { + winsize = 8; + } + +#ifdef MP_LOW_MEM + if (winsize > 5) { + winsize = 5; + } +#endif + + /* init M array */ + /* init first cell */ + if ((err = mp_init(&M[1])) != MP_OKAY) { + return err; + } + + /* now init the second half of the array */ + for (x = 1<<(winsize-1); x < (1 << winsize); x++) { + if ((err = mp_init(&M[x])) != MP_OKAY) { + for (y = 1<<(winsize-1); y < x; y++) { + mp_clear (&M[y]); + } + mp_clear(&M[1]); + return err; + } + } + + /* determine and setup reduction code */ + if (redmode == 0) { +#ifdef BN_MP_MONTGOMERY_SETUP_C + /* now setup montgomery */ + if ((err = mp_montgomery_setup (P, &mp)) != MP_OKAY) { + goto LBL_M; + } +#else + err = MP_VAL; + goto LBL_M; +#endif + + /* automatically pick the comba one if available (saves quite a few + calls/ifs) */ +#ifdef BN_FAST_MP_MONTGOMERY_REDUCE_C + if (((P->used * 2 + 1) < MP_WARRAY) && + P->used < (1 << ((CHAR_BIT * sizeof (mp_word)) - (2 * DIGIT_BIT)))) { + redux = fast_mp_montgomery_reduce; + } else +#endif + { +#ifdef BN_MP_MONTGOMERY_REDUCE_C + /* use slower baseline Montgomery method */ + redux = mp_montgomery_reduce; +#else + err = MP_VAL; + goto LBL_M; +#endif + } + } else if (redmode == 1) { +#if defined(BN_MP_DR_SETUP_C) && defined(BN_MP_DR_REDUCE_C) + /* setup DR reduction for moduli of the form B**k - b */ + mp_dr_setup(P, &mp); + redux = mp_dr_reduce; +#else + err = MP_VAL; + goto LBL_M; +#endif + } else { +#if defined(BN_MP_REDUCE_2K_SETUP_C) && defined(BN_MP_REDUCE_2K_C) + /* setup DR reduction for moduli of the form 2**k - b */ + if ((err = mp_reduce_2k_setup(P, &mp)) != MP_OKAY) { + goto LBL_M; + } + redux = mp_reduce_2k; +#else + err = MP_VAL; + goto LBL_M; +#endif + } + + /* setup result */ + if ((err = mp_init (&res)) != MP_OKAY) { + goto LBL_M; + } + + /* create M table + * + + * + * The first half of the table is not computed though accept for M[0] and M[1] + */ + + if (redmode == 0) { +#ifdef BN_MP_MONTGOMERY_CALC_NORMALIZATION_C + /* now we need R mod m */ + if ((err = mp_montgomery_calc_normalization (&res, P)) != MP_OKAY) { + goto LBL_RES; + } +#else + err = MP_VAL; + goto LBL_RES; +#endif + + /* now set M[1] to G * R mod m */ + if ((err = mp_mulmod (G, &res, P, &M[1])) != MP_OKAY) { + goto LBL_RES; + } + } else { + mp_set(&res, 1); + if ((err = mp_mod(G, P, &M[1])) != MP_OKAY) { + goto LBL_RES; + } + } + + /* compute the value at M[1<<(winsize-1)] by squaring M[1] (winsize-1) times*/ + if ((err = mp_copy (&M[1], &M[(mp_digit)(1 << (winsize - 1))])) != MP_OKAY) { + goto LBL_RES; + } + + for (x = 0; x < (winsize - 1); x++) { + if ((err = mp_sqr (&M[(mp_digit)(1 << (winsize - 1))], &M[(mp_digit)(1 << (winsize - 1))])) != MP_OKAY) { + goto LBL_RES; + } + if ((err = redux (&M[(mp_digit)(1 << (winsize - 1))], P, mp)) != MP_OKAY) { + goto LBL_RES; + } + } + + /* create upper table */ + for (x = (1 << (winsize - 1)) + 1; x < (1 << winsize); x++) { + if ((err = mp_mul (&M[x - 1], &M[1], &M[x])) != MP_OKAY) { + goto LBL_RES; + } + if ((err = redux (&M[x], P, mp)) != MP_OKAY) { + goto LBL_RES; + } + } + + /* set initial mode and bit cnt */ + mode = 0; + bitcnt = 1; + buf = 0; + digidx = X->used - 1; + bitcpy = 0; + bitbuf = 0; + + for (;;) { + /* grab next digit as required 
*/ + if (--bitcnt == 0) { + /* if digidx == -1 we are out of digits so break */ + if (digidx == -1) { + break; + } + /* read next digit and reset bitcnt */ + buf = X->dp[digidx--]; + bitcnt = (int)DIGIT_BIT; + } + + /* grab the next msb from the exponent */ + y = (int)(buf >> (DIGIT_BIT - 1)) & 1; + buf <<= (mp_digit)1; + + /* if the bit is zero and mode == 0 then we ignore it + * These represent the leading zero bits before the first 1 bit + * in the exponent. Technically this opt is not required but it + * does lower the # of trivial squaring/reductions used + */ + if (mode == 0 && y == 0) { + continue; + } + + /* if the bit is zero and mode == 1 then we square */ + if (mode == 1 && y == 0) { + if ((err = mp_sqr (&res, &res)) != MP_OKAY) { + goto LBL_RES; + } + if ((err = redux (&res, P, mp)) != MP_OKAY) { + goto LBL_RES; + } + continue; + } + + /* else we add it to the window */ + bitbuf |= (y << (winsize - ++bitcpy)); + mode = 2; + + if (bitcpy == winsize) { + /* ok window is filled so square as required and multiply */ + /* square first */ + for (x = 0; x < winsize; x++) { + if ((err = mp_sqr (&res, &res)) != MP_OKAY) { + goto LBL_RES; + } + if ((err = redux (&res, P, mp)) != MP_OKAY) { + goto LBL_RES; + } + } + + /* then multiply */ + if ((err = mp_mul (&res, &M[bitbuf], &res)) != MP_OKAY) { + goto LBL_RES; + } + if ((err = redux (&res, P, mp)) != MP_OKAY) { + goto LBL_RES; + } + + /* empty window and reset */ + bitcpy = 0; + bitbuf = 0; + mode = 1; + } + } + + /* if bits remain then square/multiply */ + if (mode == 2 && bitcpy > 0) { + /* square then multiply if the bit is set */ + for (x = 0; x < bitcpy; x++) { + if ((err = mp_sqr (&res, &res)) != MP_OKAY) { + goto LBL_RES; + } + if ((err = redux (&res, P, mp)) != MP_OKAY) { + goto LBL_RES; + } + + /* get next bit of the window */ + bitbuf <<= 1; + if ((bitbuf & (1 << winsize)) != 0) { + /* then multiply */ + if ((err = mp_mul (&res, &M[1], &res)) != MP_OKAY) { + goto LBL_RES; + } + if ((err = redux (&res, P, mp)) != MP_OKAY) { + goto LBL_RES; + } + } + } + } + + if (redmode == 0) { + /* fixup result if Montgomery reduction is used + * recall that any value in a Montgomery system is + * actually multiplied by R mod n. So we have + * to reduce one more time to cancel out the factor + * of R. 
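/* Concept sketch (not part of this patch): the same square-then-multiply
 * structure on plain machine words, with a fixed 4-bit window. The mp_int
 * loop above uses a *sliding* window that skips runs of zero exponent
 * bits, but the flow -- fill a window, square once per bit, multiply by a
 * precomputed table entry -- is the same. Assumes g < m and m < 2**31 so
 * the 64-bit products cannot overflow. */
#include <stdint.h>

static uint32_t window_exptmod(uint32_t g, uint32_t e, uint32_t m)
{
    uint64_t tab[16], r = 1 % m;
    int i, w;

    /* precompute g**0 .. g**15 mod m (the M[] table analogue) */
    tab[0] = 1 % m;
    for (i = 1; i < 16; i++)
        tab[i] = (tab[i - 1] * g) % m;

    /* consume the exponent four bits at a time, most significant first */
    for (i = 28; i >= 0; i -= 4) {
        for (w = 0; w < 4; w++)              /* square once per window bit */
            r = (r * r) % m;
        r = (r * tab[(e >> i) & 0xF]) % m;   /* multiply by table entry    */
    }
    return (uint32_t)r;
}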
+ */ + if ((err = redux(&res, P, mp)) != MP_OKAY) { + goto LBL_RES; + } + } + + /* swap res with Y */ + mp_exch (&res, Y); + err = MP_OKAY; +LBL_RES:mp_clear (&res); +LBL_M: + mp_clear(&M[1]); + for (x = 1<<(winsize-1); x < (1 << winsize); x++) { + mp_clear (&M[x]); + } + return err; +} + + +/* setups the montgomery reduction stuff */ +int +mp_montgomery_setup (mp_int * n, mp_digit * rho) +{ + mp_digit x, b; + +/* fast inversion mod 2**k + * + * Based on the fact that + * + * XA = 1 (mod 2**n) => (X(2-XA)) A = 1 (mod 2**2n) + * => 2*X*A - X*X*A*A = 1 + * => 2*(1) - (1) = 1 + */ + b = n->dp[0]; + + if ((b & 1) == 0) { + return MP_VAL; + } + + x = (((b + 2) & 4) << 1) + b; /* here x*a==1 mod 2**4 */ + x *= 2 - b * x; /* here x*a==1 mod 2**8 */ +#if !defined(MP_8BIT) + x *= 2 - b * x; /* here x*a==1 mod 2**16 */ +#endif +#if defined(MP_64BIT) || !(defined(MP_8BIT) || defined(MP_16BIT)) + x *= 2 - b * x; /* here x*a==1 mod 2**32 */ +#endif +#ifdef MP_64BIT + x *= 2 - b * x; /* here x*a==1 mod 2**64 */ +#endif + + /* rho = -1/m mod b */ + /* TAO, switched mp_word casts to mp_digit to shut up compiler */ + *rho = (((mp_digit)1 << ((mp_digit) DIGIT_BIT)) - x) & MP_MASK; + + return MP_OKAY; +} + + +/* computes xR**-1 == x (mod N) via Montgomery Reduction + * + * This is an optimized implementation of montgomery_reduce + * which uses the comba method to quickly calculate the columns of the + * reduction. + * + * Based on Algorithm 14.32 on pp.601 of HAC. +*/ +int fast_mp_montgomery_reduce (mp_int * x, mp_int * n, mp_digit rho) +{ + int ix, res, olduse; +#ifdef WOLFSSL_SMALL_STACK + mp_word* W; /* uses dynamic memory and slower */ +#else + mp_word W[MP_WARRAY]; +#endif + + /* get old used count */ + olduse = x->used; + + /* grow a as required */ + if (x->alloc < n->used + 1) { + if ((res = mp_grow (x, n->used + 1)) != MP_OKAY) { + return res; + } + } + +#ifdef WOLFSSL_SMALL_STACK + W = (mp_word*)XMALLOC(sizeof(mp_word) * MP_WARRAY, 0, DYNAMIC_TYPE_BIGINT); + if (W == NULL) + return MP_MEM; +#endif + + /* first we have to get the digits of the input into + * an array of double precision words W[...] + */ + { + register mp_word *_W; + register mp_digit *tmpx; + + /* alias for the W[] array */ + _W = W; + + /* alias for the digits of x*/ + tmpx = x->dp; + + /* copy the digits of a into W[0..a->used-1] */ + for (ix = 0; ix < x->used; ix++) { + *_W++ = *tmpx++; + } + + /* zero the high words of W[a->used..m->used*2] */ + for (; ix < n->used * 2 + 1; ix++) { + *_W++ = 0; + } + } + + /* now we proceed to zero successive digits + * from the least significant upwards + */ + for (ix = 0; ix < n->used; ix++) { + /* mu = ai * m' mod b + * + * We avoid a double precision multiplication (which isn't required) + * by casting the value down to a mp_digit. Note this requires + * that W[ix-1] have the carry cleared (see after the inner loop) + */ + register mp_digit mu; + mu = (mp_digit) (((W[ix] & MP_MASK) * rho) & MP_MASK); + + /* a = a + mu * m * b**i + * + * This is computed in place and on the fly. The multiplication + * by b**i is handled by offseting which columns the results + * are added to. + * + * Note the comba method normally doesn't handle carries in the + * inner loop In this case we fix the carry from the previous + * column since the Montgomery reduction requires digits of the + * result (so far) [see above] to work. This is + * handled by fixing up one carry after the inner loop. 
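/* Concept sketch (not part of this patch): the Newton/Hensel iteration
 * used by mp_montgomery_setup() above, on a plain uint32_t. Each step
 * doubles the number of correct low-order bits of the inverse, so a
 * handful of multiplies yields 1/b mod 2**32; rho is then its negation. */
#include <stdint.h>

static uint32_t inv_mod_2to32(uint32_t b)      /* b must be odd */
{
    uint32_t x = (((b + 2) & 4) << 1) + b;     /* correct mod 2**4        */
    x *= 2 - b * x;                            /* correct mod 2**8        */
    x *= 2 - b * x;                            /* correct mod 2**16       */
    x *= 2 - b * x;                            /* correct mod 2**32       */
    return x;                                  /* b * x == 1 (mod 2**32)  */
}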
The + * carry fixups are done in order so after these loops the + * first m->used words of W[] have the carries fixed + */ + { + register int iy; + register mp_digit *tmpn; + register mp_word *_W; + + /* alias for the digits of the modulus */ + tmpn = n->dp; + + /* Alias for the columns set by an offset of ix */ + _W = W + ix; + + /* inner loop */ + for (iy = 0; iy < n->used; iy++) { + *_W++ += ((mp_word)mu) * ((mp_word)*tmpn++); + } + } + + /* now fix carry for next digit, W[ix+1] */ + W[ix + 1] += W[ix] >> ((mp_word) DIGIT_BIT); + } + + /* now we have to propagate the carries and + * shift the words downward [all those least + * significant digits we zeroed]. + */ + { + register mp_digit *tmpx; + register mp_word *_W, *_W1; + + /* nox fix rest of carries */ + + /* alias for current word */ + _W1 = W + ix; + + /* alias for next word, where the carry goes */ + _W = W + ++ix; + + for (; ix <= n->used * 2 + 1; ix++) { + *_W++ += *_W1++ >> ((mp_word) DIGIT_BIT); + } + + /* copy out, A = A/b**n + * + * The result is A/b**n but instead of converting from an + * array of mp_word to mp_digit than calling mp_rshd + * we just copy them in the right order + */ + + /* alias for destination word */ + tmpx = x->dp; + + /* alias for shifted double precision result */ + _W = W + n->used; + + for (ix = 0; ix < n->used + 1; ix++) { + *tmpx++ = (mp_digit)(*_W++ & ((mp_word) MP_MASK)); + } + + /* zero oldused digits, if the input a was larger than + * m->used+1 we'll have to clear the digits + */ + for (; ix < olduse; ix++) { + *tmpx++ = 0; + } + } + + /* set the max used and clamp */ + x->used = n->used + 1; + mp_clamp (x); + +#ifdef WOLFSSL_SMALL_STACK + XFREE(W, 0, DYNAMIC_TYPE_BIGINT); +#endif + + /* if A >= m then A = A - m */ + if (mp_cmp_mag (x, n) != MP_LT) { + return s_mp_sub (x, n, x); + } + return MP_OKAY; +} + + +/* computes xR**-1 == x (mod N) via Montgomery Reduction */ +int +mp_montgomery_reduce (mp_int * x, mp_int * n, mp_digit rho) +{ + int ix, res, digs; + mp_digit mu; + + /* can the fast reduction [comba] method be used? + * + * Note that unlike in mul you're safely allowed *less* + * than the available columns [255 per default] since carries + * are fixed up in the inner loop. 
+ */ + digs = n->used * 2 + 1; + if ((digs < MP_WARRAY) && + n->used < + (1 << ((CHAR_BIT * sizeof (mp_word)) - (2 * DIGIT_BIT)))) { + return fast_mp_montgomery_reduce (x, n, rho); + } + + /* grow the input as required */ + if (x->alloc < digs) { + if ((res = mp_grow (x, digs)) != MP_OKAY) { + return res; + } + } + x->used = digs; + + for (ix = 0; ix < n->used; ix++) { + /* mu = ai * rho mod b + * + * The value of rho must be precalculated via + * montgomery_setup() such that + * it equals -1/n0 mod b this allows the + * following inner loop to reduce the + * input one digit at a time + */ + mu = (mp_digit) (((mp_word)x->dp[ix]) * ((mp_word)rho) & MP_MASK); + + /* a = a + mu * m * b**i */ + { + register int iy; + register mp_digit *tmpn, *tmpx, u; + register mp_word r; + + /* alias for digits of the modulus */ + tmpn = n->dp; + + /* alias for the digits of x [the input] */ + tmpx = x->dp + ix; + + /* set the carry to zero */ + u = 0; + + /* Multiply and add in place */ + for (iy = 0; iy < n->used; iy++) { + /* compute product and sum */ + r = ((mp_word)mu) * ((mp_word)*tmpn++) + + ((mp_word) u) + ((mp_word) * tmpx); + + /* get carry */ + u = (mp_digit)(r >> ((mp_word) DIGIT_BIT)); + + /* fix digit */ + *tmpx++ = (mp_digit)(r & ((mp_word) MP_MASK)); + } + /* At this point the ix'th digit of x should be zero */ + + + /* propagate carries upwards as required*/ + while (u) { + *tmpx += u; + u = *tmpx >> DIGIT_BIT; + *tmpx++ &= MP_MASK; + } + } + } + + /* at this point the n.used'th least + * significant digits of x are all zero + * which means we can shift x to the + * right by n.used digits and the + * residue is unchanged. + */ + + /* x = x/b**n.used */ + mp_clamp(x); + mp_rshd (x, n->used); + + /* if x >= n then x = x - n */ + if (mp_cmp_mag (x, n) != MP_LT) { + return s_mp_sub (x, n, x); + } + + return MP_OKAY; +} + + +/* determines the setup value */ +void mp_dr_setup(mp_int *a, mp_digit *d) +{ + /* the casts are required if DIGIT_BIT is one less than + * the number of bits in a mp_digit [e.g. DIGIT_BIT==31] + */ + *d = (mp_digit)((((mp_word)1) << ((mp_word)DIGIT_BIT)) - + ((mp_word)a->dp[0])); +} + + +/* reduce "x" in place modulo "n" using the Diminished Radix algorithm. + * + * Based on algorithm from the paper + * + * "Generating Efficient Primes for Discrete Log Cryptosystems" + * Chae Hoon Lim, Pil Joong Lee, + * POSTECH Information Research Laboratories + * + * The modulus must be of a special format [see manual] + * + * Has been modified to use algorithm 7.10 from the LTM book instead + * + * Input x must be in the range 0 <= x <= (n-1)**2 + */ +int +mp_dr_reduce (mp_int * x, mp_int * n, mp_digit k) +{ + int err, i, m; + mp_word r; + mp_digit mu, *tmpx1, *tmpx2; + + /* m = digits in modulus */ + m = n->used; + + /* ensure that "x" has at least 2m digits */ + if (x->alloc < m + m) { + if ((err = mp_grow (x, m + m)) != MP_OKAY) { + return err; + } + } + +/* top of loop, this is where the code resumes if + * another reduction pass is required. 
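/* Illustrative sketch (hypothetical caller code, not part of this patch):
 * a round trip through the Montgomery domain using only routines from this
 * file. With R = 2**(DIGIT_BIT * n.used), mapping a to a*R mod n and then
 * calling mp_montgomery_reduce() (which computes x*R**-1 mod n) gives the
 * original value back. */
static int example_montgomery(void)
{
    mp_int a, n, R, t;
    mp_digit rho;
    int ret;

    if ((ret = mp_init_multi(&a, &n, &R, &t, NULL, NULL)) != MP_OKAY)
        return ret;

    mp_set(&a, 42);
    mp_set(&n, 97);                                       /* odd modulus */

    if ((ret = mp_montgomery_setup(&n, &rho)) == MP_OKAY &&
        (ret = mp_2expt(&R, DIGIT_BIT * n.used)) == MP_OKAY &&
        (ret = mp_mulmod(&a, &R, &n, &t)) == MP_OKAY &&   /* t = a*R mod n */
        (ret = mp_montgomery_reduce(&t, &n, rho)) == MP_OKAY &&
        mp_cmp(&t, &a) != MP_EQ)                          /* t == a again? */
        ret = MP_VAL;

    mp_clear(&a); mp_clear(&n); mp_clear(&R); mp_clear(&t);
    return ret;
}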
+ */ +top: + /* aliases for digits */ + /* alias for lower half of x */ + tmpx1 = x->dp; + + /* alias for upper half of x, or x/B**m */ + tmpx2 = x->dp + m; + + /* set carry to zero */ + mu = 0; + + /* compute (x mod B**m) + k * [x/B**m] inline and inplace */ + for (i = 0; i < m; i++) { + r = ((mp_word)*tmpx2++) * ((mp_word)k) + *tmpx1 + mu; + *tmpx1++ = (mp_digit)(r & MP_MASK); + mu = (mp_digit)(r >> ((mp_word)DIGIT_BIT)); + } + + /* set final carry */ + *tmpx1++ = mu; + + /* zero words above m */ + for (i = m + 1; i < x->used; i++) { + *tmpx1++ = 0; + } + + /* clamp, sub and return */ + mp_clamp (x); + + /* if x >= n then subtract and reduce again + * Each successive "recursion" makes the input smaller and smaller. + */ + if (mp_cmp_mag (x, n) != MP_LT) { + s_mp_sub(x, n, x); + goto top; + } + return MP_OKAY; +} + + +/* reduces a modulo n where n is of the form 2**p - d */ +int mp_reduce_2k(mp_int *a, mp_int *n, mp_digit d) +{ + mp_int q; + int p, res; + + if ((res = mp_init(&q)) != MP_OKAY) { + return res; + } + + p = mp_count_bits(n); +top: + /* q = a/2**p, a = a mod 2**p */ + if ((res = mp_div_2d(a, p, &q, a)) != MP_OKAY) { + goto ERR; + } + + if (d != 1) { + /* q = q * d */ + if ((res = mp_mul_d(&q, d, &q)) != MP_OKAY) { + goto ERR; + } + } + + /* a = a + q */ + if ((res = s_mp_add(a, &q, a)) != MP_OKAY) { + goto ERR; + } + + if (mp_cmp_mag(a, n) != MP_LT) { + s_mp_sub(a, n, a); + goto top; + } + +ERR: + mp_clear(&q); + return res; +} + + +/* determines the setup value */ +int mp_reduce_2k_setup(mp_int *a, mp_digit *d) +{ + int res, p; + mp_int tmp; + + if ((res = mp_init(&tmp)) != MP_OKAY) { + return res; + } + + p = mp_count_bits(a); + if ((res = mp_2expt(&tmp, p)) != MP_OKAY) { + mp_clear(&tmp); + return res; + } + + if ((res = s_mp_sub(&tmp, a, &tmp)) != MP_OKAY) { + mp_clear(&tmp); + return res; + } + + *d = tmp.dp[0]; + mp_clear(&tmp); + return MP_OKAY; +} + + +/* computes a = 2**b + * + * Simple algorithm which zeroes the int, grows it then just sets one bit + * as required. 
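/* Concept sketch (not part of this patch): the diminished-radix idea on a
 * single machine word. For the modulus 2**31 - 1, reducing x is just
 * "low part + k * high part" (here k == 1) repeated until the high part is
 * gone, plus one final conditional subtraction, mirroring the loop in
 * mp_dr_reduce() above. */
#include <stdint.h>

static uint32_t mod_mersenne31(uint64_t x)
{
    const uint32_t M = 0x7FFFFFFFu;            /* 2**31 - 1 */

    while (x >> 31)                            /* high part still non-zero */
        x = (x & M) + (x >> 31);               /* fold it onto the low part */

    return (uint32_t)(x >= M ? x - M : x);     /* final conditional subtract */
}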
+ */ +int +mp_2expt (mp_int * a, int b) +{ + int res; + + /* zero a as per default */ + mp_zero (a); + + /* grow a to accomodate the single bit */ + if ((res = mp_grow (a, b / DIGIT_BIT + 1)) != MP_OKAY) { + return res; + } + + /* set the used count of where the bit will go */ + a->used = b / DIGIT_BIT + 1; + + /* put the single bit in its place */ + a->dp[b / DIGIT_BIT] = ((mp_digit)1) << (b % DIGIT_BIT); + + return MP_OKAY; +} + + +/* multiply by a digit */ +int +mp_mul_d (mp_int * a, mp_digit b, mp_int * c) +{ + mp_digit u, *tmpa, *tmpc; + mp_word r; + int ix, res, olduse; + + /* make sure c is big enough to hold a*b */ + if (c->alloc < a->used + 1) { + if ((res = mp_grow (c, a->used + 1)) != MP_OKAY) { + return res; + } + } + + /* get the original destinations used count */ + olduse = c->used; + + /* set the sign */ + c->sign = a->sign; + + /* alias for a->dp [source] */ + tmpa = a->dp; + + /* alias for c->dp [dest] */ + tmpc = c->dp; + + /* zero carry */ + u = 0; + + /* compute columns */ + for (ix = 0; ix < a->used; ix++) { + /* compute product and carry sum for this term */ + r = ((mp_word) u) + ((mp_word)*tmpa++) * ((mp_word)b); + + /* mask off higher bits to get a single digit */ + *tmpc++ = (mp_digit) (r & ((mp_word) MP_MASK)); + + /* send carry into next iteration */ + u = (mp_digit) (r >> ((mp_word) DIGIT_BIT)); + } + + /* store final carry [if any] and increment ix offset */ + *tmpc++ = u; + ++ix; + + /* now zero digits above the top */ + while (ix++ < olduse) { + *tmpc++ = 0; + } + + /* set used count */ + c->used = a->used + 1; + mp_clamp(c); + + return MP_OKAY; +} + + +/* d = a * b (mod c) */ +int mp_mulmod (mp_int * a, mp_int * b, mp_int * c, mp_int * d) +{ + int res; + mp_int t; + + if ((res = mp_init (&t)) != MP_OKAY) { + return res; + } + + if ((res = mp_mul (a, b, &t)) != MP_OKAY) { + mp_clear (&t); + return res; + } + res = mp_mod (&t, c, d); + mp_clear (&t); + return res; +} + + +/* computes b = a*a */ +int +mp_sqr (mp_int * a, mp_int * b) +{ + int res; + + { +#ifdef BN_FAST_S_MP_SQR_C + /* can we use the fast comba multiplier? */ + if ((a->used * 2 + 1) < MP_WARRAY && + a->used < + (1 << (sizeof(mp_word) * CHAR_BIT - 2*DIGIT_BIT - 1))) { + res = fast_s_mp_sqr (a, b); + } else +#endif +#ifdef BN_S_MP_SQR_C + res = s_mp_sqr (a, b); +#else + res = MP_VAL; +#endif + } + b->sign = MP_ZPOS; + return res; +} + + +/* high level multiplication (handles sign) */ +int mp_mul (mp_int * a, mp_int * b, mp_int * c) +{ + int res, neg; + neg = (a->sign == b->sign) ? MP_ZPOS : MP_NEG; + + { + /* can we use the fast multiplier? + * + * The fast multiplier can be used if the output will + * have less than MP_WARRAY digits and the number of + * digits won't affect carry propagation + */ + int digs = a->used + b->used + 1; + +#ifdef BN_FAST_S_MP_MUL_DIGS_C + if ((digs < MP_WARRAY) && + MIN(a->used, b->used) <= + (1 << ((CHAR_BIT * sizeof (mp_word)) - (2 * DIGIT_BIT)))) { + res = fast_s_mp_mul_digs (a, b, c, digs); + } else +#endif +#ifdef BN_S_MP_MUL_DIGS_C + res = s_mp_mul (a, b, c); /* uses s_mp_mul_digs */ +#else + res = MP_VAL; +#endif + + } + c->sign = (c->used > 0) ? 
neg : MP_ZPOS; + return res; +} + + +/* b = a*2 */ +int mp_mul_2(mp_int * a, mp_int * b) +{ + int x, res, oldused; + + /* grow to accomodate result */ + if (b->alloc < a->used + 1) { + if ((res = mp_grow (b, a->used + 1)) != MP_OKAY) { + return res; + } + } + + oldused = b->used; + b->used = a->used; + + { + register mp_digit r, rr, *tmpa, *tmpb; + + /* alias for source */ + tmpa = a->dp; + + /* alias for dest */ + tmpb = b->dp; + + /* carry */ + r = 0; + for (x = 0; x < a->used; x++) { + + /* get what will be the *next* carry bit from the + * MSB of the current digit + */ + rr = *tmpa >> ((mp_digit)(DIGIT_BIT - 1)); + + /* now shift up this digit, add in the carry [from the previous] */ + *tmpb++ = ((*tmpa++ << ((mp_digit)1)) | r) & MP_MASK; + + /* copy the carry that would be from the source + * digit into the next iteration + */ + r = rr; + } + + /* new leading digit? */ + if (r != 0) { + /* add a MSB which is always 1 at this point */ + *tmpb = 1; + ++(b->used); + } + + /* now zero any excess digits on the destination + * that we didn't write to + */ + tmpb = b->dp + b->used; + for (x = b->used; x < oldused; x++) { + *tmpb++ = 0; + } + } + b->sign = a->sign; + return MP_OKAY; +} + + +/* divide by three (based on routine from MPI and the GMP manual) */ +int +mp_div_3 (mp_int * a, mp_int *c, mp_digit * d) +{ + mp_int q; + mp_word w, t; + mp_digit b; + int res, ix; + + /* b = 2**DIGIT_BIT / 3 */ + b = (((mp_word)1) << ((mp_word)DIGIT_BIT)) / ((mp_word)3); + + if ((res = mp_init_size(&q, a->used)) != MP_OKAY) { + return res; + } + + q.used = a->used; + q.sign = a->sign; + w = 0; + for (ix = a->used - 1; ix >= 0; ix--) { + w = (w << ((mp_word)DIGIT_BIT)) | ((mp_word)a->dp[ix]); + + if (w >= 3) { + /* multiply w by [1/3] */ + t = (w * ((mp_word)b)) >> ((mp_word)DIGIT_BIT); + + /* now subtract 3 * [w/3] from w, to get the remainder */ + w -= t+t+t; + + /* fixup the remainder as required since + * the optimization is not exact. + */ + while (w >= 3) { + t += 1; + w -= 3; + } + } else { + t = 0; + } + q.dp[ix] = (mp_digit)t; + } + + /* [optional] store the remainder */ + if (d != NULL) { + *d = (mp_digit)w; + } + + /* [optional] store the quotient */ + if (c != NULL) { + mp_clamp(&q); + mp_exch(&q, c); + } + mp_clear(&q); + + return res; +} + + +/* init an mp_init for a given size */ +int mp_init_size (mp_int * a, int size) +{ + int x; + + /* pad size so there are always extra digits */ + size += (MP_PREC * 2) - (size % MP_PREC); + + /* alloc mem */ + a->dp = OPT_CAST(mp_digit) XMALLOC (sizeof (mp_digit) * size, 0, + DYNAMIC_TYPE_BIGINT); + if (a->dp == NULL) { + return MP_MEM; + } + + /* set the members */ + a->used = 0; + a->alloc = size; + a->sign = MP_ZPOS; + + /* zero the digits */ + for (x = 0; x < size; x++) { + a->dp[x] = 0; + } + + return MP_OKAY; +} + + +/* the jist of squaring... + * you do like mult except the offset of the tmpx [one that + * starts closer to zero] can't equal the offset of tmpy. + * So basically you set up iy like before then you min it with + * (ty-tx) so that it never happens. You double all those + * you add in the inner loop + +After that loop you do the squares and add them in. 
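/* Illustrative sketch (hypothetical caller code, not part of this patch):
 * cross-checking mp_div_3() below -- for any a, 3*q + r must reconstruct
 * a. Uses mp_mul_d() from earlier in this file. */
static int example_div3(void)
{
    mp_int a, q, t;
    mp_digit r;
    int ret;

    if ((ret = mp_init_multi(&a, &q, &t, NULL, NULL, NULL)) != MP_OKAY)
        return ret;

    mp_set(&a, 1);
    if ((ret = mp_mul_2d(&a, 130, &a)) == MP_OKAY &&   /* a = 2**130       */
        (ret = mp_div_3(&a, &q, &r)) == MP_OKAY &&     /* q = a/3, r = a%3 */
        (ret = mp_mul_d(&q, 3, &t)) == MP_OKAY) {      /* t = 3*q          */
        mp_set(&q, r);                                 /* reuse q to hold r */
        if ((ret = mp_add(&t, &q, &t)) == MP_OKAY &&
            mp_cmp(&t, &a) != MP_EQ)                   /* 3*q + r == a ?   */
            ret = MP_VAL;
    }

    mp_clear(&a); mp_clear(&q); mp_clear(&t);
    return ret;
}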
+*/ + +int fast_s_mp_sqr (mp_int * a, mp_int * b) +{ + int olduse, res, pa, ix, iz; +#ifdef WOLFSSL_SMALL_STACK + mp_digit* W; /* uses dynamic memory and slower */ +#else + mp_digit W[MP_WARRAY]; +#endif + mp_digit *tmpx; + mp_word W1; + + /* grow the destination as required */ + pa = a->used + a->used; + if (b->alloc < pa) { + if ((res = mp_grow (b, pa)) != MP_OKAY) { + return res; + } + } + + if (pa > MP_WARRAY) + return MP_RANGE; /* TAO range check */ + +#ifdef WOLFSSL_SMALL_STACK + W = (mp_digit*)XMALLOC(sizeof(mp_digit) * MP_WARRAY, 0, DYNAMIC_TYPE_BIGINT); + if (W == NULL) + return MP_MEM; +#endif + + /* number of output digits to produce */ + W1 = 0; + for (ix = 0; ix < pa; ix++) { + int tx, ty, iy; + mp_word _W; + mp_digit *tmpy; + + /* clear counter */ + _W = 0; + + /* get offsets into the two bignums */ + ty = MIN(a->used-1, ix); + tx = ix - ty; + + /* setup temp aliases */ + tmpx = a->dp + tx; + tmpy = a->dp + ty; + + /* this is the number of times the loop will iterrate, essentially + while (tx++ < a->used && ty-- >= 0) { ... } + */ + iy = MIN(a->used-tx, ty+1); + + /* now for squaring tx can never equal ty + * we halve the distance since they approach at a rate of 2x + * and we have to round because odd cases need to be executed + */ + iy = MIN(iy, (ty-tx+1)>>1); + + /* execute loop */ + for (iz = 0; iz < iy; iz++) { + _W += ((mp_word)*tmpx++)*((mp_word)*tmpy--); + } + + /* double the inner product and add carry */ + _W = _W + _W + W1; + + /* even columns have the square term in them */ + if ((ix&1) == 0) { + _W += ((mp_word)a->dp[ix>>1])*((mp_word)a->dp[ix>>1]); + } + + /* store it */ + W[ix] = (mp_digit)(_W & MP_MASK); + + /* make next carry */ + W1 = _W >> ((mp_word)DIGIT_BIT); + } + + /* setup dest */ + olduse = b->used; + b->used = a->used+a->used; + + { + mp_digit *tmpb; + tmpb = b->dp; + for (ix = 0; ix < pa; ix++) { + *tmpb++ = W[ix] & MP_MASK; + } + + /* clear unused digits [that existed in the old copy of c] */ + for (; ix < olduse; ix++) { + *tmpb++ = 0; + } + } + mp_clamp (b); + +#ifdef WOLFSSL_SMALL_STACK + XFREE(W, 0, DYNAMIC_TYPE_BIGINT); +#endif + + return MP_OKAY; +} + + +/* Fast (comba) multiplier + * + * This is the fast column-array [comba] multiplier. It is + * designed to compute the columns of the product first + * then handle the carries afterwards. This has the effect + * of making the nested loops that compute the columns very + * simple and schedulable on super-scalar processors. + * + * This has been modified to produce a variable number of + * digits of output so if say only a half-product is required + * you don't have to compute the upper half (a feature + * required for fast Barrett reduction). + * + * Based on Algorithm 14.12 on pp.595 of HAC. 
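+ * For example, output column ix of the product collects every a->dp[tx] * b->dp[ty] with tx + ty == ix: the offsets ty = MIN(b->used - 1, ix), tx = ix - ty and the bound iy = MIN(a->used - tx, ty + 1) enumerate exactly those pairs, and the carry left over is pushed into column ix + 1 afterwards.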
+ * + */ +int fast_s_mp_mul_digs (mp_int * a, mp_int * b, mp_int * c, int digs) +{ + int olduse, res, pa, ix, iz; +#ifdef WOLFSSL_SMALL_STACK + mp_digit* W; /* uses dynamic memory and slower */ +#else + mp_digit W[MP_WARRAY]; +#endif + register mp_word _W; + + /* grow the destination as required */ + if (c->alloc < digs) { + if ((res = mp_grow (c, digs)) != MP_OKAY) { + return res; + } + } + + /* number of output digits to produce */ + pa = MIN(digs, a->used + b->used); + if (pa > MP_WARRAY) + return MP_RANGE; /* TAO range check */ + +#ifdef WOLFSSL_SMALL_STACK + W = (mp_digit*)XMALLOC(sizeof(mp_digit) * MP_WARRAY, 0, DYNAMIC_TYPE_BIGINT); + if (W == NULL) + return MP_MEM; +#endif + + /* clear the carry */ + _W = 0; + for (ix = 0; ix < pa; ix++) { + int tx, ty; + int iy; + mp_digit *tmpx, *tmpy; + + /* get offsets into the two bignums */ + ty = MIN(b->used-1, ix); + tx = ix - ty; + + /* setup temp aliases */ + tmpx = a->dp + tx; + tmpy = b->dp + ty; + + /* this is the number of times the loop will iterrate, essentially + while (tx++ < a->used && ty-- >= 0) { ... } + */ + iy = MIN(a->used-tx, ty+1); + + /* execute loop */ + for (iz = 0; iz < iy; ++iz) { + _W += ((mp_word)*tmpx++)*((mp_word)*tmpy--); + + } + + /* store term */ + W[ix] = ((mp_digit)_W) & MP_MASK; + + /* make next carry */ + _W = _W >> ((mp_word)DIGIT_BIT); + } + + /* setup dest */ + olduse = c->used; + c->used = pa; + + { + register mp_digit *tmpc; + tmpc = c->dp; + for (ix = 0; ix < pa+1; ix++) { + /* now extract the previous digit [below the carry] */ + *tmpc++ = W[ix]; + } + + /* clear unused digits [that existed in the old copy of c] */ + for (; ix < olduse; ix++) { + *tmpc++ = 0; + } + } + mp_clamp (c); + +#ifdef WOLFSSL_SMALL_STACK + XFREE(W, 0, DYNAMIC_TYPE_BIGINT); +#endif + + return MP_OKAY; +} + + +/* low level squaring, b = a*a, HAC pp.596-597, Algorithm 14.16 */ +int s_mp_sqr (mp_int * a, mp_int * b) +{ + mp_int t; + int res, ix, iy, pa; + mp_word r; + mp_digit u, tmpx, *tmpt; + + pa = a->used; + if ((res = mp_init_size (&t, 2*pa + 1)) != MP_OKAY) { + return res; + } + + /* default used is maximum possible size */ + t.used = 2*pa + 1; + + for (ix = 0; ix < pa; ix++) { + /* first calculate the digit at 2*ix */ + /* calculate double precision result */ + r = ((mp_word) t.dp[2*ix]) + + ((mp_word)a->dp[ix])*((mp_word)a->dp[ix]); + + /* store lower part in result */ + t.dp[ix+ix] = (mp_digit) (r & ((mp_word) MP_MASK)); + + /* get the carry */ + u = (mp_digit)(r >> ((mp_word) DIGIT_BIT)); + + /* left hand side of A[ix] * A[iy] */ + tmpx = a->dp[ix]; + + /* alias for where to store the results */ + tmpt = t.dp + (2*ix + 1); + + for (iy = ix + 1; iy < pa; iy++) { + /* first calculate the product */ + r = ((mp_word)tmpx) * ((mp_word)a->dp[iy]); + + /* now calculate the double precision result, note we use + * addition instead of *2 since it's easier to optimize + */ + r = ((mp_word) *tmpt) + r + r + ((mp_word) u); + + /* store lower part */ + *tmpt++ = (mp_digit) (r & ((mp_word) MP_MASK)); + + /* get carry */ + u = (mp_digit)(r >> ((mp_word) DIGIT_BIT)); + } + /* propagate upwards */ + while (u != ((mp_digit) 0)) { + r = ((mp_word) *tmpt) + ((mp_word) u); + *tmpt++ = (mp_digit) (r & ((mp_word) MP_MASK)); + u = (mp_digit)(r >> ((mp_word) DIGIT_BIT)); + } + } + + mp_clamp (&t); + mp_exch (&t, b); + mp_clear (&t); + return MP_OKAY; +} + + +/* multiplies |a| * |b| and only computes upto digs digits of result + * HAC pp. 595, Algorithm 14.12 Modified so you can control how + * many digits of output are created. 
+ */ +int s_mp_mul_digs (mp_int * a, mp_int * b, mp_int * c, int digs) +{ + mp_int t; + int res, pa, pb, ix, iy; + mp_digit u; + mp_word r; + mp_digit tmpx, *tmpt, *tmpy; + + /* can we use the fast multiplier? */ + if (((digs) < MP_WARRAY) && + MIN (a->used, b->used) < + (1 << ((CHAR_BIT * sizeof (mp_word)) - (2 * DIGIT_BIT)))) { + return fast_s_mp_mul_digs (a, b, c, digs); + } + + if ((res = mp_init_size (&t, digs)) != MP_OKAY) { + return res; + } + t.used = digs; + + /* compute the digits of the product directly */ + pa = a->used; + for (ix = 0; ix < pa; ix++) { + /* set the carry to zero */ + u = 0; + + /* limit ourselves to making digs digits of output */ + pb = MIN (b->used, digs - ix); + + /* setup some aliases */ + /* copy of the digit from a used within the nested loop */ + tmpx = a->dp[ix]; + + /* an alias for the destination shifted ix places */ + tmpt = t.dp + ix; + + /* an alias for the digits of b */ + tmpy = b->dp; + + /* compute the columns of the output and propagate the carry */ + for (iy = 0; iy < pb; iy++) { + /* compute the column as a mp_word */ + r = ((mp_word)*tmpt) + + ((mp_word)tmpx) * ((mp_word)*tmpy++) + + ((mp_word) u); + + /* the new column is the lower part of the result */ + *tmpt++ = (mp_digit) (r & ((mp_word) MP_MASK)); + + /* get the carry word from the result */ + u = (mp_digit) (r >> ((mp_word) DIGIT_BIT)); + } + /* set carry if it is placed below digs */ + if (ix + iy < digs) { + *tmpt = u; + } + } + + mp_clamp (&t); + mp_exch (&t, c); + + mp_clear (&t); + return MP_OKAY; +} + + +/* + * shifts with subtractions when the result is greater than b. + * + * The method is slightly modified to shift B unconditionally upto just under + * the leading bit of b. This saves alot of multiple precision shifting. + */ +int mp_montgomery_calc_normalization (mp_int * a, mp_int * b) +{ + int x, bits, res; + + /* how many bits of last digit does b use */ + bits = mp_count_bits (b) % DIGIT_BIT; + + if (b->used > 1) { + if ((res = mp_2expt (a, (b->used - 1) * DIGIT_BIT + bits - 1)) != MP_OKAY) { + return res; + } + } else { + mp_set(a, 1); + bits = 1; + } + + + /* now compute C = A * B mod b */ + for (x = bits - 1; x < (int)DIGIT_BIT; x++) { + if ((res = mp_mul_2 (a, a)) != MP_OKAY) { + return res; + } + if (mp_cmp_mag (a, b) != MP_LT) { + if ((res = s_mp_sub (a, b, a)) != MP_OKAY) { + return res; + } + } + } + + return MP_OKAY; +} + + +#ifdef MP_LOW_MEM + #define TAB_SIZE 32 +#else + #define TAB_SIZE 256 +#endif + +int s_mp_exptmod (mp_int * G, mp_int * X, mp_int * P, mp_int * Y, int redmode) +{ + mp_int M[TAB_SIZE], res, mu; + mp_digit buf; + int err, bitbuf, bitcpy, bitcnt, mode, digidx, x, y, winsize; + int (*redux)(mp_int*,mp_int*,mp_int*); + + /* find window size */ + x = mp_count_bits (X); + if (x <= 7) { + winsize = 2; + } else if (x <= 36) { + winsize = 3; + } else if (x <= 140) { + winsize = 4; + } else if (x <= 450) { + winsize = 5; + } else if (x <= 1303) { + winsize = 6; + } else if (x <= 3529) { + winsize = 7; + } else { + winsize = 8; + } + +#ifdef MP_LOW_MEM + if (winsize > 5) { + winsize = 5; + } +#endif + + /* init M array */ + /* init first cell */ + if ((err = mp_init(&M[1])) != MP_OKAY) { + return err; + } + + /* now init the second half of the array */ + for (x = 1<<(winsize-1); x < (1 << winsize); x++) { + if ((err = mp_init(&M[x])) != MP_OKAY) { + for (y = 1<<(winsize-1); y < x; y++) { + mp_clear (&M[y]); + } + mp_clear(&M[1]); + return err; + } + } + + /* create mu, used for Barrett reduction */ + if ((err = mp_init (&mu)) != MP_OKAY) { + goto 
LBL_M; + } + + if (redmode == 0) { + if ((err = mp_reduce_setup (&mu, P)) != MP_OKAY) { + goto LBL_MU; + } + redux = mp_reduce; + } else { + if ((err = mp_reduce_2k_setup_l (P, &mu)) != MP_OKAY) { + goto LBL_MU; + } + redux = mp_reduce_2k_l; + } + + /* create M table + * + * The M table contains powers of the base, + * e.g. M[x] = G**x mod P + * + * The first half of the table is not + * computed though accept for M[0] and M[1] + */ + if ((err = mp_mod (G, P, &M[1])) != MP_OKAY) { + goto LBL_MU; + } + + /* compute the value at M[1<<(winsize-1)] by squaring + * M[1] (winsize-1) times + */ + if ((err = mp_copy (&M[1], &M[(mp_digit)(1 << (winsize - 1))])) != MP_OKAY) { + goto LBL_MU; + } + + for (x = 0; x < (winsize - 1); x++) { + /* square it */ + if ((err = mp_sqr (&M[(mp_digit)(1 << (winsize - 1))], + &M[(mp_digit)(1 << (winsize - 1))])) != MP_OKAY) { + goto LBL_MU; + } + + /* reduce modulo P */ + if ((err = redux (&M[(mp_digit)(1 << (winsize - 1))], P, &mu)) != MP_OKAY) { + goto LBL_MU; + } + } + + /* create upper table, that is M[x] = M[x-1] * M[1] (mod P) + * for x = (2**(winsize - 1) + 1) to (2**winsize - 1) + */ + for (x = (1 << (winsize - 1)) + 1; x < (1 << winsize); x++) { + if ((err = mp_mul (&M[x - 1], &M[1], &M[x])) != MP_OKAY) { + goto LBL_MU; + } + if ((err = redux (&M[x], P, &mu)) != MP_OKAY) { + goto LBL_MU; + } + } + + /* setup result */ + if ((err = mp_init (&res)) != MP_OKAY) { + goto LBL_MU; + } + mp_set (&res, 1); + + /* set initial mode and bit cnt */ + mode = 0; + bitcnt = 1; + buf = 0; + digidx = X->used - 1; + bitcpy = 0; + bitbuf = 0; + + for (;;) { + /* grab next digit as required */ + if (--bitcnt == 0) { + /* if digidx == -1 we are out of digits */ + if (digidx == -1) { + break; + } + /* read next digit and reset the bitcnt */ + buf = X->dp[digidx--]; + bitcnt = (int) DIGIT_BIT; + } + + /* grab the next msb from the exponent */ + y = (int)(buf >> (mp_digit)(DIGIT_BIT - 1)) & 1; + buf <<= (mp_digit)1; + + /* if the bit is zero and mode == 0 then we ignore it + * These represent the leading zero bits before the first 1 bit + * in the exponent. 
Technically this opt is not required but it + * does lower the # of trivial squaring/reductions used + */ + if (mode == 0 && y == 0) { + continue; + } + + /* if the bit is zero and mode == 1 then we square */ + if (mode == 1 && y == 0) { + if ((err = mp_sqr (&res, &res)) != MP_OKAY) { + goto LBL_RES; + } + if ((err = redux (&res, P, &mu)) != MP_OKAY) { + goto LBL_RES; + } + continue; + } + + /* else we add it to the window */ + bitbuf |= (y << (winsize - ++bitcpy)); + mode = 2; + + if (bitcpy == winsize) { + /* ok window is filled so square as required and multiply */ + /* square first */ + for (x = 0; x < winsize; x++) { + if ((err = mp_sqr (&res, &res)) != MP_OKAY) { + goto LBL_RES; + } + if ((err = redux (&res, P, &mu)) != MP_OKAY) { + goto LBL_RES; + } + } + + /* then multiply */ + if ((err = mp_mul (&res, &M[bitbuf], &res)) != MP_OKAY) { + goto LBL_RES; + } + if ((err = redux (&res, P, &mu)) != MP_OKAY) { + goto LBL_RES; + } + + /* empty window and reset */ + bitcpy = 0; + bitbuf = 0; + mode = 1; + } + } + + /* if bits remain then square/multiply */ + if (mode == 2 && bitcpy > 0) { + /* square then multiply if the bit is set */ + for (x = 0; x < bitcpy; x++) { + if ((err = mp_sqr (&res, &res)) != MP_OKAY) { + goto LBL_RES; + } + if ((err = redux (&res, P, &mu)) != MP_OKAY) { + goto LBL_RES; + } + + bitbuf <<= 1; + if ((bitbuf & (1 << winsize)) != 0) { + /* then multiply */ + if ((err = mp_mul (&res, &M[1], &res)) != MP_OKAY) { + goto LBL_RES; + } + if ((err = redux (&res, P, &mu)) != MP_OKAY) { + goto LBL_RES; + } + } + } + } + + mp_exch (&res, Y); + err = MP_OKAY; +LBL_RES:mp_clear (&res); +LBL_MU:mp_clear (&mu); +LBL_M: + mp_clear(&M[1]); + for (x = 1<<(winsize-1); x < (1 << winsize); x++) { + mp_clear (&M[x]); + } + return err; +} + + +/* pre-calculate the value required for Barrett reduction + * For a given modulus "b" it calulates the value required in "a" + */ +int mp_reduce_setup (mp_int * a, mp_int * b) +{ + int res; + + if ((res = mp_2expt (a, b->used * 2 * DIGIT_BIT)) != MP_OKAY) { + return res; + } + return mp_div (a, b, a, NULL); +} + + +/* reduces x mod m, assumes 0 < x < m**2, mu is + * precomputed via mp_reduce_setup. 
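+ * Roughly: with k = m->used and radix b = 2**DIGIT_BIT, mu = floor(b**(2k) / m) and the estimate q = floor(floor(x / b**(k-1)) * mu / b**(k+1)) is at most two below the true quotient, so the trailing loop subtracts m at most a couple of times. A minimal usage sketch (assuming x and m are already initialised with 0 < x < m*m; error checks omitted): + * mp_int mu; mp_init(&mu); + * mp_reduce_setup(&mu, &m); (mu = floor(b**(2k) / m)) + * mp_reduce(&x, &m, &mu); (x now holds x mod m)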
+ * From HAC pp.604 Algorithm 14.42 + */ +int mp_reduce (mp_int * x, mp_int * m, mp_int * mu) +{ + mp_int q; + int res, um = m->used; + + /* q = x */ + if ((res = mp_init_copy (&q, x)) != MP_OKAY) { + return res; + } + + /* q1 = x / b**(k-1) */ + mp_rshd (&q, um - 1); + + /* according to HAC this optimization is ok */ + if (((mp_word) um) > (((mp_digit)1) << (DIGIT_BIT - 1))) { + if ((res = mp_mul (&q, mu, &q)) != MP_OKAY) { + goto CLEANUP; + } + } else { +#ifdef BN_S_MP_MUL_HIGH_DIGS_C + if ((res = s_mp_mul_high_digs (&q, mu, &q, um)) != MP_OKAY) { + goto CLEANUP; + } +#elif defined(BN_FAST_S_MP_MUL_HIGH_DIGS_C) + if ((res = fast_s_mp_mul_high_digs (&q, mu, &q, um)) != MP_OKAY) { + goto CLEANUP; + } +#else + { + res = MP_VAL; + goto CLEANUP; + } +#endif + } + + /* q3 = q2 / b**(k+1) */ + mp_rshd (&q, um + 1); + + /* x = x mod b**(k+1), quick (no division) */ + if ((res = mp_mod_2d (x, DIGIT_BIT * (um + 1), x)) != MP_OKAY) { + goto CLEANUP; + } + + /* q = q * m mod b**(k+1), quick (no division) */ + if ((res = s_mp_mul_digs (&q, m, &q, um + 1)) != MP_OKAY) { + goto CLEANUP; + } + + /* x = x - q */ + if ((res = mp_sub (x, &q, x)) != MP_OKAY) { + goto CLEANUP; + } + + /* If x < 0, add b**(k+1) to it */ + if (mp_cmp_d (x, 0) == MP_LT) { + mp_set (&q, 1); + if ((res = mp_lshd (&q, um + 1)) != MP_OKAY) + goto CLEANUP; + if ((res = mp_add (x, &q, x)) != MP_OKAY) + goto CLEANUP; + } + + /* Back off if it's too big */ + while (mp_cmp (x, m) != MP_LT) { + if ((res = s_mp_sub (x, m, x)) != MP_OKAY) { + goto CLEANUP; + } + } + +CLEANUP: + mp_clear (&q); + + return res; +} + + +/* reduces a modulo n where n is of the form 2**p - d + This differs from reduce_2k since "d" can be larger + than a single digit. +*/ +int mp_reduce_2k_l(mp_int *a, mp_int *n, mp_int *d) +{ + mp_int q; + int p, res; + + if ((res = mp_init(&q)) != MP_OKAY) { + return res; + } + + p = mp_count_bits(n); +top: + /* q = a/2**p, a = a mod 2**p */ + if ((res = mp_div_2d(a, p, &q, a)) != MP_OKAY) { + goto ERR; + } + + /* q = q * d */ + if ((res = mp_mul(&q, d, &q)) != MP_OKAY) { + goto ERR; + } + + /* a = a + q */ + if ((res = s_mp_add(a, &q, a)) != MP_OKAY) { + goto ERR; + } + + if (mp_cmp_mag(a, n) != MP_LT) { + s_mp_sub(a, n, a); + goto top; + } + +ERR: + mp_clear(&q); + return res; +} + + +/* determines the setup value */ +int mp_reduce_2k_setup_l(mp_int *a, mp_int *d) +{ + int res; + mp_int tmp; + + if ((res = mp_init(&tmp)) != MP_OKAY) { + return res; + } + + if ((res = mp_2expt(&tmp, mp_count_bits(a))) != MP_OKAY) { + goto ERR; + } + + if ((res = s_mp_sub(&tmp, a, d)) != MP_OKAY) { + goto ERR; + } + +ERR: + mp_clear(&tmp); + return res; +} + + +/* multiplies |a| * |b| and does not compute the lower digs digits + * [meant to get the higher part of the product] + */ +int +s_mp_mul_high_digs (mp_int * a, mp_int * b, mp_int * c, int digs) +{ + mp_int t; + int res, pa, pb, ix, iy; + mp_digit u; + mp_word r; + mp_digit tmpx, *tmpt, *tmpy; + + /* can we use the fast multiplier? 
*/ +#ifdef BN_FAST_S_MP_MUL_HIGH_DIGS_C + if (((a->used + b->used + 1) < MP_WARRAY) + && MIN (a->used, b->used) < (1 << ((CHAR_BIT * sizeof (mp_word)) - (2 * DIGIT_BIT)))) { + return fast_s_mp_mul_high_digs (a, b, c, digs); + } +#endif + + if ((res = mp_init_size (&t, a->used + b->used + 1)) != MP_OKAY) { + return res; + } + t.used = a->used + b->used + 1; + + pa = a->used; + pb = b->used; + for (ix = 0; ix < pa; ix++) { + /* clear the carry */ + u = 0; + + /* left hand side of A[ix] * B[iy] */ + tmpx = a->dp[ix]; + + /* alias to the address of where the digits will be stored */ + tmpt = &(t.dp[digs]); + + /* alias for where to read the right hand side from */ + tmpy = b->dp + (digs - ix); + + for (iy = digs - ix; iy < pb; iy++) { + /* calculate the double precision result */ + r = ((mp_word)*tmpt) + + ((mp_word)tmpx) * ((mp_word)*tmpy++) + + ((mp_word) u); + + /* get the lower part */ + *tmpt++ = (mp_digit) (r & ((mp_word) MP_MASK)); + + /* carry the carry */ + u = (mp_digit) (r >> ((mp_word) DIGIT_BIT)); + } + *tmpt = u; + } + mp_clamp (&t); + mp_exch (&t, c); + mp_clear (&t); + return MP_OKAY; +} + + +/* this is a modified version of fast_s_mul_digs that only produces + * output digits *above* digs. See the comments for fast_s_mul_digs + * to see how it works. + * + * This is used in the Barrett reduction since for one of the multiplications + * only the higher digits were needed. This essentially halves the work. + * + * Based on Algorithm 14.12 on pp.595 of HAC. + */ +int fast_s_mp_mul_high_digs (mp_int * a, mp_int * b, mp_int * c, int digs) +{ + int olduse, res, pa, ix, iz; +#ifdef WOLFSSL_SMALL_STACK + mp_digit* W; /* uses dynamic memory and slower */ +#else + mp_digit W[MP_WARRAY]; +#endif + mp_word _W; + + /* grow the destination as required */ + pa = a->used + b->used; + if (c->alloc < pa) { + if ((res = mp_grow (c, pa)) != MP_OKAY) { + return res; + } + } + + if (pa > MP_WARRAY) + return MP_RANGE; /* TAO range check */ + +#ifdef WOLFSSL_SMALL_STACK + W = (mp_digit*)XMALLOC(sizeof(mp_digit) * MP_WARRAY, 0, DYNAMIC_TYPE_BIGINT); + if (W == NULL) + return MP_MEM; +#endif + + /* number of output digits to produce */ + pa = a->used + b->used; + _W = 0; + for (ix = digs; ix < pa; ix++) { + int tx, ty, iy; + mp_digit *tmpx, *tmpy; + + /* get offsets into the two bignums */ + ty = MIN(b->used-1, ix); + tx = ix - ty; + + /* setup temp aliases */ + tmpx = a->dp + tx; + tmpy = b->dp + ty; + + /* this is the number of times the loop will iterrate, essentially its + while (tx++ < a->used && ty-- >= 0) { ... 
} + */ + iy = MIN(a->used-tx, ty+1); + + /* execute loop */ + for (iz = 0; iz < iy; iz++) { + _W += ((mp_word)*tmpx++)*((mp_word)*tmpy--); + } + + /* store term */ + W[ix] = ((mp_digit)_W) & MP_MASK; + + /* make next carry */ + _W = _W >> ((mp_word)DIGIT_BIT); + } + + /* setup dest */ + olduse = c->used; + c->used = pa; + + { + register mp_digit *tmpc; + + tmpc = c->dp + digs; + for (ix = digs; ix <= pa; ix++) { + /* now extract the previous digit [below the carry] */ + *tmpc++ = W[ix]; + } + + /* clear unused digits [that existed in the old copy of c] */ + for (; ix < olduse; ix++) { + *tmpc++ = 0; + } + } + mp_clamp (c); + +#ifdef WOLFSSL_SMALL_STACK + XFREE(W, 0, DYNAMIC_TYPE_BIGINT); +#endif + + return MP_OKAY; +} + + +/* set a 32-bit const */ +int mp_set_int (mp_int * a, unsigned long b) +{ + int x, res; + + mp_zero (a); + + /* set four bits at a time */ + for (x = 0; x < 8; x++) { + /* shift the number up four bits */ + if ((res = mp_mul_2d (a, 4, a)) != MP_OKAY) { + return res; + } + + /* OR in the top four bits of the source */ + a->dp[0] |= (b >> 28) & 15; + + /* shift the source up to the next four bits */ + b <<= 4; + + /* ensure that digits are not clamped off */ + a->used += 1; + } + mp_clamp (a); + return MP_OKAY; +} + + +#if defined(WOLFSSL_KEY_GEN) || defined(HAVE_ECC) + +/* c = a * a (mod b) */ +int mp_sqrmod (mp_int * a, mp_int * b, mp_int * c) +{ + int res; + mp_int t; + + if ((res = mp_init (&t)) != MP_OKAY) { + return res; + } + + if ((res = mp_sqr (a, &t)) != MP_OKAY) { + mp_clear (&t); + return res; + } + res = mp_mod (&t, b, c); + mp_clear (&t); + return res; +} + +#endif + + +#if defined(HAVE_ECC) || !defined(NO_PWDBASED) || defined(WOLFSSL_SNIFFER) || defined(WOLFSSL_HAVE_WOLFSCEP) || defined(WOLFSSL_KEY_GEN) + +/* single digit addition */ +int mp_add_d (mp_int* a, mp_digit b, mp_int* c) +{ + int res, ix, oldused; + mp_digit *tmpa, *tmpc, mu; + + /* grow c as required */ + if (c->alloc < a->used + 1) { + if ((res = mp_grow(c, a->used + 1)) != MP_OKAY) { + return res; + } + } + + /* if a is negative and |a| >= b, call c = |a| - b */ + if (a->sign == MP_NEG && (a->used > 1 || a->dp[0] >= b)) { + /* temporarily fix sign of a */ + a->sign = MP_ZPOS; + + /* c = |a| - b */ + res = mp_sub_d(a, b, c); + + /* fix sign */ + a->sign = c->sign = MP_NEG; + + /* clamp */ + mp_clamp(c); + + return res; + } + + /* old number of used digits in c */ + oldused = c->used; + + /* sign always positive */ + c->sign = MP_ZPOS; + + /* source alias */ + tmpa = a->dp; + + /* destination alias */ + tmpc = c->dp; + + /* if a is positive */ + if (a->sign == MP_ZPOS) { + /* add digit, after this we're propagating + * the carry. 
+ */ + *tmpc = *tmpa++ + b; + mu = *tmpc >> DIGIT_BIT; + *tmpc++ &= MP_MASK; + + /* now handle rest of the digits */ + for (ix = 1; ix < a->used; ix++) { + *tmpc = *tmpa++ + mu; + mu = *tmpc >> DIGIT_BIT; + *tmpc++ &= MP_MASK; + } + /* set final carry */ + if (mu != 0 && ix < c->alloc) { + ix++; + *tmpc++ = mu; + } + + /* setup size */ + c->used = a->used + 1; + } else { + /* a was negative and |a| < b */ + c->used = 1; + + /* the result is a single digit */ + if (a->used == 1) { + *tmpc++ = b - a->dp[0]; + } else { + *tmpc++ = b; + } + + /* setup count so the clearing of oldused + * can fall through correctly + */ + ix = 1; + } + + /* now zero to oldused */ + while (ix++ < oldused) { + *tmpc++ = 0; + } + mp_clamp(c); + + return MP_OKAY; +} + + +/* single digit subtraction */ +int mp_sub_d (mp_int * a, mp_digit b, mp_int * c) +{ + mp_digit *tmpa, *tmpc, mu; + int res, ix, oldused; + + /* grow c as required */ + if (c->alloc < a->used + 1) { + if ((res = mp_grow(c, a->used + 1)) != MP_OKAY) { + return res; + } + } + + /* if a is negative just do an unsigned + * addition [with fudged signs] + */ + if (a->sign == MP_NEG) { + a->sign = MP_ZPOS; + res = mp_add_d(a, b, c); + a->sign = c->sign = MP_NEG; + + /* clamp */ + mp_clamp(c); + + return res; + } + + /* setup regs */ + oldused = c->used; + tmpa = a->dp; + tmpc = c->dp; + + /* if a <= b simply fix the single digit */ + if ((a->used == 1 && a->dp[0] <= b) || a->used == 0) { + if (a->used == 1) { + *tmpc++ = b - *tmpa; + } else { + *tmpc++ = b; + } + ix = 1; + + /* negative/1digit */ + c->sign = MP_NEG; + c->used = 1; + } else { + /* positive/size */ + c->sign = MP_ZPOS; + c->used = a->used; + + /* subtract first digit */ + *tmpc = *tmpa++ - b; + mu = *tmpc >> (sizeof(mp_digit) * CHAR_BIT - 1); + *tmpc++ &= MP_MASK; + + /* handle rest of the digits */ + for (ix = 1; ix < a->used; ix++) { + *tmpc = *tmpa++ - mu; + mu = *tmpc >> (sizeof(mp_digit) * CHAR_BIT - 1); + *tmpc++ &= MP_MASK; + } + } + + /* zero excess digits */ + while (ix++ < oldused) { + *tmpc++ = 0; + } + mp_clamp(c); + return MP_OKAY; +} + +#endif /* defined(HAVE_ECC) || !defined(NO_PWDBASED) */ + + +#if defined(WOLFSSL_KEY_GEN) || defined(HAVE_COMP_KEY) + +static const int lnz[16] = { + 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 +}; + +/* Counts the number of lsbs which are zero before the first one bit */ +int mp_cnt_lsb(mp_int *a) +{ + int x; + mp_digit q, qq; + + /* easy out */ + if (mp_iszero(a) == 1) { + return 0; + } + + /* scan lower digits until non-zero */ + for (x = 0; x < a->used && a->dp[x] == 0; x++); + q = a->dp[x]; + x *= DIGIT_BIT; + + /* now scan this digit until a 1 is found */ + if ((q & 1) == 0) { + do { + qq = q & 15; + x += lnz[qq]; + q >>= 4; + } while (qq == 0); + } + return x; +} + + +/* determines if b is a power of two; if so, stores the exponent in p */ +static int s_is_power_of_two(mp_digit b, int *p) +{ + int x; + + /* fast return if no power of two */ + if ((b==0) || (b & (b-1))) { + return 0; + } + + for (x = 0; x < DIGIT_BIT; x++) { + if (b == (((mp_digit)1)<<x)) { + *p = x; + return 1; + } + } + return 0; +} + + +/* single digit division (based on routine from MPI) */ +static int mp_div_d (mp_int * a, mp_digit b, mp_int * c, mp_digit * d) +{ + mp_int q; + mp_word w; + mp_digit t; + int res, ix; + + /* cannot divide by zero */ + if (b == 0) { + return MP_VAL; + } + + /* quick outs */ + if (b == 1 || mp_iszero(a) == 1) { + if (d != NULL) { + *d = 0; + } + if (c != NULL) { + return mp_copy(a, c); + } + return MP_OKAY; + } + + /* power of two ? */ + if (s_is_power_of_two(b, &ix) == 1) { + /* remainder is the low bits, quotient is a shift */ + if (d != NULL) { + *d = a->dp[0] & ((((mp_digit)1)<<ix) - 1); + } + if (c != NULL) { + return mp_div_2d(a, ix, c, NULL); + } + return MP_OKAY; + } + + /* three? use the specialised routine above */ + if (b == 3) { + return mp_div_3(a, c, d); + } + + /* no easy answer [c'est la vie], do the division digit by digit */ + if ((res = mp_init_size(&q, a->used)) != MP_OKAY) { + return res; + } + + q.used = a->used; + q.sign = a->sign; + w = 0; + for (ix = a->used - 1; ix >= 0; ix--) { + w = (w << ((mp_word)DIGIT_BIT)) | ((mp_word)a->dp[ix]); + + if (w >= b) { + t = (mp_digit)(w / b); + w -= ((mp_word)t) * ((mp_word)b); + } else { + t = 0; + } + q.dp[ix] = (mp_digit)t; + } + + if (d != NULL) { + *d = (mp_digit)w; + } + + if (c != NULL) { + mp_clamp(&q); + mp_exch(&q, c); + } + mp_clear(&q); + + return res; +} + + +int mp_mod_d (mp_int * a, mp_digit b, mp_digit * c) +{ + return
mp_div_d(a, b, NULL, c); +} + +#endif /* defined(WOLFSSL_KEY_GEN) || defined(HAVE_COMP_KEY) */ + +#ifdef WOLFSSL_KEY_GEN + +const mp_digit ltm_prime_tab[] = { + 0x0002, 0x0003, 0x0005, 0x0007, 0x000B, 0x000D, 0x0011, 0x0013, + 0x0017, 0x001D, 0x001F, 0x0025, 0x0029, 0x002B, 0x002F, 0x0035, + 0x003B, 0x003D, 0x0043, 0x0047, 0x0049, 0x004F, 0x0053, 0x0059, + 0x0061, 0x0065, 0x0067, 0x006B, 0x006D, 0x0071, 0x007F, +#ifndef MP_8BIT + 0x0083, + 0x0089, 0x008B, 0x0095, 0x0097, 0x009D, 0x00A3, 0x00A7, 0x00AD, + 0x00B3, 0x00B5, 0x00BF, 0x00C1, 0x00C5, 0x00C7, 0x00D3, 0x00DF, + 0x00E3, 0x00E5, 0x00E9, 0x00EF, 0x00F1, 0x00FB, 0x0101, 0x0107, + 0x010D, 0x010F, 0x0115, 0x0119, 0x011B, 0x0125, 0x0133, 0x0137, + + 0x0139, 0x013D, 0x014B, 0x0151, 0x015B, 0x015D, 0x0161, 0x0167, + 0x016F, 0x0175, 0x017B, 0x017F, 0x0185, 0x018D, 0x0191, 0x0199, + 0x01A3, 0x01A5, 0x01AF, 0x01B1, 0x01B7, 0x01BB, 0x01C1, 0x01C9, + 0x01CD, 0x01CF, 0x01D3, 0x01DF, 0x01E7, 0x01EB, 0x01F3, 0x01F7, + 0x01FD, 0x0209, 0x020B, 0x021D, 0x0223, 0x022D, 0x0233, 0x0239, + 0x023B, 0x0241, 0x024B, 0x0251, 0x0257, 0x0259, 0x025F, 0x0265, + 0x0269, 0x026B, 0x0277, 0x0281, 0x0283, 0x0287, 0x028D, 0x0293, + 0x0295, 0x02A1, 0x02A5, 0x02AB, 0x02B3, 0x02BD, 0x02C5, 0x02CF, + + 0x02D7, 0x02DD, 0x02E3, 0x02E7, 0x02EF, 0x02F5, 0x02F9, 0x0301, + 0x0305, 0x0313, 0x031D, 0x0329, 0x032B, 0x0335, 0x0337, 0x033B, + 0x033D, 0x0347, 0x0355, 0x0359, 0x035B, 0x035F, 0x036D, 0x0371, + 0x0373, 0x0377, 0x038B, 0x038F, 0x0397, 0x03A1, 0x03A9, 0x03AD, + 0x03B3, 0x03B9, 0x03C7, 0x03CB, 0x03D1, 0x03D7, 0x03DF, 0x03E5, + 0x03F1, 0x03F5, 0x03FB, 0x03FD, 0x0407, 0x0409, 0x040F, 0x0419, + 0x041B, 0x0425, 0x0427, 0x042D, 0x043F, 0x0443, 0x0445, 0x0449, + 0x044F, 0x0455, 0x045D, 0x0463, 0x0469, 0x047F, 0x0481, 0x048B, + + 0x0493, 0x049D, 0x04A3, 0x04A9, 0x04B1, 0x04BD, 0x04C1, 0x04C7, + 0x04CD, 0x04CF, 0x04D5, 0x04E1, 0x04EB, 0x04FD, 0x04FF, 0x0503, + 0x0509, 0x050B, 0x0511, 0x0515, 0x0517, 0x051B, 0x0527, 0x0529, + 0x052F, 0x0551, 0x0557, 0x055D, 0x0565, 0x0577, 0x0581, 0x058F, + 0x0593, 0x0595, 0x0599, 0x059F, 0x05A7, 0x05AB, 0x05AD, 0x05B3, + 0x05BF, 0x05C9, 0x05CB, 0x05CF, 0x05D1, 0x05D5, 0x05DB, 0x05E7, + 0x05F3, 0x05FB, 0x0607, 0x060D, 0x0611, 0x0617, 0x061F, 0x0623, + 0x062B, 0x062F, 0x063D, 0x0641, 0x0647, 0x0649, 0x064D, 0x0653 +#endif +}; + + +/* Miller-Rabin test of "a" to the base of "b" as described in + * HAC pp. 139 Algorithm 4.24 + * + * Sets result to 0 if definitely composite or 1 if probably prime. + * Randomly the chance of error is no more than 1/4 and often + * very much lower. 
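+ * Concretely: write a - 1 = 2**s * r with r odd; 'a' passes the test for base 'b' when either b**r == 1 (mod a) or b**(2**j * r) == a - 1 (mod a) for some 0 <= j < s, and running t independent bases drives the error bound down to 4**(-t).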
+ */ +static int mp_prime_miller_rabin (mp_int * a, mp_int * b, int *result) +{ + mp_int n1, y, r; + int s, j, err; + + /* default */ + *result = MP_NO; + + /* ensure b > 1 */ + if (mp_cmp_d(b, 1) != MP_GT) { + return MP_VAL; + } + + /* get n1 = a - 1 */ + if ((err = mp_init_copy (&n1, a)) != MP_OKAY) { + return err; + } + if ((err = mp_sub_d (&n1, 1, &n1)) != MP_OKAY) { + goto LBL_N1; + } + + /* set 2**s * r = n1 */ + if ((err = mp_init_copy (&r, &n1)) != MP_OKAY) { + goto LBL_N1; + } + + /* count the number of least significant bits + * which are zero + */ + s = mp_cnt_lsb(&r); + + /* now divide n - 1 by 2**s */ + if ((err = mp_div_2d (&r, s, &r, NULL)) != MP_OKAY) { + goto LBL_R; + } + + /* compute y = b**r mod a */ + if ((err = mp_init (&y)) != MP_OKAY) { + goto LBL_R; + } + if ((err = mp_exptmod (b, &r, a, &y)) != MP_OKAY) { + goto LBL_Y; + } + + /* if y != 1 and y != n1 do */ + if (mp_cmp_d (&y, 1) != MP_EQ && mp_cmp (&y, &n1) != MP_EQ) { + j = 1; + /* while j <= s-1 and y != n1 */ + while ((j <= (s - 1)) && mp_cmp (&y, &n1) != MP_EQ) { + if ((err = mp_sqrmod (&y, a, &y)) != MP_OKAY) { + goto LBL_Y; + } + + /* if y == 1 then composite */ + if (mp_cmp_d (&y, 1) == MP_EQ) { + goto LBL_Y; + } + + ++j; + } + + /* if y != n1 then composite */ + if (mp_cmp (&y, &n1) != MP_EQ) { + goto LBL_Y; + } + } + + /* probably prime now */ + *result = MP_YES; +LBL_Y:mp_clear (&y); +LBL_R:mp_clear (&r); +LBL_N1:mp_clear (&n1); + return err; +} + + +/* determines if an integers is divisible by one + * of the first PRIME_SIZE primes or not + * + * sets result to 0 if not, 1 if yes + */ +static int mp_prime_is_divisible (mp_int * a, int *result) +{ + int err, ix; + mp_digit res; + + /* default to not */ + *result = MP_NO; + + for (ix = 0; ix < PRIME_SIZE; ix++) { + /* what is a mod LBL_prime_tab[ix] */ + if ((err = mp_mod_d (a, ltm_prime_tab[ix], &res)) != MP_OKAY) { + return err; + } + + /* is the residue zero? */ + if (res == 0) { + *result = MP_YES; + return MP_OKAY; + } + } + + return MP_OKAY; +} + + +/* + * Sets result to 1 if probably prime, 0 otherwise + */ +int mp_prime_is_prime (mp_int * a, int t, int *result) +{ + mp_int b; + int ix, err, res; + + /* default to no */ + *result = MP_NO; + + /* valid value of t? */ + if (t <= 0 || t > PRIME_SIZE) { + return MP_VAL; + } + + /* is the input equal to one of the primes in the table? 
*/ + for (ix = 0; ix < PRIME_SIZE; ix++) { + if (mp_cmp_d(a, ltm_prime_tab[ix]) == MP_EQ) { + *result = 1; + return MP_OKAY; + } + } + + /* first perform trial division */ + if ((err = mp_prime_is_divisible (a, &res)) != MP_OKAY) { + return err; + } + + /* return if it was trivially divisible */ + if (res == MP_YES) { + return MP_OKAY; + } + + /* now perform the miller-rabin rounds */ + if ((err = mp_init (&b)) != MP_OKAY) { + return err; + } + + for (ix = 0; ix < t; ix++) { + /* set the prime */ + mp_set (&b, ltm_prime_tab[ix]); + + if ((err = mp_prime_miller_rabin (a, &b, &res)) != MP_OKAY) { + goto LBL_B; + } + + if (res == MP_NO) { + goto LBL_B; + } + } + + /* passed the test */ + *result = MP_YES; +LBL_B:mp_clear (&b); + return err; +} + + +/* computes least common multiple as |a*b|/(a, b) */ +int mp_lcm (mp_int * a, mp_int * b, mp_int * c) +{ + int res; + mp_int t1, t2; + + + if ((res = mp_init_multi (&t1, &t2, NULL, NULL, NULL, NULL)) != MP_OKAY) { + return res; + } + + /* t1 = get the GCD of the two inputs */ + if ((res = mp_gcd (a, b, &t1)) != MP_OKAY) { + goto LBL_T; + } + + /* divide the smallest by the GCD */ + if (mp_cmp_mag(a, b) == MP_LT) { + /* store quotient in t2 such that t2 * b is the LCM */ + if ((res = mp_div(a, &t1, &t2, NULL)) != MP_OKAY) { + goto LBL_T; + } + res = mp_mul(b, &t2, c); + } else { + /* store quotient in t2 such that t2 * a is the LCM */ + if ((res = mp_div(b, &t1, &t2, NULL)) != MP_OKAY) { + goto LBL_T; + } + res = mp_mul(a, &t2, c); + } + + /* fix the sign to positive */ + c->sign = MP_ZPOS; + +LBL_T: + mp_clear(&t1); + mp_clear(&t2); + return res; +} + + + +/* Greatest Common Divisor using the binary method */ +int mp_gcd (mp_int * a, mp_int * b, mp_int * c) +{ + mp_int u, v; + int k, u_lsb, v_lsb, res; + + /* either zero than gcd is the largest */ + if (mp_iszero (a) == MP_YES) { + return mp_abs (b, c); + } + if (mp_iszero (b) == MP_YES) { + return mp_abs (a, c); + } + + /* get copies of a and b we can modify */ + if ((res = mp_init_copy (&u, a)) != MP_OKAY) { + return res; + } + + if ((res = mp_init_copy (&v, b)) != MP_OKAY) { + goto LBL_U; + } + + /* must be positive for the remainder of the algorithm */ + u.sign = v.sign = MP_ZPOS; + + /* B1. 
Find the common power of two for u and v */ + u_lsb = mp_cnt_lsb(&u); + v_lsb = mp_cnt_lsb(&v); + k = MIN(u_lsb, v_lsb); + + if (k > 0) { + /* divide the power of two out */ + if ((res = mp_div_2d(&u, k, &u, NULL)) != MP_OKAY) { + goto LBL_V; + } + + if ((res = mp_div_2d(&v, k, &v, NULL)) != MP_OKAY) { + goto LBL_V; + } + } + + /* divide any remaining factors of two out */ + if (u_lsb != k) { + if ((res = mp_div_2d(&u, u_lsb - k, &u, NULL)) != MP_OKAY) { + goto LBL_V; + } + } + + if (v_lsb != k) { + if ((res = mp_div_2d(&v, v_lsb - k, &v, NULL)) != MP_OKAY) { + goto LBL_V; + } + } + + while (mp_iszero(&v) == 0) { + /* make sure v is the largest */ + if (mp_cmp_mag(&u, &v) == MP_GT) { + /* swap u and v to make sure v is >= u */ + mp_exch(&u, &v); + } + + /* subtract smallest from largest */ + if ((res = s_mp_sub(&v, &u, &v)) != MP_OKAY) { + goto LBL_V; + } + + /* Divide out all factors of two */ + if ((res = mp_div_2d(&v, mp_cnt_lsb(&v), &v, NULL)) != MP_OKAY) { + goto LBL_V; + } + } + + /* multiply by 2**k which we divided out at the beginning */ + if ((res = mp_mul_2d (&u, k, c)) != MP_OKAY) { + goto LBL_V; + } + c->sign = MP_ZPOS; + res = MP_OKAY; +LBL_V:mp_clear (&u); +LBL_U:mp_clear (&v); + return res; +} + + + +#endif /* WOLFSSL_KEY_GEN */ + + +#ifdef HAVE_ECC + +/* chars used in radix conversions */ +const char *mp_s_rmap = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+/"; + +/* read a string [ASCII] in a given radix */ +int mp_read_radix (mp_int * a, const char *str, int radix) +{ + int y, res, neg; + char ch; + + /* zero the digit bignum */ + mp_zero(a); + + /* make sure the radix is ok */ + if (radix < 2 || radix > 64) { + return MP_VAL; + } + + /* if the leading digit is a + * minus set the sign to negative. + */ + if (*str == '-') { + ++str; + neg = MP_NEG; + } else { + neg = MP_ZPOS; + } + + /* set the integer to the default of zero */ + mp_zero (a); + + /* process each digit of the string */ + while (*str) { + /* if the radix < 36 the conversion is case insensitive + * this allows numbers like 1AB and 1ab to represent the same value + * [e.g. in hex] + */ + ch = (char) ((radix < 36) ? XTOUPPER(*str) : *str); + for (y = 0; y < 64; y++) { + if (ch == mp_s_rmap[y]) { + break; + } + } + + /* if the char was found in the map + * and is less than the given radix add it + * to the number, otherwise exit the loop. + */ + if (y < radix) { + if ((res = mp_mul_d (a, (mp_digit) radix, a)) != MP_OKAY) { + return res; + } + if ((res = mp_add_d (a, (mp_digit) y, a)) != MP_OKAY) { + return res; + } + } else { + break; + } + ++str; + } + + /* set the sign only if a != 0 */ + if (mp_iszero(a) != 1) { + a->sign = neg; + } + return MP_OKAY; +} + +#endif /* HAVE_ECC */ + +#endif /* USE_FAST_MATH */ + +#endif /* NO_BIG_INT */ + diff --git a/wolfcrypt/src/misc.c b/wolfcrypt/src/misc.c index d542701c9..22a3eb3a5 100644 --- a/wolfcrypt/src/misc.c +++ b/wolfcrypt/src/misc.c @@ -2,14 +2,14 @@ * * Copyright (C) 2006-2014 wolfSSL Inc. * - * This file is part of CyaSSL. + * This file is part of wolfSSL. (formerly known as CyaSSL) * - * CyaSSL is free software; you can redistribute it and/or modify + * wolfSSL is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
 * - * CyaSSL is distributed in the hope that it will be useful, + * wolfSSL is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. @@ -23,9 +23,9 @@ #include <config.h> #endif -#include <cyassl/ctaocrypt/settings.h> +#include <wolfssl/wolfcrypt/settings.h> - -#include <cyassl/ctaocrypt/misc.h> +#include <wolfssl/wolfcrypt/misc.h> /* inlining these functions is a huge speed increase and a small size decrease, because the functions are smaller than function call setup/cleanup, e.g., diff --git a/wolfcrypt/src/tfm.c b/wolfcrypt/src/tfm.c index e69de29bb..ff49f15ad 100644 --- a/wolfcrypt/src/tfm.c +++ b/wolfcrypt/src/tfm.c @@ -0,0 +1,2544 @@ +/* tfm.c + * + * Copyright (C) 2006-2014 wolfSSL Inc. + * + * This file is part of wolfSSL. (formerly known as CyaSSL) + * + * wolfSSL is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * wolfSSL is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + */ + + +/* + * Based on public domain TomsFastMath 0.10 by Tom St Denis, tomstdenis@iahu.ca, + * http://math.libtomcrypt.com + */ + +/** + * Edited by Moisés Guimarães (moisesguimaraesm@gmail.com) + * to fit CyaSSL's needs. + */ + +#ifdef HAVE_CONFIG_H + #include <config.h> +#endif + +/* if using fips then the tfm.c from ctaocrypt is used @wc_fips */ +#ifndef HAVE_FIPS + +/* in case user set USE_FAST_MATH there */ +#include <wolfssl/wolfcrypt/settings.h> + +#ifdef USE_FAST_MATH + +#include <wolfssl/wolfcrypt/tfm.h> +#include <wolfcrypt/src/asm.c> /* will define asm MACROS or C ones */ + + +/* math settings check */ +word32 CheckRunTimeSettings(void) +{ + return CTC_SETTINGS; +} + + +/* math settings size check */ +word32 CheckRunTimeFastMath(void) +{ + return FP_SIZE; +} + + +/* Functions */ + +void fp_add(fp_int *a, fp_int *b, fp_int *c) +{ + int sa, sb; + + /* get sign of both inputs */ + sa = a->sign; + sb = b->sign; + + /* handle two cases, not four */ + if (sa == sb) { + /* both positive or both negative */ + /* add their magnitudes, copy the sign */ + c->sign = sa; + s_fp_add (a, b, c); + } else { + /* one positive, the other negative */ + /* subtract the one with the greater magnitude from */ + /* the one of the lesser magnitude. The result gets */ + /* the sign of the one with the greater magnitude.
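+ For example, a = +2 and b = -7: |a| < |b|, so the branch below computes s_fp_sub(b, a, c) = 5 and gives c the sign of b, i.e. the result is -5.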
*/ + if (fp_cmp_mag (a, b) == FP_LT) { + c->sign = sb; + s_fp_sub (b, a, c); + } else { + c->sign = sa; + s_fp_sub (a, b, c); + } + } +} + +/* unsigned addition */ +void s_fp_add(fp_int *a, fp_int *b, fp_int *c) +{ + int x, y, oldused; + register fp_word t; + + y = MAX(a->used, b->used); + oldused = MIN(c->used, FP_SIZE); /* help static analysis w/ largest size */ + c->used = y; + + t = 0; + for (x = 0; x < y; x++) { + t += ((fp_word)a->dp[x]) + ((fp_word)b->dp[x]); + c->dp[x] = (fp_digit)t; + t >>= DIGIT_BIT; + } + if (t != 0 && x < FP_SIZE) { + c->dp[c->used++] = (fp_digit)t; + ++x; + } + + c->used = x; + for (; x < oldused; x++) { + c->dp[x] = 0; + } + fp_clamp(c); +} + +/* c = a - b */ +void fp_sub(fp_int *a, fp_int *b, fp_int *c) +{ + int sa, sb; + + sa = a->sign; + sb = b->sign; + + if (sa != sb) { + /* subtract a negative from a positive, OR */ + /* subtract a positive from a negative. */ + /* In either case, ADD their magnitudes, */ + /* and use the sign of the first number. */ + c->sign = sa; + s_fp_add (a, b, c); + } else { + /* subtract a positive from a positive, OR */ + /* subtract a negative from a negative. */ + /* First, take the difference between their */ + /* magnitudes, then... */ + if (fp_cmp_mag (a, b) != FP_LT) { + /* Copy the sign from the first */ + c->sign = sa; + /* The first has a larger or equal magnitude */ + s_fp_sub (a, b, c); + } else { + /* The result has the *opposite* sign from */ + /* the first number. */ + c->sign = (sa == FP_ZPOS) ? FP_NEG : FP_ZPOS; + /* The second has a larger magnitude */ + s_fp_sub (b, a, c); + } + } +} + +/* unsigned subtraction ||a|| >= ||b|| ALWAYS! */ +void s_fp_sub(fp_int *a, fp_int *b, fp_int *c) +{ + int x, oldbused, oldused; + fp_word t; + + oldused = c->used; + oldbused = b->used; + c->used = a->used; + t = 0; + for (x = 0; x < oldbused; x++) { + t = ((fp_word)a->dp[x]) - (((fp_word)b->dp[x]) + t); + c->dp[x] = (fp_digit)t; + t = (t >> DIGIT_BIT)&1; + } + for (; x < a->used; x++) { + t = ((fp_word)a->dp[x]) - t; + c->dp[x] = (fp_digit)t; + t = (t >> DIGIT_BIT)&1; + } + for (; x < oldused; x++) { + c->dp[x] = 0; + } + fp_clamp(c); +} + +/* c = a * b */ +void fp_mul(fp_int *A, fp_int *B, fp_int *C) +{ + int y, yy; + + y = MAX(A->used, B->used); + yy = MIN(A->used, B->used); + + /* call generic if we're out of range */ + if (y + yy > FP_SIZE) { + fp_mul_comba(A, B, C); + return ; + } + + /* pick a comba (unrolled 4/8/16/32 x or rolled) based on the size + of the largest input. We also want to avoid doing excess mults if the + inputs are not close to the next power of two. 
That is, for example, + if say y=17 then we would do (32-17)^2 = 225 unneeded multiplications + */ + +#ifdef TFM_MUL3 + if (y <= 3) { + fp_mul_comba3(A,B,C); + return; + } +#endif +#ifdef TFM_MUL4 + if (y == 4) { + fp_mul_comba4(A,B,C); + return; + } +#endif +#ifdef TFM_MUL6 + if (y <= 6) { + fp_mul_comba6(A,B,C); + return; + } +#endif +#ifdef TFM_MUL7 + if (y == 7) { + fp_mul_comba7(A,B,C); + return; + } +#endif +#ifdef TFM_MUL8 + if (y == 8) { + fp_mul_comba8(A,B,C); + return; + } +#endif +#ifdef TFM_MUL9 + if (y == 9) { + fp_mul_comba9(A,B,C); + return; + } +#endif +#ifdef TFM_MUL12 + if (y <= 12) { + fp_mul_comba12(A,B,C); + return; + } +#endif +#ifdef TFM_MUL17 + if (y <= 17) { + fp_mul_comba17(A,B,C); + return; + } +#endif + +#ifdef TFM_SMALL_SET + if (y <= 16) { + fp_mul_comba_small(A,B,C); + return; + } +#endif +#if defined(TFM_MUL20) + if (y <= 20) { + fp_mul_comba20(A,B,C); + return; + } +#endif +#if defined(TFM_MUL24) + if (yy >= 16 && y <= 24) { + fp_mul_comba24(A,B,C); + return; + } +#endif +#if defined(TFM_MUL28) + if (yy >= 20 && y <= 28) { + fp_mul_comba28(A,B,C); + return; + } +#endif +#if defined(TFM_MUL32) + if (yy >= 24 && y <= 32) { + fp_mul_comba32(A,B,C); + return; + } +#endif +#if defined(TFM_MUL48) + if (yy >= 40 && y <= 48) { + fp_mul_comba48(A,B,C); + return; + } +#endif +#if defined(TFM_MUL64) + if (yy >= 56 && y <= 64) { + fp_mul_comba64(A,B,C); + return; + } +#endif + fp_mul_comba(A,B,C); +} + +void fp_mul_2(fp_int * a, fp_int * b) +{ + int x, oldused; + + oldused = b->used; + b->used = a->used; + + { + register fp_digit r, rr, *tmpa, *tmpb; + + /* alias for source */ + tmpa = a->dp; + + /* alias for dest */ + tmpb = b->dp; + + /* carry */ + r = 0; + for (x = 0; x < a->used; x++) { + + /* get what will be the *next* carry bit from the + * MSB of the current digit + */ + rr = *tmpa >> ((fp_digit)(DIGIT_BIT - 1)); + + /* now shift up this digit, add in the carry [from the previous] */ + *tmpb++ = ((*tmpa++ << ((fp_digit)1)) | r); + + /* copy the carry that would be from the source + * digit into the next iteration + */ + r = rr; + } + + /* new leading digit? 
*/ + if (r != 0 && b->used != (FP_SIZE-1)) { + /* add a MSB which is always 1 at this point */ + *tmpb = 1; + ++(b->used); + } + + /* now zero any excess digits on the destination + * that we didn't write to + */ + tmpb = b->dp + b->used; + for (x = b->used; x < oldused; x++) { + *tmpb++ = 0; + } + } + b->sign = a->sign; +} + +/* c = a * b */ +void fp_mul_d(fp_int *a, fp_digit b, fp_int *c) +{ + fp_word w; + int x, oldused; + + oldused = c->used; + c->used = a->used; + c->sign = a->sign; + w = 0; + for (x = 0; x < a->used; x++) { + w = ((fp_word)a->dp[x]) * ((fp_word)b) + w; + c->dp[x] = (fp_digit)w; + w = w >> DIGIT_BIT; + } + if (w != 0 && (a->used != FP_SIZE)) { + c->dp[c->used++] = (fp_digit) w; + ++x; + } + for (; x < oldused; x++) { + c->dp[x] = 0; + } + fp_clamp(c); +} + +/* c = a * 2**d */ +void fp_mul_2d(fp_int *a, int b, fp_int *c) +{ + fp_digit carry, carrytmp, shift; + int x; + + /* copy it */ + fp_copy(a, c); + + /* handle whole digits */ + if (b >= DIGIT_BIT) { + fp_lshd(c, b/DIGIT_BIT); + } + b %= DIGIT_BIT; + + /* shift the digits */ + if (b != 0) { + carry = 0; + shift = DIGIT_BIT - b; + for (x = 0; x < c->used; x++) { + carrytmp = c->dp[x] >> shift; + c->dp[x] = (c->dp[x] << b) + carry; + carry = carrytmp; + } + /* store last carry if room */ + if (carry && x < FP_SIZE) { + c->dp[c->used++] = carry; + } + } + fp_clamp(c); +} + +/* generic PxQ multiplier */ +void fp_mul_comba(fp_int *A, fp_int *B, fp_int *C) +{ + int ix, iy, iz, tx, ty, pa; + fp_digit c0, c1, c2, *tmpx, *tmpy; + fp_int tmp, *dst; + + COMBA_START; + COMBA_CLEAR; + + /* get size of output and trim */ + pa = A->used + B->used; + if (pa >= FP_SIZE) { + pa = FP_SIZE-1; + } + + if (A == C || B == C) { + fp_zero(&tmp); + dst = &tmp; + } else { + fp_zero(C); + dst = C; + } + + for (ix = 0; ix < pa; ix++) { + /* get offsets into the two bignums */ + ty = MIN(ix, B->used-1); + tx = ix - ty; + + /* setup temp aliases */ + tmpx = A->dp + tx; + tmpy = B->dp + ty; + + /* this is the number of times the loop will iterrate, essentially its + while (tx++ < a->used && ty-- >= 0) { ... } + */ + iy = MIN(A->used-tx, ty+1); + + /* execute loop */ + COMBA_FORWARD; + for (iz = 0; iz < iy; ++iz) { + /* TAO change COMBA_ADD back to MULADD */ + MULADD(*tmpx++, *tmpy--); + } + + /* store term */ + COMBA_STORE(dst->dp[ix]); + } + COMBA_FINI; + + dst->used = pa; + dst->sign = A->sign ^ B->sign; + fp_clamp(dst); + fp_copy(dst, C); +} + +/* a/b => cb + d == a */ +int fp_div(fp_int *a, fp_int *b, fp_int *c, fp_int *d) +{ + fp_int q, x, y, t1, t2; + int n, t, i, norm, neg; + + /* is divisor zero ? */ + if (fp_iszero (b) == 1) { + return FP_VAL; + } + + /* if a < b then q=0, r = a */ + if (fp_cmp_mag (a, b) == FP_LT) { + if (d != NULL) { + fp_copy (a, d); + } + if (c != NULL) { + fp_zero (c); + } + return FP_OKAY; + } + + fp_init(&q); + q.used = a->used + 2; + + fp_init(&t1); + fp_init(&t2); + fp_init_copy(&x, a); + fp_init_copy(&y, b); + + /* fix the sign */ + neg = (a->sign == b->sign) ? FP_ZPOS : FP_NEG; + x.sign = y.sign = FP_ZPOS; + + /* normalize both x and y, ensure that y >= b/2, [b == 2**DIGIT_BIT] */ + norm = fp_count_bits(&y) % DIGIT_BIT; + if (norm < (int)(DIGIT_BIT-1)) { + norm = (DIGIT_BIT-1) - norm; + fp_mul_2d (&x, norm, &x); + fp_mul_2d (&y, norm, &y); + } else { + norm = 0; + } + + /* note hac does 0 based, so if used==5 then its 0,1,2,3,4, e.g. 
use 4 */ + n = x.used - 1; + t = y.used - 1; + + /* while (x >= y*b**n-t) do { q[n-t] += 1; x -= y*b**{n-t} } */ + fp_lshd (&y, n - t); /* y = y*b**{n-t} */ + + while (fp_cmp (&x, &y) != FP_LT) { + ++(q.dp[n - t]); + fp_sub (&x, &y, &x); + } + + /* reset y by shifting it back down */ + fp_rshd (&y, n - t); + + /* step 3. for i from n down to (t + 1) */ + for (i = n; i >= (t + 1); i--) { + if (i > x.used) { + continue; + } + + /* step 3.1 if xi == yt then set q{i-t-1} to b-1, + * otherwise set q{i-t-1} to (xi*b + x{i-1})/yt */ + if (x.dp[i] == y.dp[t]) { + q.dp[i - t - 1] = (fp_digit) ((((fp_word)1) << DIGIT_BIT) - 1); + } else { + fp_word tmp; + tmp = ((fp_word) x.dp[i]) << ((fp_word) DIGIT_BIT); + tmp |= ((fp_word) x.dp[i - 1]); + tmp /= ((fp_word)y.dp[t]); + q.dp[i - t - 1] = (fp_digit) (tmp); + } + + /* while (q{i-t-1} * (yt * b + y{t-1})) > + xi * b**2 + xi-1 * b + xi-2 + + do q{i-t-1} -= 1; + */ + q.dp[i - t - 1] = (q.dp[i - t - 1] + 1); + do { + q.dp[i - t - 1] = (q.dp[i - t - 1] - 1); + + /* find left hand */ + fp_zero (&t1); + t1.dp[0] = (t - 1 < 0) ? 0 : y.dp[t - 1]; + t1.dp[1] = y.dp[t]; + t1.used = 2; + fp_mul_d (&t1, q.dp[i - t - 1], &t1); + + /* find right hand */ + t2.dp[0] = (i - 2 < 0) ? 0 : x.dp[i - 2]; + t2.dp[1] = (i - 1 < 0) ? 0 : x.dp[i - 1]; + t2.dp[2] = x.dp[i]; + t2.used = 3; + } while (fp_cmp_mag(&t1, &t2) == FP_GT); + + /* step 3.3 x = x - q{i-t-1} * y * b**{i-t-1} */ + fp_mul_d (&y, q.dp[i - t - 1], &t1); + fp_lshd (&t1, i - t - 1); + fp_sub (&x, &t1, &x); + + /* if x < 0 then { x = x + y*b**{i-t-1}; q{i-t-1} -= 1; } */ + if (x.sign == FP_NEG) { + fp_copy (&y, &t1); + fp_lshd (&t1, i - t - 1); + fp_add (&x, &t1, &x); + q.dp[i - t - 1] = q.dp[i - t - 1] - 1; + } + } + + /* now q is the quotient and x is the remainder + * [which we have to normalize] + */ + + /* get sign before writing to c */ + x.sign = x.used == 0 ? 
FP_ZPOS : a->sign; + + if (c != NULL) { + fp_clamp (&q); + fp_copy (&q, c); + c->sign = neg; + } + + if (d != NULL) { + fp_div_2d (&x, norm, &x, NULL); + +/* the following is a kludge, essentially we were seeing the right remainder but + with excess digits that should have been zero + */ + for (i = b->used; i < x.used; i++) { + x.dp[i] = 0; + } + fp_clamp(&x); + fp_copy (&x, d); + } + + return FP_OKAY; +} + +/* b = a/2 */ +void fp_div_2(fp_int * a, fp_int * b) +{ + int x, oldused; + + oldused = b->used; + b->used = a->used; + { + register fp_digit r, rr, *tmpa, *tmpb; + + /* source alias */ + tmpa = a->dp + b->used - 1; + + /* dest alias */ + tmpb = b->dp + b->used - 1; + + /* carry */ + r = 0; + for (x = b->used - 1; x >= 0; x--) { + /* get the carry for the next iteration */ + rr = *tmpa & 1; + + /* shift the current digit, add in carry and store */ + *tmpb-- = (*tmpa-- >> 1) | (r << (DIGIT_BIT - 1)); + + /* forward carry to next iteration */ + r = rr; + } + + /* zero excess digits */ + tmpb = b->dp + b->used; + for (x = b->used; x < oldused; x++) { + *tmpb++ = 0; + } + } + b->sign = a->sign; + fp_clamp (b); +} + +/* c = a / 2**b */ +void fp_div_2d(fp_int *a, int b, fp_int *c, fp_int *d) +{ + int D; + fp_int t; + + /* if the shift count is <= 0 then we do no work */ + if (b <= 0) { + fp_copy (a, c); + if (d != NULL) { + fp_zero (d); + } + return; + } + + fp_init(&t); + + /* get the remainder */ + if (d != NULL) { + fp_mod_2d (a, b, &t); + } + + /* copy */ + fp_copy(a, c); + + /* shift by as many digits in the bit count */ + if (b >= (int)DIGIT_BIT) { + fp_rshd (c, b / DIGIT_BIT); + } + + /* shift any bit count < DIGIT_BIT */ + D = (b % DIGIT_BIT); + if (D != 0) { + fp_rshb(c, D); + } + fp_clamp (c); + if (d != NULL) { + fp_copy (&t, d); + } +} + +/* c = a mod b, 0 <= c < b */ +int fp_mod(fp_int *a, fp_int *b, fp_int *c) +{ + fp_int t; + int err; + + fp_zero(&t); + if ((err = fp_div(a, b, NULL, &t)) != FP_OKAY) { + return err; + } + if (t.sign != b->sign) { + fp_add(&t, b, c); + } else { + fp_copy(&t, c); + } + return FP_OKAY; +} + +/* c = a mod 2**d */ +void fp_mod_2d(fp_int *a, int b, fp_int *c) +{ + int x; + + /* zero if count less than or equal to zero */ + if (b <= 0) { + fp_zero(c); + return; + } + + /* get copy of input */ + fp_copy(a, c); + + /* if 2**d is larger than we just return */ + if (b >= (DIGIT_BIT * a->used)) { + return; + } + + /* zero digits above the last digit of the modulus */ + for (x = (b / DIGIT_BIT) + ((b % DIGIT_BIT) == 0 ? 0 : 1); x < c->used; x++) { + c->dp[x] = 0; + } + /* clear the digit that is not completely outside/inside the modulus */ + c->dp[b / DIGIT_BIT] &= ~((fp_digit)0) >> (DIGIT_BIT - b); + fp_clamp (c); +} + +static int fp_invmod_slow (fp_int * a, fp_int * b, fp_int * c) +{ + fp_int x, y, u, v, A, B, C, D; + int res; + + /* b cannot be negative */ + if (b->sign == FP_NEG || fp_iszero(b) == 1) { + return FP_VAL; + } + + /* init temps */ + fp_init(&x); fp_init(&y); + fp_init(&u); fp_init(&v); + fp_init(&A); fp_init(&B); + fp_init(&C); fp_init(&D); + + /* x = a, y = b */ + if ((res = fp_mod(a, b, &x)) != FP_OKAY) { + return res; + } + fp_copy(b, &y); + + /* 2. [modified] if x,y are both even then return an error! */ + if (fp_iseven (&x) == 1 && fp_iseven (&y) == 1) { + return FP_VAL; + } + + /* 3. u=x, v=y, A=1, B=0, C=0,D=1 */ + fp_copy (&x, &u); + fp_copy (&y, &v); + fp_set (&A, 1); + fp_set (&D, 1); + +top: + /* 4. 
while u is even do */ + while (fp_iseven (&u) == 1) { + /* 4.1 u = u/2 */ + fp_div_2 (&u, &u); + + /* 4.2 if A or B is odd then */ + if (fp_isodd (&A) == 1 || fp_isodd (&B) == 1) { + /* A = (A+y)/2, B = (B-x)/2 */ + fp_add (&A, &y, &A); + fp_sub (&B, &x, &B); + } + /* A = A/2, B = B/2 */ + fp_div_2 (&A, &A); + fp_div_2 (&B, &B); + } + + /* 5. while v is even do */ + while (fp_iseven (&v) == 1) { + /* 5.1 v = v/2 */ + fp_div_2 (&v, &v); + + /* 5.2 if C or D is odd then */ + if (fp_isodd (&C) == 1 || fp_isodd (&D) == 1) { + /* C = (C+y)/2, D = (D-x)/2 */ + fp_add (&C, &y, &C); + fp_sub (&D, &x, &D); + } + /* C = C/2, D = D/2 */ + fp_div_2 (&C, &C); + fp_div_2 (&D, &D); + } + + /* 6. if u >= v then */ + if (fp_cmp (&u, &v) != FP_LT) { + /* u = u - v, A = A - C, B = B - D */ + fp_sub (&u, &v, &u); + fp_sub (&A, &C, &A); + fp_sub (&B, &D, &B); + } else { + /* v - v - u, C = C - A, D = D - B */ + fp_sub (&v, &u, &v); + fp_sub (&C, &A, &C); + fp_sub (&D, &B, &D); + } + + /* if not zero goto step 4 */ + if (fp_iszero (&u) == 0) + goto top; + + /* now a = C, b = D, gcd == g*v */ + + /* if v != 1 then there is no inverse */ + if (fp_cmp_d (&v, 1) != FP_EQ) { + return FP_VAL; + } + + /* if its too low */ + while (fp_cmp_d(&C, 0) == FP_LT) { + fp_add(&C, b, &C); + } + + /* too big */ + while (fp_cmp_mag(&C, b) != FP_LT) { + fp_sub(&C, b, &C); + } + + /* C is now the inverse */ + fp_copy(&C, c); + return FP_OKAY; +} + +/* c = 1/a (mod b) for odd b only */ +int fp_invmod(fp_int *a, fp_int *b, fp_int *c) +{ + fp_int x, y, u, v, B, D; + int neg; + + /* 2. [modified] b must be odd */ + if (fp_iseven (b) == FP_YES) { + return fp_invmod_slow(a,b,c); + } + + /* init all our temps */ + fp_init(&x); fp_init(&y); + fp_init(&u); fp_init(&v); + fp_init(&B); fp_init(&D); + + /* x == modulus, y == value to invert */ + fp_copy(b, &x); + + /* we need y = |a| */ + fp_abs(a, &y); + + /* 3. u=x, v=y, A=1, B=0, C=0,D=1 */ + fp_copy(&x, &u); + fp_copy(&y, &v); + fp_set (&D, 1); + +top: + /* 4. while u is even do */ + while (fp_iseven (&u) == FP_YES) { + /* 4.1 u = u/2 */ + fp_div_2 (&u, &u); + + /* 4.2 if B is odd then */ + if (fp_isodd (&B) == FP_YES) { + fp_sub (&B, &x, &B); + } + /* B = B/2 */ + fp_div_2 (&B, &B); + } + + /* 5. while v is even do */ + while (fp_iseven (&v) == FP_YES) { + /* 5.1 v = v/2 */ + fp_div_2 (&v, &v); + + /* 5.2 if D is odd then */ + if (fp_isodd (&D) == FP_YES) { + /* D = (D-x)/2 */ + fp_sub (&D, &x, &D); + } + /* D = D/2 */ + fp_div_2 (&D, &D); + } + + /* 6. 
if u >= v then */ + if (fp_cmp (&u, &v) != FP_LT) { + /* u = u - v, B = B - D */ + fp_sub (&u, &v, &u); + fp_sub (&B, &D, &B); + } else { + /* v - v - u, D = D - B */ + fp_sub (&v, &u, &v); + fp_sub (&D, &B, &D); + } + + /* if not zero goto step 4 */ + if (fp_iszero (&u) == FP_NO) { + goto top; + } + + /* now a = C, b = D, gcd == g*v */ + + /* if v != 1 then there is no inverse */ + if (fp_cmp_d (&v, 1) != FP_EQ) { + return FP_VAL; + } + + /* b is now the inverse */ + neg = a->sign; + while (D.sign == FP_NEG) { + fp_add (&D, b, &D); + } + fp_copy (&D, c); + c->sign = neg; + return FP_OKAY; +} + +/* d = a * b (mod c) */ +int fp_mulmod(fp_int *a, fp_int *b, fp_int *c, fp_int *d) +{ + fp_int tmp; + fp_zero(&tmp); + fp_mul(a, b, &tmp); + return fp_mod(&tmp, c, d); +} + +#ifdef TFM_TIMING_RESISTANT + +/* timing resistant montgomery ladder based exptmod + + Based on work by Marc Joye, Sung-Ming Yen, "The Montgomery Powering Ladder", Cryptographic Hardware and Embedded Systems, CHES 2002 +*/ +static int _fp_exptmod(fp_int * G, fp_int * X, fp_int * P, fp_int * Y) +{ + fp_int R[2]; + fp_digit buf, mp; + int err, bitcnt, digidx, y; + + /* now setup montgomery */ + if ((err = fp_montgomery_setup (P, &mp)) != FP_OKAY) { + return err; + } + + fp_init(&R[0]); + fp_init(&R[1]); + + /* now we need R mod m */ + fp_montgomery_calc_normalization (&R[0], P); + + /* now set R[0][1] to G * R mod m */ + if (fp_cmp_mag(P, G) != FP_GT) { + /* G > P so we reduce it first */ + fp_mod(G, P, &R[1]); + } else { + fp_copy(G, &R[1]); + } + fp_mulmod (&R[1], &R[0], P, &R[1]); + + /* for j = t-1 downto 0 do + r_!k = R0*R1; r_k = r_k^2 + */ + + /* set initial mode and bit cnt */ + bitcnt = 1; + buf = 0; + digidx = X->used - 1; + + for (;;) { + /* grab next digit as required */ + if (--bitcnt == 0) { + /* if digidx == -1 we are out of digits so break */ + if (digidx == -1) { + break; + } + /* read next digit and reset bitcnt */ + buf = X->dp[digidx--]; + bitcnt = (int)DIGIT_BIT; + } + + /* grab the next msb from the exponent */ + y = (int)(buf >> (DIGIT_BIT - 1)) & 1; + buf <<= (fp_digit)1; + + /* do ops */ + fp_mul(&R[0], &R[1], &R[y^1]); fp_montgomery_reduce(&R[y^1], P, mp); + fp_sqr(&R[y], &R[y]); fp_montgomery_reduce(&R[y], P, mp); + } + + fp_montgomery_reduce(&R[0], P, mp); + fp_copy(&R[0], Y); + return FP_OKAY; +} + +#else + +/* y = g**x (mod b) + * Some restrictions... x must be positive and < b + */ +static int _fp_exptmod(fp_int * G, fp_int * X, fp_int * P, fp_int * Y) +{ + fp_int M[64], res; + fp_digit buf, mp; + int err, bitbuf, bitcpy, bitcnt, mode, digidx, x, y, winsize; + + /* find window size */ + x = fp_count_bits (X); + if (x <= 21) { + winsize = 1; + } else if (x <= 36) { + winsize = 3; + } else if (x <= 140) { + winsize = 4; + } else if (x <= 450) { + winsize = 5; + } else { + winsize = 6; + } + + /* init M array */ + XMEMSET(M, 0, sizeof(M)); + + /* now setup montgomery */ + if ((err = fp_montgomery_setup (P, &mp)) != FP_OKAY) { + return err; + } + + /* setup result */ + fp_init(&res); + + /* create M table + * + * The M table contains powers of the input base, e.g. 
M[x] = G^x mod P + * + * The first half of the table is not computed though accept for M[0] and M[1] + */ + + /* now we need R mod m */ + fp_montgomery_calc_normalization (&res, P); + + /* now set M[1] to G * R mod m */ + if (fp_cmp_mag(P, G) != FP_GT) { + /* G > P so we reduce it first */ + fp_mod(G, P, &M[1]); + } else { + fp_copy(G, &M[1]); + } + fp_mulmod (&M[1], &res, P, &M[1]); + + /* compute the value at M[1<<(winsize-1)] by squaring M[1] (winsize-1) times */ + fp_copy (&M[1], &M[1 << (winsize - 1)]); + for (x = 0; x < (winsize - 1); x++) { + fp_sqr (&M[1 << (winsize - 1)], &M[1 << (winsize - 1)]); + fp_montgomery_reduce (&M[1 << (winsize - 1)], P, mp); + } + + /* create upper table */ + for (x = (1 << (winsize - 1)) + 1; x < (1 << winsize); x++) { + fp_mul(&M[x - 1], &M[1], &M[x]); + fp_montgomery_reduce(&M[x], P, mp); + } + + /* set initial mode and bit cnt */ + mode = 0; + bitcnt = 1; + buf = 0; + digidx = X->used - 1; + bitcpy = 0; + bitbuf = 0; + + for (;;) { + /* grab next digit as required */ + if (--bitcnt == 0) { + /* if digidx == -1 we are out of digits so break */ + if (digidx == -1) { + break; + } + /* read next digit and reset bitcnt */ + buf = X->dp[digidx--]; + bitcnt = (int)DIGIT_BIT; + } + + /* grab the next msb from the exponent */ + y = (int)(buf >> (DIGIT_BIT - 1)) & 1; + buf <<= (fp_digit)1; + + /* if the bit is zero and mode == 0 then we ignore it + * These represent the leading zero bits before the first 1 bit + * in the exponent. Technically this opt is not required but it + * does lower the # of trivial squaring/reductions used + */ + if (mode == 0 && y == 0) { + continue; + } + + /* if the bit is zero and mode == 1 then we square */ + if (mode == 1 && y == 0) { + fp_sqr(&res, &res); + fp_montgomery_reduce(&res, P, mp); + continue; + } + + /* else we add it to the window */ + bitbuf |= (y << (winsize - ++bitcpy)); + mode = 2; + + if (bitcpy == winsize) { + /* ok window is filled so square as required and multiply */ + /* square first */ + for (x = 0; x < winsize; x++) { + fp_sqr(&res, &res); + fp_montgomery_reduce(&res, P, mp); + } + + /* then multiply */ + fp_mul(&res, &M[bitbuf], &res); + fp_montgomery_reduce(&res, P, mp); + + /* empty window and reset */ + bitcpy = 0; + bitbuf = 0; + mode = 1; + } + } + + /* if bits remain then square/multiply */ + if (mode == 2 && bitcpy > 0) { + /* square then multiply if the bit is set */ + for (x = 0; x < bitcpy; x++) { + fp_sqr(&res, &res); + fp_montgomery_reduce(&res, P, mp); + + /* get next bit of the window */ + bitbuf <<= 1; + if ((bitbuf & (1 << winsize)) != 0) { + /* then multiply */ + fp_mul(&res, &M[1], &res); + fp_montgomery_reduce(&res, P, mp); + } + } + } + + /* fixup result if Montgomery reduction is used + * recall that any value in a Montgomery system is + * actually multiplied by R mod n. So we have + * to reduce one more time to cancel out the factor + * of R. 
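/* A rough, back-of-the-envelope cost model behind the window-size table above
 * (assumed figures, not measurements): a t-bit exponent costs about t modular
 * squarings, roughly t/winsize window multiplies, and 2^(winsize-1) - 1
 * multiplies to fill the upper half of M[].  For t = 256, winsize = 5:
 *     squarings    ~ 256
 *     window muls  ~ 256 / 5  ~ 51
 *     table muls   =  2^4 - 1 =  15
 * so wider windows trade table setup for fewer per-bit multiplies, which is
 * why the thresholds grow with fp_count_bits(X). */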
+ */ + fp_montgomery_reduce(&res, P, mp); + + /* swap res with Y */ + fp_copy (&res, Y); + return FP_OKAY; +} + +#endif + +int fp_exptmod(fp_int * G, fp_int * X, fp_int * P, fp_int * Y) +{ + /* prevent overflows */ + if (P->used > (FP_SIZE/2)) { + return FP_VAL; + } + + if (X->sign == FP_NEG) { +#ifndef POSITIVE_EXP_ONLY /* reduce stack if assume no negatives */ + int err; + fp_int tmp; + + /* yes, copy G and invmod it */ + fp_copy(G, &tmp); + if ((err = fp_invmod(&tmp, P, &tmp)) != FP_OKAY) { + return err; + } + X->sign = FP_ZPOS; + err = _fp_exptmod(&tmp, X, P, Y); + if (X != Y) { + X->sign = FP_NEG; + } + return err; +#else + return FP_VAL; +#endif + } + else { + /* Positive exponent so just exptmod */ + return _fp_exptmod(G, X, P, Y); + } +} + +/* computes a = 2**b */ +void fp_2expt(fp_int *a, int b) +{ + int z; + + /* zero a as per default */ + fp_zero (a); + + if (b < 0) { + return; + } + + z = b / DIGIT_BIT; + if (z >= FP_SIZE) { + return; + } + + /* set the used count of where the bit will go */ + a->used = z + 1; + + /* put the single bit in its place */ + a->dp[z] = ((fp_digit)1) << (b % DIGIT_BIT); +} + +/* b = a*a */ +void fp_sqr(fp_int *A, fp_int *B) +{ + int y = A->used; + + /* call generic if we're out of range */ + if (y + y > FP_SIZE) { + fp_sqr_comba(A, B); + return ; + } + +#if defined(TFM_SQR3) + if (y <= 3) { + fp_sqr_comba3(A,B); + return; + } +#endif +#if defined(TFM_SQR4) + if (y == 4) { + fp_sqr_comba4(A,B); + return; + } +#endif +#if defined(TFM_SQR6) + if (y <= 6) { + fp_sqr_comba6(A,B); + return; + } +#endif +#if defined(TFM_SQR7) + if (y == 7) { + fp_sqr_comba7(A,B); + return; + } +#endif +#if defined(TFM_SQR8) + if (y == 8) { + fp_sqr_comba8(A,B); + return; + } +#endif +#if defined(TFM_SQR9) + if (y == 9) { + fp_sqr_comba9(A,B); + return; + } +#endif +#if defined(TFM_SQR12) + if (y <= 12) { + fp_sqr_comba12(A,B); + return; + } +#endif +#if defined(TFM_SQR17) + if (y <= 17) { + fp_sqr_comba17(A,B); + return; + } +#endif +#if defined(TFM_SMALL_SET) + if (y <= 16) { + fp_sqr_comba_small(A,B); + return; + } +#endif +#if defined(TFM_SQR20) + if (y <= 20) { + fp_sqr_comba20(A,B); + return; + } +#endif +#if defined(TFM_SQR24) + if (y <= 24) { + fp_sqr_comba24(A,B); + return; + } +#endif +#if defined(TFM_SQR28) + if (y <= 28) { + fp_sqr_comba28(A,B); + return; + } +#endif +#if defined(TFM_SQR32) + if (y <= 32) { + fp_sqr_comba32(A,B); + return; + } +#endif +#if defined(TFM_SQR48) + if (y <= 48) { + fp_sqr_comba48(A,B); + return; + } +#endif +#if defined(TFM_SQR64) + if (y <= 64) { + fp_sqr_comba64(A,B); + return; + } +#endif + fp_sqr_comba(A, B); +} + +/* generic comba squarer */ +void fp_sqr_comba(fp_int *A, fp_int *B) +{ + int pa, ix, iz; + fp_digit c0, c1, c2; + fp_int tmp, *dst; +#ifdef TFM_ISO + fp_word tt; +#endif + + /* get size of output and trim */ + pa = A->used + A->used; + if (pa >= FP_SIZE) { + pa = FP_SIZE-1; + } + + /* number of output digits to produce */ + COMBA_START; + COMBA_CLEAR; + + if (A == B) { + fp_zero(&tmp); + dst = &tmp; + } else { + fp_zero(B); + dst = B; + } + + for (ix = 0; ix < pa; ix++) { + int tx, ty, iy; + fp_digit *tmpy, *tmpx; + + /* get offsets into the two bignums */ + ty = MIN(A->used-1, ix); + tx = ix - ty; + + /* setup temp aliases */ + tmpx = A->dp + tx; + tmpy = A->dp + ty; + + /* this is the number of times the loop will iterrate, + while (tx++ < a->used && ty-- >= 0) { ... 
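/* Worked check of the negative-exponent path in fp_exptmod above, with tiny
 * numbers (comments only): G = 3, X = -2, P = 7.
 *     fp_invmod:   3^-1 mod 7 = 5        (3*5 = 15 = 2*7 + 1)
 *     _fp_exptmod: 5^|X| = 5^2 mod 7 = 25 mod 7 = 4
 * and indeed 3^2 = 9 = 2 (mod 7) with 2*4 = 8 = 1 (mod 7), so 4 is 3^-2 mod 7
 * as expected. */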
} + */ + iy = MIN(A->used-tx, ty+1); + + /* now for squaring tx can never equal ty + * we halve the distance since they approach + * at a rate of 2x and we have to round because + * odd cases need to be executed + */ + iy = MIN(iy, (ty-tx+1)>>1); + + /* forward carries */ + COMBA_FORWARD; + + /* execute loop */ + for (iz = 0; iz < iy; iz++) { + SQRADD2(*tmpx++, *tmpy--); + } + + /* even columns have the square term in them */ + if ((ix&1) == 0) { + /* TAO change COMBA_ADD back to SQRADD */ + SQRADD(A->dp[ix>>1], A->dp[ix>>1]); + } + + /* store it */ + COMBA_STORE(dst->dp[ix]); + } + + COMBA_FINI; + + /* setup dest */ + dst->used = pa; + fp_clamp (dst); + if (dst != B) { + fp_copy(dst, B); + } +} + +int fp_cmp(fp_int *a, fp_int *b) +{ + if (a->sign == FP_NEG && b->sign == FP_ZPOS) { + return FP_LT; + } else if (a->sign == FP_ZPOS && b->sign == FP_NEG) { + return FP_GT; + } else { + /* compare digits */ + if (a->sign == FP_NEG) { + /* if negative compare opposite direction */ + return fp_cmp_mag(b, a); + } else { + return fp_cmp_mag(a, b); + } + } +} + +/* compare against a single digit */ +int fp_cmp_d(fp_int *a, fp_digit b) +{ + /* compare based on sign */ + if ((b && a->used == 0) || a->sign == FP_NEG) { + return FP_LT; + } + + /* compare based on magnitude */ + if (a->used > 1) { + return FP_GT; + } + + /* compare the only digit of a to b */ + if (a->dp[0] > b) { + return FP_GT; + } else if (a->dp[0] < b) { + return FP_LT; + } else { + return FP_EQ; + } + +} + +int fp_cmp_mag(fp_int *a, fp_int *b) +{ + int x; + + if (a->used > b->used) { + return FP_GT; + } else if (a->used < b->used) { + return FP_LT; + } else { + for (x = a->used - 1; x >= 0; x--) { + if (a->dp[x] > b->dp[x]) { + return FP_GT; + } else if (a->dp[x] < b->dp[x]) { + return FP_LT; + } + } + } + return FP_EQ; +} + +/* setups the montgomery reduction */ +int fp_montgomery_setup(fp_int *a, fp_digit *rho) +{ + fp_digit x, b; + +/* fast inversion mod 2**k + * + * Based on the fact that + * + * XA = 1 (mod 2**n) => (X(2-XA)) A = 1 (mod 2**2n) + * => 2*X*A - X*X*A*A = 1 + * => 2*(1) - (1) = 1 + */ + b = a->dp[0]; + + if ((b & 1) == 0) { + return FP_VAL; + } + + x = (((b + 2) & 4) << 1) + b; /* here x*a==1 mod 2**4 */ + x *= 2 - b * x; /* here x*a==1 mod 2**8 */ + x *= 2 - b * x; /* here x*a==1 mod 2**16 */ + x *= 2 - b * x; /* here x*a==1 mod 2**32 */ +#ifdef FP_64BIT + x *= 2 - b * x; /* here x*a==1 mod 2**64 */ +#endif + + /* rho = -1/m mod b */ + *rho = (fp_digit) (((fp_word) 1 << ((fp_word) DIGIT_BIT)) - ((fp_word)x)); + + return FP_OKAY; +} + +/* computes a = B**n mod b without division or multiplication useful for + * normalizing numbers in a Montgomery system. 
+ */ +void fp_montgomery_calc_normalization(fp_int *a, fp_int *b) +{ + int x, bits; + + /* how many bits of last digit does b use */ + bits = fp_count_bits (b) % DIGIT_BIT; + if (!bits) bits = DIGIT_BIT; + + /* compute A = B^(n-1) * 2^(bits-1) */ + if (b->used > 1) { + fp_2expt (a, (b->used - 1) * DIGIT_BIT + bits - 1); + } else { + fp_set(a, 1); + bits = 1; + } + + /* now compute C = A * B mod b */ + for (x = bits - 1; x < (int)DIGIT_BIT; x++) { + fp_mul_2 (a, a); + if (fp_cmp_mag (a, b) != FP_LT) { + s_fp_sub (a, b, a); + } + } +} + + +#ifdef TFM_SMALL_MONT_SET + #include "fp_mont_small.i" +#endif + +/* computes x/R == x (mod N) via Montgomery Reduction */ +void fp_montgomery_reduce(fp_int *a, fp_int *m, fp_digit mp) +{ + fp_digit c[FP_SIZE], *_c, *tmpm, mu = 0; + int oldused, x, y, pa; + + /* bail if too large */ + if (m->used > (FP_SIZE/2)) { + (void)mu; /* shut up compiler */ + return; + } + +#ifdef TFM_SMALL_MONT_SET + if (m->used <= 16) { + fp_montgomery_reduce_small(a, m, mp); + return; + } +#endif + + + /* now zero the buff */ + XMEMSET(c, 0, sizeof c); + pa = m->used; + + /* copy the input */ + oldused = a->used; + for (x = 0; x < oldused; x++) { + c[x] = a->dp[x]; + } + MONT_START; + + for (x = 0; x < pa; x++) { + fp_digit cy = 0; + /* get Mu for this round */ + LOOP_START; + _c = c + x; + tmpm = m->dp; + y = 0; + #if (defined(TFM_SSE2) || defined(TFM_X86_64)) + for (; y < (pa & ~7); y += 8) { + INNERMUL8; + _c += 8; + tmpm += 8; + } + #endif + + for (; y < pa; y++) { + INNERMUL; + ++_c; + } + LOOP_END; + while (cy) { + PROPCARRY; + ++_c; + } + } + + /* now copy out */ + _c = c + pa; + tmpm = a->dp; + for (x = 0; x < pa+1; x++) { + *tmpm++ = *_c++; + } + + for (; x < oldused; x++) { + *tmpm++ = 0; + } + + MONT_FINI; + + a->used = pa+1; + fp_clamp(a); + + /* if A >= m then A = A - m */ + if (fp_cmp_mag (a, m) != FP_LT) { + s_fp_sub (a, m, a); + } +} + +void fp_read_unsigned_bin(fp_int *a, unsigned char *b, int c) +{ + /* zero the int */ + fp_zero (a); + + /* If we know the endianness of this architecture, and we're using + 32-bit fp_digits, we can optimize this */ +#if (defined(LITTLE_ENDIAN_ORDER) || defined(BIG_ENDIAN_ORDER)) && defined(FP_32BIT) + /* But not for both simultaneously */ +#if defined(LITTLE_ENDIAN_ORDER) && defined(BIG_ENDIAN_ORDER) +#error Both LITTLE_ENDIAN_ORDER and BIG_ENDIAN_ORDER defined. +#endif + { + unsigned char *pd = (unsigned char *)a->dp; + + if ((unsigned)c > (FP_SIZE * sizeof(fp_digit))) { + int excess = c - (FP_SIZE * sizeof(fp_digit)); + c -= excess; + b += excess; + } + a->used = (c + sizeof(fp_digit) - 1)/sizeof(fp_digit); + /* read the bytes in */ +#ifdef BIG_ENDIAN_ORDER + { + /* Use Duff's device to unroll the loop. */ + int idx = (c - 1) & ~3; + switch (c % 4) { + case 0: do { pd[idx+0] = *b++; + case 3: pd[idx+1] = *b++; + case 2: pd[idx+2] = *b++; + case 1: pd[idx+3] = *b++; + idx -= 4; + } while ((c -= 4) > 0); + } + } +#else + for (c -= 1; c >= 0; c -= 1) { + pd[c] = *b++; + } +#endif + } +#else + /* read the bytes in */ + for (; c > 0; c--) { + fp_mul_2d (a, 8, a); + a->dp[0] |= *b++; + a->used += 1; + } +#endif + fp_clamp (a); +} + +void fp_to_unsigned_bin(fp_int *a, unsigned char *b) +{ + int x; + fp_int t; + + fp_init_copy(&t, a); + + x = 0; + while (fp_iszero (&t) == FP_NO) { + b[x++] = (unsigned char) (t.dp[0] & 255); + fp_div_2d (&t, 8, &t, NULL); + } + fp_reverse (b, x); +} + +int fp_unsigned_bin_size(fp_int *a) +{ + int size = fp_count_bits (a); + return (size / 8 + ((size & 7) != 0 ? 
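/* A single-digit sketch of the Montgomery reduction above (redc32 is a
 * hypothetical helper, not wolfSSL API).  Assumptions: 32-bit digits so
 * R = 2^32, the modulus m is odd and m < 2^31 (so the 64-bit sum cannot
 * overflow), T < m*R, and rho = -1/m mod 2^32 as produced by
 * fp_montgomery_setup.  The result is T * R^-1 mod m. */
#include <stdint.h>

static uint32_t redc32(uint64_t T, uint32_t m, uint32_t rho)
{
    uint32_t mu = (uint32_t)T * rho;             /* mu = T * (-1/m) mod 2^32   */
    uint64_t t  = (T + (uint64_t)mu * m) >> 32;  /* low 32 bits cancel exactly */
    return (t >= m) ? (uint32_t)(t - m) : (uint32_t)t;
}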
1 : 0)); +} + +void fp_set(fp_int *a, fp_digit b) +{ + fp_zero(a); + a->dp[0] = b; + a->used = a->dp[0] ? 1 : 0; +} + +int fp_count_bits (fp_int * a) +{ + int r; + fp_digit q; + + /* shortcut */ + if (a->used == 0) { + return 0; + } + + /* get number of digits and add that */ + r = (a->used - 1) * DIGIT_BIT; + + /* take the last digit and count the bits in it */ + q = a->dp[a->used - 1]; + while (q > ((fp_digit) 0)) { + ++r; + q >>= ((fp_digit) 1); + } + return r; +} + +int fp_leading_bit(fp_int *a) +{ + int bit = 0; + + if (a->used != 0) { + fp_digit q = a->dp[a->used - 1]; + int qSz = sizeof(fp_digit); + + while (qSz > 0) { + if ((unsigned char)q != 0) + bit = (q & 0x80) != 0; + q >>= 8; + qSz--; + } + } + + return bit; +} + +void fp_lshd(fp_int *a, int x) +{ + int y; + + /* move up and truncate as required */ + y = MIN(a->used + x - 1, (int)(FP_SIZE-1)); + + /* store new size */ + a->used = y + 1; + + /* move digits */ + for (; y >= x; y--) { + a->dp[y] = a->dp[y-x]; + } + + /* zero lower digits */ + for (; y >= 0; y--) { + a->dp[y] = 0; + } + + /* clamp digits */ + fp_clamp(a); +} + + +/* right shift by bit count */ +void fp_rshb(fp_int *c, int x) +{ + register fp_digit *tmpc, mask, shift; + fp_digit r, rr; + fp_digit D = x; + + /* mask */ + mask = (((fp_digit)1) << D) - 1; + + /* shift for lsb */ + shift = DIGIT_BIT - D; + + /* alias */ + tmpc = c->dp + (c->used - 1); + + /* carry */ + r = 0; + for (x = c->used - 1; x >= 0; x--) { + /* get the lower bits of this word in a temp */ + rr = *tmpc & mask; + + /* shift the current word and mix in the carry bits from previous word */ + *tmpc = (*tmpc >> D) | (r << shift); + --tmpc; + + /* set the carry to the carry bits of the current word found above */ + r = rr; + } +} + + +void fp_rshd(fp_int *a, int x) +{ + int y; + + /* too many digits just zero and return */ + if (x >= a->used) { + fp_zero(a); + return; + } + + /* shift */ + for (y = 0; y < a->used - x; y++) { + a->dp[y] = a->dp[y+x]; + } + + /* zero rest */ + for (; y < a->used; y++) { + a->dp[y] = 0; + } + + /* decrement count */ + a->used -= x; + fp_clamp(a); +} + +/* reverse an array, used for radix code */ +void fp_reverse (unsigned char *s, int len) +{ + int ix, iy; + unsigned char t; + + ix = 0; + iy = len - 1; + while (ix < iy) { + t = s[ix]; + s[ix] = s[iy]; + s[iy] = t; + ++ix; + --iy; + } +} + + +/* c = a - b */ +void fp_sub_d(fp_int *a, fp_digit b, fp_int *c) +{ + fp_int tmp; + fp_set(&tmp, b); + fp_sub(a, &tmp, c); +} + + +/* CyaSSL callers from normal lib */ + +/* init a new mp_int */ +int mp_init (mp_int * a) +{ + if (a) + fp_init(a); + return MP_OKAY; +} + +/* clear one (frees) */ +void mp_clear (mp_int * a) +{ + fp_zero(a); +} + +/* handle up to 6 inits */ +int mp_init_multi(mp_int* a, mp_int* b, mp_int* c, mp_int* d, mp_int* e, mp_int* f) +{ + if (a) + fp_init(a); + if (b) + fp_init(b); + if (c) + fp_init(c); + if (d) + fp_init(d); + if (e) + fp_init(e); + if (f) + fp_init(f); + + return MP_OKAY; +} + +/* high level addition (handles signs) */ +int mp_add (mp_int * a, mp_int * b, mp_int * c) +{ + fp_add(a, b, c); + return MP_OKAY; +} + +/* high level subtraction (handles signs) */ +int mp_sub (mp_int * a, mp_int * b, mp_int * c) +{ + fp_sub(a, b, c); + return MP_OKAY; +} + +/* high level multiplication (handles sign) */ +int mp_mul (mp_int * a, mp_int * b, mp_int * c) +{ + fp_mul(a, b, c); + return MP_OKAY; +} + +/* d = a * b (mod c) */ +int mp_mulmod (mp_int * a, mp_int * b, mp_int * c, mp_int * d) +{ + return fp_mulmod(a, b, c, d); +} + +/* c = a mod b, 0 <= c < b 
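/* Worked example for fp_count_bits and fp_unsigned_bin_size (comments only,
 * assuming 32-bit digits): for a with used = 2, dp[1] = 0x5, dp[0] = 0xFFFFFFFF:
 *     r = (2 - 1) * 32 = 32, and 0x5 contributes 3 more bits  -> 35 bits
 *     fp_unsigned_bin_size = 35/8 + 1 = 5 bytes   (35 & 7 != 0 adds the +1) */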
*/ +int mp_mod (mp_int * a, mp_int * b, mp_int * c) +{ + return fp_mod (a, b, c); +} + +/* hac 14.61, pp608 */ +int mp_invmod (mp_int * a, mp_int * b, mp_int * c) +{ + return fp_invmod(a, b, c); +} + +/* this is a shell function that calls either the normal or Montgomery + * exptmod functions. Originally the call to the montgomery code was + * embedded in the normal function but that wasted alot of stack space + * for nothing (since 99% of the time the Montgomery code would be called) + */ +int mp_exptmod (mp_int * G, mp_int * X, mp_int * P, mp_int * Y) +{ + return fp_exptmod(G, X, P, Y); +} + +/* compare two ints (signed)*/ +int mp_cmp (mp_int * a, mp_int * b) +{ + return fp_cmp(a, b); +} + +/* compare a digit */ +int mp_cmp_d(mp_int * a, mp_digit b) +{ + return fp_cmp_d(a, b); +} + +/* get the size for an unsigned equivalent */ +int mp_unsigned_bin_size (mp_int * a) +{ + return fp_unsigned_bin_size(a); +} + +/* store in unsigned [big endian] format */ +int mp_to_unsigned_bin (mp_int * a, unsigned char *b) +{ + fp_to_unsigned_bin(a,b); + return MP_OKAY; +} + +/* reads a unsigned char array, assumes the msb is stored first [big endian] */ +int mp_read_unsigned_bin (mp_int * a, const unsigned char *b, int c) +{ + fp_read_unsigned_bin(a, (unsigned char *)b, c); + return MP_OKAY; +} + + +int mp_sub_d(fp_int *a, fp_digit b, fp_int *c) +{ + fp_sub_d(a, b, c); + return MP_OKAY; +} + + +/* fast math conversion */ +int mp_copy(fp_int* a, fp_int* b) +{ + fp_copy(a, b); + return MP_OKAY; +} + + +/* fast math conversion */ +int mp_isodd(mp_int* a) +{ + return fp_isodd(a); +} + + +/* fast math conversion */ +int mp_iszero(mp_int* a) +{ + return fp_iszero(a); +} + + +/* fast math conversion */ +int mp_count_bits (mp_int* a) +{ + return fp_count_bits(a); +} + + +int mp_leading_bit (mp_int* a) +{ + return fp_leading_bit(a); +} + + +/* fast math conversion */ +void mp_rshb (mp_int* a, int x) +{ + fp_rshb(a, x); +} + + +/* fast math wrappers */ +int mp_set_int(fp_int *a, fp_digit b) +{ + fp_set(a, b); + return MP_OKAY; +} + + +#if defined(WOLFSSL_KEY_GEN) || defined (HAVE_ECC) + +/* c = a * a (mod b) */ +int fp_sqrmod(fp_int *a, fp_int *b, fp_int *c) +{ + fp_int tmp; + fp_zero(&tmp); + fp_sqr(a, &tmp); + return fp_mod(&tmp, b, c); +} + +/* fast math conversion */ +int mp_sqrmod(mp_int *a, mp_int *b, mp_int *c) +{ + return fp_sqrmod(a, b, c); +} + +/* fast math conversion */ +int mp_montgomery_calc_normalization(mp_int *a, mp_int *b) +{ + fp_montgomery_calc_normalization(a, b); + return MP_OKAY; +} + +#endif /* WOLFSSL_KEYGEN || HAVE_ECC */ + + +#if defined(WOLFSSL_KEY_GEN) || defined(HAVE_COMP_KEY) + +static const int lnz[16] = { + 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 +}; + +/* Counts the number of lsbs which are zero before the first zero bit */ +int fp_cnt_lsb(fp_int *a) +{ + int x; + fp_digit q, qq; + + /* easy out */ + if (fp_iszero(a) == 1) { + return 0; + } + + /* scan lower digits until non-zero */ + for (x = 0; x < a->used && a->dp[x] == 0; x++); + q = a->dp[x]; + x *= DIGIT_BIT; + + /* now scan this digit until a 1 is found */ + if ((q & 1) == 0) { + do { + qq = q & 15; + x += lnz[qq]; + q >>= 4; + } while (qq == 0); + } + return x; +} + + + + +static int s_is_power_of_two(fp_digit b, int *p) +{ + int x; + + /* fast return if no power of two */ + if ((b==0) || (b & (b-1))) { + return 0; + } + + for (x = 0; x < DIGIT_BIT; x++) { + if (b == (((fp_digit)1)< cb + d == a */ +static int fp_div_d(fp_int *a, fp_digit b, fp_int *c, fp_digit *d) +{ + fp_int q; + fp_word w; + fp_digit t; + int ix; 
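/* A minimal sketch of the nibble-table trailing-zero count behind fp_cnt_lsb,
 * on a single nonzero word (cnt_lsb32 and lnz4 are hypothetical names, not
 * wolfSSL API).  lnz4[n] is the number of trailing zero bits in the nibble n,
 * with lnz4[0] = 4 so an all-zero nibble falls through to the next one. */
#include <stdint.h>

static const int lnz4[16] = { 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };

static int cnt_lsb32(uint32_t q)    /* q must be nonzero */
{
    int x = 0;
    uint32_t qq;

    do {
        qq  = q & 15;               /* low nibble */
        x  += lnz4[qq];             /* its trailing zeros (4 if the nibble is 0) */
        q >>= 4;
    } while (qq == 0);              /* stop at the first nonzero nibble */
    return x;                       /* e.g. cnt_lsb32(0x50) == 4 */
}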
+ + /* cannot divide by zero */ + if (b == 0) { + return FP_VAL; + } + + /* quick outs */ + if (b == 1 || fp_iszero(a) == 1) { + if (d != NULL) { + *d = 0; + } + if (c != NULL) { + fp_copy(a, c); + } + return FP_OKAY; + } + + /* power of two ? */ + if (s_is_power_of_two(b, &ix) == 1) { + if (d != NULL) { + *d = a->dp[0] & ((((fp_digit)1)<used; + q.sign = a->sign; + w = 0; + for (ix = a->used - 1; ix >= 0; ix--) { + w = (w << ((fp_word)DIGIT_BIT)) | ((fp_word)a->dp[ix]); + + if (w >= b) { + t = (fp_digit)(w / b); + w -= ((fp_word)t) * ((fp_word)b); + } else { + t = 0; + } + q.dp[ix] = (fp_digit)t; + } + + if (d != NULL) { + *d = (fp_digit)w; + } + + if (c != NULL) { + fp_clamp(&q); + fp_copy(&q, c); + } + + return FP_OKAY; +} + + +/* c = a mod b, 0 <= c < b */ +static int fp_mod_d(fp_int *a, fp_digit b, fp_digit *c) +{ + return fp_div_d(a, b, NULL, c); +} + +int mp_mod_d(fp_int *a, fp_digit b, fp_digit *c) +{ + return fp_mod_d(a, b, c); +} + +#endif /* defined(WOLFSSL_KEY_GEN) || defined(HAVE_COMP_KEY) */ + +#ifdef WOLFSSL_KEY_GEN + +void fp_gcd(fp_int *a, fp_int *b, fp_int *c); +void fp_lcm(fp_int *a, fp_int *b, fp_int *c); +int fp_isprime(fp_int *a); + +int mp_gcd(fp_int *a, fp_int *b, fp_int *c) +{ + fp_gcd(a, b, c); + return MP_OKAY; +} + + +int mp_lcm(fp_int *a, fp_int *b, fp_int *c) +{ + fp_lcm(a, b, c); + return MP_OKAY; +} + + +int mp_prime_is_prime(mp_int* a, int t, int* result) +{ + (void)t; + *result = fp_isprime(a); + return MP_OKAY; +} + +/* Miller-Rabin test of "a" to the base of "b" as described in + * HAC pp. 139 Algorithm 4.24 + * + * Sets result to 0 if definitely composite or 1 if probably prime. + * Randomly the chance of error is no more than 1/4 and often + * very much lower. + */ +static void fp_prime_miller_rabin (fp_int * a, fp_int * b, int *result) +{ + fp_int n1, y, r; + int s, j; + + /* default */ + *result = FP_NO; + + /* ensure b > 1 */ + if (fp_cmp_d(b, 1) != FP_GT) { + return; + } + + /* get n1 = a - 1 */ + fp_init_copy(&n1, a); + fp_sub_d(&n1, 1, &n1); + + /* set 2**s * r = n1 */ + fp_init_copy(&r, &n1); + + /* count the number of least significant bits + * which are zero + */ + s = fp_cnt_lsb(&r); + + /* now divide n - 1 by 2**s */ + fp_div_2d (&r, s, &r, NULL); + + /* compute y = b**r mod a */ + fp_init(&y); + fp_exptmod(b, &r, a, &y); + + /* if y != 1 and y != n1 do */ + if (fp_cmp_d (&y, 1) != FP_EQ && fp_cmp (&y, &n1) != FP_EQ) { + j = 1; + /* while j <= s-1 and y != n1 */ + while ((j <= (s - 1)) && fp_cmp (&y, &n1) != FP_EQ) { + fp_sqrmod (&y, a, &y); + + /* if y == 1 then composite */ + if (fp_cmp_d (&y, 1) == FP_EQ) { + return; + } + ++j; + } + + /* if y != n1 then composite */ + if (fp_cmp (&y, &n1) != FP_EQ) { + return; + } + } + + /* probably prime now */ + *result = FP_YES; +} + + +/* a few primes */ +static const fp_digit primes[256] = { + 0x0002, 0x0003, 0x0005, 0x0007, 0x000B, 0x000D, 0x0011, 0x0013, + 0x0017, 0x001D, 0x001F, 0x0025, 0x0029, 0x002B, 0x002F, 0x0035, + 0x003B, 0x003D, 0x0043, 0x0047, 0x0049, 0x004F, 0x0053, 0x0059, + 0x0061, 0x0065, 0x0067, 0x006B, 0x006D, 0x0071, 0x007F, 0x0083, + 0x0089, 0x008B, 0x0095, 0x0097, 0x009D, 0x00A3, 0x00A7, 0x00AD, + 0x00B3, 0x00B5, 0x00BF, 0x00C1, 0x00C5, 0x00C7, 0x00D3, 0x00DF, + 0x00E3, 0x00E5, 0x00E9, 0x00EF, 0x00F1, 0x00FB, 0x0101, 0x0107, + 0x010D, 0x010F, 0x0115, 0x0119, 0x011B, 0x0125, 0x0133, 0x0137, + + 0x0139, 0x013D, 0x014B, 0x0151, 0x015B, 0x015D, 0x0161, 0x0167, + 0x016F, 0x0175, 0x017B, 0x017F, 0x0185, 0x018D, 0x0191, 0x0199, + 0x01A3, 0x01A5, 0x01AF, 0x01B1, 0x01B7, 0x01BB, 0x01C1, 
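/* Worked traces of fp_prime_miller_rabin with tiny numbers (comments only):
 *   a = 13, b = 2:   n1 = 12 = 2^2 * 3, so s = 2, r = 3
 *       y = 2^3 mod 13 = 8           (neither 1 nor n1, enter the loop)
 *       y = 8^2 mod 13 = 12 = n1     -> survives, result = FP_YES
 *   a = 15, b = 2:   n1 = 14 = 2^1 * 7, so s = 1, r = 7
 *       y = 2^7 mod 15 = 8           (neither 1 nor n1; s-1 = 0, no squarings)
 *       y != n1                      -> composite, result stays FP_NO        */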
0x01C9, + 0x01CD, 0x01CF, 0x01D3, 0x01DF, 0x01E7, 0x01EB, 0x01F3, 0x01F7, + 0x01FD, 0x0209, 0x020B, 0x021D, 0x0223, 0x022D, 0x0233, 0x0239, + 0x023B, 0x0241, 0x024B, 0x0251, 0x0257, 0x0259, 0x025F, 0x0265, + 0x0269, 0x026B, 0x0277, 0x0281, 0x0283, 0x0287, 0x028D, 0x0293, + 0x0295, 0x02A1, 0x02A5, 0x02AB, 0x02B3, 0x02BD, 0x02C5, 0x02CF, + + 0x02D7, 0x02DD, 0x02E3, 0x02E7, 0x02EF, 0x02F5, 0x02F9, 0x0301, + 0x0305, 0x0313, 0x031D, 0x0329, 0x032B, 0x0335, 0x0337, 0x033B, + 0x033D, 0x0347, 0x0355, 0x0359, 0x035B, 0x035F, 0x036D, 0x0371, + 0x0373, 0x0377, 0x038B, 0x038F, 0x0397, 0x03A1, 0x03A9, 0x03AD, + 0x03B3, 0x03B9, 0x03C7, 0x03CB, 0x03D1, 0x03D7, 0x03DF, 0x03E5, + 0x03F1, 0x03F5, 0x03FB, 0x03FD, 0x0407, 0x0409, 0x040F, 0x0419, + 0x041B, 0x0425, 0x0427, 0x042D, 0x043F, 0x0443, 0x0445, 0x0449, + 0x044F, 0x0455, 0x045D, 0x0463, 0x0469, 0x047F, 0x0481, 0x048B, + + 0x0493, 0x049D, 0x04A3, 0x04A9, 0x04B1, 0x04BD, 0x04C1, 0x04C7, + 0x04CD, 0x04CF, 0x04D5, 0x04E1, 0x04EB, 0x04FD, 0x04FF, 0x0503, + 0x0509, 0x050B, 0x0511, 0x0515, 0x0517, 0x051B, 0x0527, 0x0529, + 0x052F, 0x0551, 0x0557, 0x055D, 0x0565, 0x0577, 0x0581, 0x058F, + 0x0593, 0x0595, 0x0599, 0x059F, 0x05A7, 0x05AB, 0x05AD, 0x05B3, + 0x05BF, 0x05C9, 0x05CB, 0x05CF, 0x05D1, 0x05D5, 0x05DB, 0x05E7, + 0x05F3, 0x05FB, 0x0607, 0x060D, 0x0611, 0x0617, 0x061F, 0x0623, + 0x062B, 0x062F, 0x063D, 0x0641, 0x0647, 0x0649, 0x064D, 0x0653 +}; + +int fp_isprime(fp_int *a) +{ + fp_int b; + fp_digit d = 0; + int r, res; + + /* do trial division */ + for (r = 0; r < 256; r++) { + fp_mod_d(a, primes[r], &d); + if (d == 0) { + return FP_NO; + } + } + + /* now do 8 miller rabins */ + fp_init(&b); + for (r = 0; r < 8; r++) { + fp_set(&b, primes[r]); + fp_prime_miller_rabin(a, &b, &res); + if (res == FP_NO) { + return FP_NO; + } + } + return FP_YES; +} + + +/* c = [a, b] */ +void fp_lcm(fp_int *a, fp_int *b, fp_int *c) +{ + fp_int t1, t2; + + fp_init(&t1); + fp_init(&t2); + fp_gcd(a, b, &t1); + if (fp_cmp_mag(a, b) == FP_GT) { + fp_div(a, &t1, &t2, NULL); + fp_mul(b, &t2, c); + } else { + fp_div(b, &t1, &t2, NULL); + fp_mul(a, &t2, c); + } +} + + + +/* c = (a, b) */ +void fp_gcd(fp_int *a, fp_int *b, fp_int *c) +{ + fp_int u, v, r; + + /* either zero than gcd is the largest */ + if (fp_iszero (a) == 1 && fp_iszero (b) == 0) { + fp_abs (b, c); + return; + } + if (fp_iszero (a) == 0 && fp_iszero (b) == 1) { + fp_abs (a, c); + return; + } + + /* optimized. 
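/* Worked example for fp_lcm (comments only): a = 12, b = 18.
 *     gcd(12, 18) = 6
 *     |a| < |b|, so the larger operand is divided first:
 *         t2 = 18 / 6 = 3,   lcm = 12 * 3 = 36
 * Dividing before multiplying keeps the intermediate value no larger than the
 * final lcm, which matters for the fixed-size fp_int buffers. */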
At this point if a == 0 then + * b must equal zero too + */ + if (fp_iszero (a) == 1) { + fp_zero(c); + return; + } + + /* sort inputs */ + if (fp_cmp_mag(a, b) != FP_LT) { + fp_init_copy(&u, a); + fp_init_copy(&v, b); + } else { + fp_init_copy(&u, b); + fp_init_copy(&v, a); + } + + fp_zero(&r); + while (fp_iszero(&v) == FP_NO) { + fp_mod(&u, &v, &r); + fp_copy(&v, &u); + fp_copy(&r, &v); + } + fp_copy(&u, c); +} + +#endif /* WOLFSSL_KEY_GEN */ + + +#if defined(HAVE_ECC) || !defined(NO_PWDBASED) +/* c = a + b */ +void fp_add_d(fp_int *a, fp_digit b, fp_int *c) +{ + fp_int tmp; + fp_set(&tmp, b); + fp_add(a,&tmp,c); +} + +/* external compatibility */ +int mp_add_d(fp_int *a, fp_digit b, fp_int *c) +{ + fp_add_d(a, b, c); + return MP_OKAY; +} + +#endif /* HAVE_ECC || !NO_PWDBASED */ + + +#ifdef HAVE_ECC + +/* chars used in radix conversions */ +static const char *fp_s_rmap = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+/"; + +static int fp_read_radix(fp_int *a, const char *str, int radix) +{ + int y, neg; + char ch; + + /* make sure the radix is ok */ + if (radix < 2 || radix > 64) { + return FP_VAL; + } + + /* if the leading digit is a + * minus set the sign to negative. + */ + if (*str == '-') { + ++str; + neg = FP_NEG; + } else { + neg = FP_ZPOS; + } + + /* set the integer to the default of zero */ + fp_zero (a); + + /* process each digit of the string */ + while (*str) { + /* if the radix < 36 the conversion is case insensitive + * this allows numbers like 1AB and 1ab to represent the same value + * [e.g. in hex] + */ + ch = (char) ((radix < 36) ? XTOUPPER(*str) : *str); + for (y = 0; y < 64; y++) { + if (ch == fp_s_rmap[y]) { + break; + } + } + + /* if the char was found in the map + * and is less than the given radix add it + * to the number, otherwise exit the loop. 
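/* Worked trace of the fp_gcd loop above (comments only): a = 54, b = 24.
 *     sort:  u = 54, v = 24
 *     r = 54 mod 24 = 6    ->  u = 24, v = 6
 *     r = 24 mod  6 = 0    ->  u =  6, v = 0
 *     v is now zero, so c = u = 6 = gcd(54, 24). */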
+ */ + if (y < radix) { + fp_mul_d (a, (fp_digit) radix, a); + fp_add_d (a, (fp_digit) y, a); + } else { + break; + } + ++str; + } + + /* set the sign only if a != 0 */ + if (fp_iszero(a) != FP_YES) { + a->sign = neg; + } + return FP_OKAY; +} + +/* fast math conversion */ +int mp_read_radix(mp_int *a, const char *str, int radix) +{ + return fp_read_radix(a, str, radix); +} + +/* fast math conversion */ +int mp_set(fp_int *a, fp_digit b) +{ + fp_set(a,b); + return MP_OKAY; +} + +/* fast math conversion */ +int mp_sqr(fp_int *A, fp_int *B) +{ + fp_sqr(A, B); + return MP_OKAY; +} + +/* fast math conversion */ +int mp_montgomery_reduce(fp_int *a, fp_int *m, fp_digit mp) +{ + fp_montgomery_reduce(a, m, mp); + return MP_OKAY; +} + + +/* fast math conversion */ +int mp_montgomery_setup(fp_int *a, fp_digit *rho) +{ + return fp_montgomery_setup(a, rho); +} + +int mp_div_2(fp_int * a, fp_int * b) +{ + fp_div_2(a, b); + return MP_OKAY; +} + + +int mp_init_copy(fp_int * a, fp_int * b) +{ + fp_init_copy(a, b); + return MP_OKAY; +} + + +#ifdef HAVE_COMP_KEY + +int mp_cnt_lsb(fp_int* a) +{ + fp_cnt_lsb(a); + return MP_OKAY; +} + +int mp_div_2d(fp_int* a, int b, fp_int* c, fp_int* d) +{ + fp_div_2d(a, b, c, d); + return MP_OKAY; +} + +#endif /* HAVE_COMP_KEY */ + + +#endif /* HAVE_ECC */ + +#endif /* USE_FAST_MATH */ + +#endif /* HAVE_FIPS */ + diff --git a/wolfssl/wolfcrypt/integer.h b/wolfssl/wolfcrypt/integer.h index f8c2b7147..f801e4388 100644 --- a/wolfssl/wolfcrypt/integer.h +++ b/wolfssl/wolfcrypt/integer.h @@ -25,8 +25,8 @@ */ -#ifndef CTAO_CRYPT_INTEGER_H -#define CTAO_CRYPT_INTEGER_H +#ifndef WOLF_CRYPT_INTEGER_H +#define WOLF_CRYPT_INTEGER_H /* may optionally use fast math instead, not yet supported on all platforms and may not be faster on all @@ -320,5 +320,5 @@ int mp_mod_d(mp_int* a, mp_digit b, mp_digit* c); #endif /* USE_FAST_MATH */ -#endif /* CTAO_CRYPT_INTEGER_H */ +#endif /* WOLF_CRYPT_INTEGER_H */ diff --git a/wolfssl/wolfcrypt/tfm.h b/wolfssl/wolfcrypt/tfm.h index 2f139cc56..73e936e27 100644 --- a/wolfssl/wolfcrypt/tfm.h +++ b/wolfssl/wolfcrypt/tfm.h @@ -2,14 +2,14 @@ * * Copyright (C) 2006-2014 wolfSSL Inc. * - * This file is part of CyaSSL. + * This file is part of wolfSSL. (formerly known as CyaSSL) * - * CyaSSL is free software; you can redistribute it and/or modify + * wolfSSL is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * - * CyaSSL is distributed in the hope that it will be useful, + * wolfSSL is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
@@ -35,678 +35,678 @@ #ifndef WOLF_CRYPT_TFM_H #define WOLF_CRYPT_TFM_H -#include +/* for fips compatibility @wc_fips */ +#ifdef HAVE_FIPS + #include +#else +#include +#ifndef CHAR_BIT + #include #endif -// -//#include -//#ifndef CHAR_BIT -// #include -//#endif -// -// -//#ifdef __cplusplus -// extern "C" { -//#endif -// -//#ifndef MIN -// #define MIN(x,y) ((x)<(y)?(x):(y)) -//#endif -// -//#ifndef MAX -// #define MAX(x,y) ((x)>(y)?(x):(y)) -//#endif -// -// -//#ifndef NO_64BIT -///* autodetect x86-64 and make sure we are using 64-bit digits with x86-64 asm */ -//#if defined(__x86_64__) -// #if defined(TFM_X86) || defined(TFM_SSE2) || defined(TFM_ARM) -// #error x86-64 detected, x86-32/SSE2/ARM optimizations are not valid! -// #endif -// #if !defined(TFM_X86_64) && !defined(TFM_NO_ASM) -// #define TFM_X86_64 -// #endif -//#endif -//#if defined(TFM_X86_64) -// #if !defined(FP_64BIT) -// #define FP_64BIT -// #endif -//#endif -///* use 64-bit digit even if not using asm on x86_64 */ -//#if defined(__x86_64__) && !defined(FP_64BIT) -// #define FP_64BIT -//#endif -///* if intel compiler doesn't provide 128 bit type don't turn on 64bit */ -//#if defined(FP_64BIT) && defined(__INTEL_COMPILER) && !defined(HAVE___UINT128_T) -// #undef FP_64BIT -// #undef TFM_X86_64 -//#endif -//#endif /* NO_64BIT */ -// -///* try to detect x86-32 */ -//#if defined(__i386__) && !defined(TFM_SSE2) -// #if defined(TFM_X86_64) || defined(TFM_ARM) -// #error x86-32 detected, x86-64/ARM optimizations are not valid! -// #endif -// #if !defined(TFM_X86) && !defined(TFM_NO_ASM) -// #define TFM_X86 -// #endif -//#endif -// -///* make sure we're 32-bit for x86-32/sse/arm/ppc32 */ -//#if (defined(TFM_X86) || defined(TFM_SSE2) || defined(TFM_ARM) || defined(TFM_PPC32)) && defined(FP_64BIT) -// #warning x86-32, SSE2 and ARM, PPC32 optimizations require 32-bit digits (undefining) -// #undef FP_64BIT -//#endif -// -///* multi asms? */ -//#ifdef TFM_X86 -// #define TFM_ASM -//#endif -//#ifdef TFM_X86_64 -// #ifdef TFM_ASM -// #error TFM_ASM already defined! -// #endif -// #define TFM_ASM -//#endif -//#ifdef TFM_SSE2 -// #ifdef TFM_ASM -// #error TFM_ASM already defined! -// #endif -// #define TFM_ASM -//#endif -//#ifdef TFM_ARM -// #ifdef TFM_ASM -// #error TFM_ASM already defined! -// #endif -// #define TFM_ASM -//#endif -//#ifdef TFM_PPC32 -// #ifdef TFM_ASM -// #error TFM_ASM already defined! -// #endif -// #define TFM_ASM -//#endif -//#ifdef TFM_PPC64 -// #ifdef TFM_ASM -// #error TFM_ASM already defined! -// #endif -// #define TFM_ASM -//#endif -//#ifdef TFM_AVR32 -// #ifdef TFM_ASM -// #error TFM_ASM already defined! -// #endif -// #define TFM_ASM -//#endif -// -///* we want no asm? 
*/ -//#ifdef TFM_NO_ASM -// #undef TFM_X86 -// #undef TFM_X86_64 -// #undef TFM_SSE2 -// #undef TFM_ARM -// #undef TFM_PPC32 -// #undef TFM_PPC64 -// #undef TFM_AVR32 -// #undef TFM_ASM -//#endif -// -///* ECC helpers */ -//#ifdef TFM_ECC192 -// #ifdef FP_64BIT -// #define TFM_MUL3 -// #define TFM_SQR3 -// #else -// #define TFM_MUL6 -// #define TFM_SQR6 -// #endif -//#endif -// -//#ifdef TFM_ECC224 -// #ifdef FP_64BIT -// #define TFM_MUL4 -// #define TFM_SQR4 -// #else -// #define TFM_MUL7 -// #define TFM_SQR7 -// #endif -//#endif -// -//#ifdef TFM_ECC256 -// #ifdef FP_64BIT -// #define TFM_MUL4 -// #define TFM_SQR4 -// #else -// #define TFM_MUL8 -// #define TFM_SQR8 -// #endif -//#endif -// -//#ifdef TFM_ECC384 -// #ifdef FP_64BIT -// #define TFM_MUL6 -// #define TFM_SQR6 -// #else -// #define TFM_MUL12 -// #define TFM_SQR12 -// #endif -//#endif -// -//#ifdef TFM_ECC521 -// #ifdef FP_64BIT -// #define TFM_MUL9 -// #define TFM_SQR9 -// #else -// #define TFM_MUL17 -// #define TFM_SQR17 -// #endif -//#endif -// -// -///* some default configurations. -// */ -//#if defined(FP_64BIT) -// /* for GCC only on supported platforms */ -// typedef unsigned long long fp_digit; /* 64bit, 128 uses mode(TI) below */ -// typedef unsigned long fp_word __attribute__ ((mode(TI))); -//#else -// #if defined(_MSC_VER) || defined(__BORLANDC__) -// typedef unsigned __int64 ulong64; -// #else -// typedef unsigned long long ulong64; -// #endif -// -// #ifndef NO_64BIT -// typedef unsigned int fp_digit; -// typedef ulong64 fp_word; -// #define FP_32BIT -// #else -// /* some procs like coldfire prefer not to place multiply into 64bit type -// even though it exists */ -// typedef unsigned short fp_digit; -// typedef unsigned int fp_word; -// #endif -//#endif -// -///* # of digits this is */ -//#define DIGIT_BIT (int)((CHAR_BIT) * sizeof(fp_digit)) -// -///* Max size of any number in bits. Basically the largest size you will be -// * multiplying should be half [or smaller] of FP_MAX_SIZE-four_digit -// * -// * It defaults to 4096-bits [allowing multiplications upto 2048x2048 bits ] -// */ -//#ifndef FP_MAX_BITS -// #define FP_MAX_BITS 4096 -//#endif -//#define FP_MAX_SIZE (FP_MAX_BITS+(8*DIGIT_BIT)) -// -///* will this lib work? */ -//#if (CHAR_BIT & 7) -// #error CHAR_BIT must be a multiple of eight. -//#endif -//#if FP_MAX_BITS % CHAR_BIT -// #error FP_MAX_BITS must be a multiple of CHAR_BIT -//#endif -// -//#define FP_MASK (fp_digit)(-1) -//#define FP_SIZE (FP_MAX_SIZE/DIGIT_BIT) -// -///* signs */ -//#define FP_ZPOS 0 -//#define FP_NEG 1 -// -///* return codes */ -//#define FP_OKAY 0 -//#define FP_VAL 1 -//#define FP_MEM 2 -// -///* equalities */ -//#define FP_LT -1 /* less than */ -//#define FP_EQ 0 /* equal to */ -//#define FP_GT 1 /* greater than */ -// -///* replies */ -//#define FP_YES 1 /* yes response */ -//#define FP_NO 0 /* no response */ -// -///* a FP type */ -//typedef struct { -// fp_digit dp[FP_SIZE]; -// int used, -// sign; -//} fp_int; -// -///* externally define this symbol to ignore the default settings, useful for changing the build from the make process */ -//#ifndef TFM_ALREADY_SET -// -///* do we want the large set of small multiplications ? 
-// Enable these if you are going to be doing a lot of small (<= 16 digit) multiplications say in ECC -// Or if you're on a 64-bit machine doing RSA as a 1024-bit integer == 16 digits ;-) -// */ -///* need to refactor the function */ -///*#define TFM_SMALL_SET */ -// -///* do we want huge code -// Enable these if you are doing 20, 24, 28, 32, 48, 64 digit multiplications (useful for RSA) -// Less important on 64-bit machines as 32 digits == 2048 bits -// */ -//#if 0 -//#define TFM_MUL3 -//#define TFM_MUL4 -//#define TFM_MUL6 -//#define TFM_MUL7 -//#define TFM_MUL8 -//#define TFM_MUL9 -//#define TFM_MUL12 -//#define TFM_MUL17 -//#endif -//#ifdef TFM_HUGE_SET -//#define TFM_MUL20 -//#define TFM_MUL24 -//#define TFM_MUL28 -//#define TFM_MUL32 -//#if (FP_MAX_BITS >= 6144) && defined(FP_64BIT) -// #define TFM_MUL48 -//#endif -//#if (FP_MAX_BITS >= 8192) && defined(FP_64BIT) -// #define TFM_MUL64 -//#endif -//#endif -// -//#if 0 -//#define TFM_SQR3 -//#define TFM_SQR4 -//#define TFM_SQR6 -//#define TFM_SQR7 -//#define TFM_SQR8 -//#define TFM_SQR9 -//#define TFM_SQR12 -//#define TFM_SQR17 -//#endif -//#ifdef TFM_HUGE_SET -//#define TFM_SQR20 -//#define TFM_SQR24 -//#define TFM_SQR28 -//#define TFM_SQR32 -//#define TFM_SQR48 -//#define TFM_SQR64 -//#endif -// -///* do we want some overflow checks -// Not required if you make sure your numbers are within range (e.g. by default a modulus for fp_exptmod() can only be upto 2048 bits long) -// */ -///* #define TFM_CHECK */ -// -///* Is the target a P4 Prescott -// */ -///* #define TFM_PRESCOTT */ -// -///* Do we want timing resistant fp_exptmod() ? -// * This makes it slower but also timing invariant with respect to the exponent -// */ -///* #define TFM_TIMING_RESISTANT */ -// -//#endif /* TFM_ALREADY_SET */ -// -///* functions */ -// -///* returns a TFM ident string useful for debugging... */ -///*const char *fp_ident(void);*/ -// -///* initialize [or zero] an fp int */ -//#define fp_init(a) (void)XMEMSET((a), 0, sizeof(fp_int)) -//#define fp_zero(a) fp_init(a) -// -///* zero/even/odd ? */ -//#define fp_iszero(a) (((a)->used == 0) ? FP_YES : FP_NO) -//#define fp_iseven(a) (((a)->used >= 0 && (((a)->dp[0] & 1) == 0)) ? FP_YES : FP_NO) -//#define fp_isodd(a) (((a)->used > 0 && (((a)->dp[0] & 1) == 1)) ? FP_YES : FP_NO) -// -///* set to a small digit */ -//void fp_set(fp_int *a, fp_digit b); -// -///* copy from a to b */ -//#define fp_copy(a, b) (void)(((a) != (b)) ? ((void)XMEMCPY((b), (a), sizeof(fp_int))) : (void)0) -//#define fp_init_copy(a, b) fp_copy(b, a) -// -///* clamp digits */ -//#define fp_clamp(a) { while ((a)->used && (a)->dp[(a)->used-1] == 0) --((a)->used); (a)->sign = (a)->used ? 
(a)->sign : FP_ZPOS; } -// -///* negate and absolute */ -//#define fp_neg(a, b) { fp_copy(a, b); (b)->sign ^= 1; fp_clamp(b); } -//#define fp_abs(a, b) { fp_copy(a, b); (b)->sign = 0; } -// -///* right shift x digits */ -//void fp_rshd(fp_int *a, int x); -// -///* right shift x bits */ -//void fp_rshb(fp_int *a, int x); -// -///* left shift x digits */ -//void fp_lshd(fp_int *a, int x); -// -///* signed comparison */ -//int fp_cmp(fp_int *a, fp_int *b); -// -///* unsigned comparison */ -//int fp_cmp_mag(fp_int *a, fp_int *b); -// -///* power of 2 operations */ -//void fp_div_2d(fp_int *a, int b, fp_int *c, fp_int *d); -//void fp_mod_2d(fp_int *a, int b, fp_int *c); -//void fp_mul_2d(fp_int *a, int b, fp_int *c); -//void fp_2expt (fp_int *a, int b); -//void fp_mul_2(fp_int *a, fp_int *c); -//void fp_div_2(fp_int *a, fp_int *c); -// -///* Counts the number of lsbs which are zero before the first zero bit */ -//int fp_cnt_lsb(fp_int *a); -// -///* c = a + b */ -//void fp_add(fp_int *a, fp_int *b, fp_int *c); -// -///* c = a - b */ -//void fp_sub(fp_int *a, fp_int *b, fp_int *c); -// -///* c = a * b */ -//void fp_mul(fp_int *a, fp_int *b, fp_int *c); -// -///* b = a*a */ -//void fp_sqr(fp_int *a, fp_int *b); -// -///* a/b => cb + d == a */ -//int fp_div(fp_int *a, fp_int *b, fp_int *c, fp_int *d); -// -///* c = a mod b, 0 <= c < b */ -//int fp_mod(fp_int *a, fp_int *b, fp_int *c); -// -///* compare against a single digit */ -//int fp_cmp_d(fp_int *a, fp_digit b); -// -///* c = a + b */ -//void fp_add_d(fp_int *a, fp_digit b, fp_int *c); -// -///* c = a - b */ -//void fp_sub_d(fp_int *a, fp_digit b, fp_int *c); -// -///* c = a * b */ -//void fp_mul_d(fp_int *a, fp_digit b, fp_int *c); -// -///* a/b => cb + d == a */ -///*int fp_div_d(fp_int *a, fp_digit b, fp_int *c, fp_digit *d);*/ -// -///* c = a mod b, 0 <= c < b */ -///*int fp_mod_d(fp_int *a, fp_digit b, fp_digit *c);*/ -// -///* ---> number theory <--- */ -///* d = a + b (mod c) */ -///*int fp_addmod(fp_int *a, fp_int *b, fp_int *c, fp_int *d);*/ -// -///* d = a - b (mod c) */ -///*int fp_submod(fp_int *a, fp_int *b, fp_int *c, fp_int *d);*/ -// -///* d = a * b (mod c) */ -//int fp_mulmod(fp_int *a, fp_int *b, fp_int *c, fp_int *d); -// -///* c = a * a (mod b) */ -//int fp_sqrmod(fp_int *a, fp_int *b, fp_int *c); -// -///* c = 1/a (mod b) */ -//int fp_invmod(fp_int *a, fp_int *b, fp_int *c); -// -///* c = (a, b) */ -///*void fp_gcd(fp_int *a, fp_int *b, fp_int *c);*/ -// -///* c = [a, b] */ -///*void fp_lcm(fp_int *a, fp_int *b, fp_int *c);*/ -// -///* setups the montgomery reduction */ -//int fp_montgomery_setup(fp_int *a, fp_digit *mp); -// -///* computes a = B**n mod b without division or multiplication useful for -// * normalizing numbers in a Montgomery system. 
-// */ -//void fp_montgomery_calc_normalization(fp_int *a, fp_int *b); -// -///* computes x/R == x (mod N) via Montgomery Reduction */ -//void fp_montgomery_reduce(fp_int *a, fp_int *m, fp_digit mp); -// -///* d = a**b (mod c) */ -//int fp_exptmod(fp_int *a, fp_int *b, fp_int *c, fp_int *d); -// -///* primality stuff */ -// -///* perform a Miller-Rabin test of a to the base b and store result in "result" */ -///*void fp_prime_miller_rabin (fp_int * a, fp_int * b, int *result);*/ -// -///* 256 trial divisions + 8 Miller-Rabins, returns FP_YES if probable prime */ -///*int fp_isprime(fp_int *a);*/ -// -///* Primality generation flags */ -///*#define TFM_PRIME_BBS 0x0001 */ /* BBS style prime */ -///*#define TFM_PRIME_SAFE 0x0002 */ /* Safe prime (p-1)/2 == prime */ -///*#define TFM_PRIME_2MSB_OFF 0x0004 */ /* force 2nd MSB to 0 */ -///*#define TFM_PRIME_2MSB_ON 0x0008 */ /* force 2nd MSB to 1 */ -// -///* callback for fp_prime_random, should fill dst with random bytes and return how many read [upto len] */ -///*typedef int tfm_prime_callback(unsigned char *dst, int len, void *dat);*/ -// -///*#define fp_prime_random(a, t, size, bbs, cb, dat) fp_prime_random_ex(a, t, ((size) * 8) + 1, (bbs==1)?TFM_PRIME_BBS:0, cb, dat)*/ -// -///*int fp_prime_random_ex(fp_int *a, int t, int size, int flags, tfm_prime_callback cb, void *dat);*/ -// -///* radix conersions */ -//int fp_count_bits(fp_int *a); -//int fp_leading_bit(fp_int *a); -// -//int fp_unsigned_bin_size(fp_int *a); -//void fp_read_unsigned_bin(fp_int *a, unsigned char *b, int c); -//void fp_to_unsigned_bin(fp_int *a, unsigned char *b); -// -///*int fp_signed_bin_size(fp_int *a);*/ -///*void fp_read_signed_bin(fp_int *a, unsigned char *b, int c);*/ -///*void fp_to_signed_bin(fp_int *a, unsigned char *b);*/ -// -///*int fp_read_radix(fp_int *a, char *str, int radix);*/ -///*int fp_toradix(fp_int *a, char *str, int radix);*/ -///*int fp_toradix_n(fp_int * a, char *str, int radix, int maxlen);*/ -// -// -///* VARIOUS LOW LEVEL STUFFS */ -//void s_fp_add(fp_int *a, fp_int *b, fp_int *c); -//void s_fp_sub(fp_int *a, fp_int *b, fp_int *c); -//void fp_reverse(unsigned char *s, int len); -// -//void fp_mul_comba(fp_int *a, fp_int *b, fp_int *c); -// -//#ifdef TFM_SMALL_SET -//void fp_mul_comba_small(fp_int *a, fp_int *b, fp_int *c); -//#endif -// -//#ifdef TFM_MUL3 -//void fp_mul_comba3(fp_int *a, fp_int *b, fp_int *c); -//#endif -//#ifdef TFM_MUL4 -//void fp_mul_comba4(fp_int *a, fp_int *b, fp_int *c); -//#endif -//#ifdef TFM_MUL6 -//void fp_mul_comba6(fp_int *a, fp_int *b, fp_int *c); -//#endif -//#ifdef TFM_MUL7 -//void fp_mul_comba7(fp_int *a, fp_int *b, fp_int *c); -//#endif -//#ifdef TFM_MUL8 -//void fp_mul_comba8(fp_int *a, fp_int *b, fp_int *c); -//#endif -//#ifdef TFM_MUL9 -//void fp_mul_comba9(fp_int *a, fp_int *b, fp_int *c); -//#endif -//#ifdef TFM_MUL12 -//void fp_mul_comba12(fp_int *a, fp_int *b, fp_int *c); -//#endif -//#ifdef TFM_MUL17 -//void fp_mul_comba17(fp_int *a, fp_int *b, fp_int *c); -//#endif -// -//#ifdef TFM_MUL20 -//void fp_mul_comba20(fp_int *a, fp_int *b, fp_int *c); -//#endif -//#ifdef TFM_MUL24 -//void fp_mul_comba24(fp_int *a, fp_int *b, fp_int *c); -//#endif -//#ifdef TFM_MUL28 -//void fp_mul_comba28(fp_int *a, fp_int *b, fp_int *c); -//#endif -//#ifdef TFM_MUL32 -//void fp_mul_comba32(fp_int *a, fp_int *b, fp_int *c); -//#endif -//#ifdef TFM_MUL48 -//void fp_mul_comba48(fp_int *a, fp_int *b, fp_int *c); -//#endif -//#ifdef TFM_MUL64 -//void fp_mul_comba64(fp_int *a, fp_int *b, fp_int *c); -//#endif -// -//void 
fp_sqr_comba(fp_int *a, fp_int *b); -// -//#ifdef TFM_SMALL_SET -//void fp_sqr_comba_small(fp_int *a, fp_int *b); -//#endif -// -//#ifdef TFM_SQR3 -//void fp_sqr_comba3(fp_int *a, fp_int *b); -//#endif -//#ifdef TFM_SQR4 -//void fp_sqr_comba4(fp_int *a, fp_int *b); -//#endif -//#ifdef TFM_SQR6 -//void fp_sqr_comba6(fp_int *a, fp_int *b); -//#endif -//#ifdef TFM_SQR7 -//void fp_sqr_comba7(fp_int *a, fp_int *b); -//#endif -//#ifdef TFM_SQR8 -//void fp_sqr_comba8(fp_int *a, fp_int *b); -//#endif -//#ifdef TFM_SQR9 -//void fp_sqr_comba9(fp_int *a, fp_int *b); -//#endif -//#ifdef TFM_SQR12 -//void fp_sqr_comba12(fp_int *a, fp_int *b); -//#endif -//#ifdef TFM_SQR17 -//void fp_sqr_comba17(fp_int *a, fp_int *b); -//#endif -// -//#ifdef TFM_SQR20 -//void fp_sqr_comba20(fp_int *a, fp_int *b); -//#endif -//#ifdef TFM_SQR24 -//void fp_sqr_comba24(fp_int *a, fp_int *b); -//#endif -//#ifdef TFM_SQR28 -//void fp_sqr_comba28(fp_int *a, fp_int *b); -//#endif -//#ifdef TFM_SQR32 -//void fp_sqr_comba32(fp_int *a, fp_int *b); -//#endif -//#ifdef TFM_SQR48 -//void fp_sqr_comba48(fp_int *a, fp_int *b); -//#endif -//#ifdef TFM_SQR64 -//void fp_sqr_comba64(fp_int *a, fp_int *b); -//#endif -///*extern const char *fp_s_rmap;*/ -// -// -///** -// * Used by CyaSSL -// */ -// -///* Types */ -// typedef fp_digit mp_digit; -// typedef fp_word mp_word; -// typedef fp_int mp_int; -// -///* Constants */ -// #define MP_LT FP_LT /* less than */ -// #define MP_EQ FP_EQ /* equal to */ -// #define MP_GT FP_GT /* greater than */ -// #define MP_VAL FP_VAL /* invalid */ -// #define MP_OKAY FP_OKAY /* ok result */ -// #define MP_NO FP_NO /* yes/no result */ -// #define MP_YES FP_YES /* yes/no result */ -// -///* Prototypes */ -//#define mp_zero(a) fp_zero(a) -//#define mp_iseven(a) fp_iseven(a) -//int mp_init (mp_int * a); -//void mp_clear (mp_int * a); -//int mp_init_multi(mp_int* a, mp_int* b, mp_int* c, mp_int* d, mp_int* e, mp_int* f); -// -//int mp_add (mp_int * a, mp_int * b, mp_int * c); -//int mp_sub (mp_int * a, mp_int * b, mp_int * c); -//int mp_add_d (mp_int * a, mp_digit b, mp_int * c); -// -//int mp_mul (mp_int * a, mp_int * b, mp_int * c); -//int mp_mulmod (mp_int * a, mp_int * b, mp_int * c, mp_int * d); -//int mp_mod(mp_int *a, mp_int *b, mp_int *c); -//int mp_invmod(mp_int *a, mp_int *b, mp_int *c); -//int mp_exptmod (mp_int * g, mp_int * x, mp_int * p, mp_int * y); -// -//int mp_cmp(mp_int *a, mp_int *b); -//int mp_cmp_d(mp_int *a, mp_digit b); -// -//int mp_unsigned_bin_size(mp_int * a); -//int mp_read_unsigned_bin (mp_int * a, const unsigned char *b, int c); -//int mp_to_unsigned_bin (mp_int * a, unsigned char *b); -// -//int mp_sub_d(fp_int *a, fp_digit b, fp_int *c); -//int mp_copy(fp_int* a, fp_int* b); -//int mp_isodd(mp_int* a); -//int mp_iszero(mp_int* a); -//int mp_count_bits(mp_int *a); -//int mp_leading_bit(mp_int *a); -//int mp_set_int(fp_int *a, fp_digit b); -//void mp_rshb(mp_int *a, int x); -// -//#ifdef HAVE_ECC -// int mp_read_radix(mp_int* a, const char* str, int radix); -// int mp_set(fp_int *a, fp_digit b); -// int mp_sqr(fp_int *a, fp_int *b); -// int mp_montgomery_reduce(fp_int *a, fp_int *m, fp_digit mp); -// int mp_montgomery_setup(fp_int *a, fp_digit *rho); -// int mp_div_2(fp_int * a, fp_int * b); -// int mp_init_copy(fp_int * a, fp_int * b); -//#endif -// -//#if defined(HAVE_ECC) || defined(CYASSL_KEY_GEN) -// int mp_sqrmod(mp_int* a, mp_int* b, mp_int* c); -// int mp_montgomery_calc_normalization(mp_int *a, mp_int *b); -//#endif -// -//#ifdef CYASSL_KEY_GEN -//int mp_gcd(fp_int *a, 
fp_int *b, fp_int *c); -//int mp_lcm(fp_int *a, fp_int *b, fp_int *c); -//int mp_prime_is_prime(mp_int* a, int t, int* result); -//#endif /* CYASSL_KEY_GEN */ -// -//int mp_cnt_lsb(fp_int *a); -//int mp_div_2d(fp_int *a, int b, fp_int *c, fp_int *d); -//int mp_mod_d(fp_int* a, fp_digit b, fp_digit* c); -// -//CYASSL_API word32 CheckRunTimeFastMath(void); -// -///* If user uses RSA, DH, DSA, or ECC math lib directly then fast math FP_SIZE -// must match, return 1 if a match otherwise 0 */ -//#define CheckFastMathSettings() (FP_SIZE == CheckRunTimeFastMath()) -//#ifdef __cplusplus -// } -//#endif -// -// -//#endif /* CTAO_CRYPT_TFM_H */ + +#ifdef __cplusplus + extern "C" { +#endif + +#ifndef MIN + #define MIN(x,y) ((x)<(y)?(x):(y)) +#endif + +#ifndef MAX + #define MAX(x,y) ((x)>(y)?(x):(y)) +#endif + + +#ifndef NO_64BIT +/* autodetect x86-64 and make sure we are using 64-bit digits with x86-64 asm */ +#if defined(__x86_64__) + #if defined(TFM_X86) || defined(TFM_SSE2) || defined(TFM_ARM) + #error x86-64 detected, x86-32/SSE2/ARM optimizations are not valid! + #endif + #if !defined(TFM_X86_64) && !defined(TFM_NO_ASM) + #define TFM_X86_64 + #endif +#endif +#if defined(TFM_X86_64) + #if !defined(FP_64BIT) + #define FP_64BIT + #endif +#endif +/* use 64-bit digit even if not using asm on x86_64 */ +#if defined(__x86_64__) && !defined(FP_64BIT) + #define FP_64BIT +#endif +/* if intel compiler doesn't provide 128 bit type don't turn on 64bit */ +#if defined(FP_64BIT) && defined(__INTEL_COMPILER) && !defined(HAVE___UINT128_T) + #undef FP_64BIT + #undef TFM_X86_64 +#endif +#endif /* NO_64BIT */ + +/* try to detect x86-32 */ +#if defined(__i386__) && !defined(TFM_SSE2) + #if defined(TFM_X86_64) || defined(TFM_ARM) + #error x86-32 detected, x86-64/ARM optimizations are not valid! + #endif + #if !defined(TFM_X86) && !defined(TFM_NO_ASM) + #define TFM_X86 + #endif +#endif + +/* make sure we're 32-bit for x86-32/sse/arm/ppc32 */ +#if (defined(TFM_X86) || defined(TFM_SSE2) || defined(TFM_ARM) || defined(TFM_PPC32)) && defined(FP_64BIT) + #warning x86-32, SSE2 and ARM, PPC32 optimizations require 32-bit digits (undefining) + #undef FP_64BIT +#endif + +/* multi asms? */ +#ifdef TFM_X86 + #define TFM_ASM +#endif +#ifdef TFM_X86_64 + #ifdef TFM_ASM + #error TFM_ASM already defined! + #endif + #define TFM_ASM +#endif +#ifdef TFM_SSE2 + #ifdef TFM_ASM + #error TFM_ASM already defined! + #endif + #define TFM_ASM +#endif +#ifdef TFM_ARM + #ifdef TFM_ASM + #error TFM_ASM already defined! + #endif + #define TFM_ASM +#endif +#ifdef TFM_PPC32 + #ifdef TFM_ASM + #error TFM_ASM already defined! + #endif + #define TFM_ASM +#endif +#ifdef TFM_PPC64 + #ifdef TFM_ASM + #error TFM_ASM already defined! + #endif + #define TFM_ASM +#endif +#ifdef TFM_AVR32 + #ifdef TFM_ASM + #error TFM_ASM already defined! + #endif + #define TFM_ASM +#endif + +/* we want no asm? 
*/ +#ifdef TFM_NO_ASM + #undef TFM_X86 + #undef TFM_X86_64 + #undef TFM_SSE2 + #undef TFM_ARM + #undef TFM_PPC32 + #undef TFM_PPC64 + #undef TFM_AVR32 + #undef TFM_ASM +#endif + +/* ECC helpers */ +#ifdef TFM_ECC192 + #ifdef FP_64BIT + #define TFM_MUL3 + #define TFM_SQR3 + #else + #define TFM_MUL6 + #define TFM_SQR6 + #endif +#endif + +#ifdef TFM_ECC224 + #ifdef FP_64BIT + #define TFM_MUL4 + #define TFM_SQR4 + #else + #define TFM_MUL7 + #define TFM_SQR7 + #endif +#endif + +#ifdef TFM_ECC256 + #ifdef FP_64BIT + #define TFM_MUL4 + #define TFM_SQR4 + #else + #define TFM_MUL8 + #define TFM_SQR8 + #endif +#endif + +#ifdef TFM_ECC384 + #ifdef FP_64BIT + #define TFM_MUL6 + #define TFM_SQR6 + #else + #define TFM_MUL12 + #define TFM_SQR12 + #endif +#endif + +#ifdef TFM_ECC521 + #ifdef FP_64BIT + #define TFM_MUL9 + #define TFM_SQR9 + #else + #define TFM_MUL17 + #define TFM_SQR17 + #endif +#endif + + +/* some default configurations. + */ +#if defined(FP_64BIT) + /* for GCC only on supported platforms */ + typedef unsigned long long fp_digit; /* 64bit, 128 uses mode(TI) below */ + typedef unsigned long fp_word __attribute__ ((mode(TI))); +#else + #if defined(_MSC_VER) || defined(__BORLANDC__) + typedef unsigned __int64 ulong64; + #else + typedef unsigned long long ulong64; + #endif + + #ifndef NO_64BIT + typedef unsigned int fp_digit; + typedef ulong64 fp_word; + #define FP_32BIT + #else + /* some procs like coldfire prefer not to place multiply into 64bit type + even though it exists */ + typedef unsigned short fp_digit; + typedef unsigned int fp_word; + #endif +#endif + +/* # of digits this is */ +#define DIGIT_BIT (int)((CHAR_BIT) * sizeof(fp_digit)) + +/* Max size of any number in bits. Basically the largest size you will be + * multiplying should be half [or smaller] of FP_MAX_SIZE-four_digit + * + * It defaults to 4096-bits [allowing multiplications upto 2048x2048 bits ] + */ +#ifndef FP_MAX_BITS + #define FP_MAX_BITS 4096 +#endif +#define FP_MAX_SIZE (FP_MAX_BITS+(8*DIGIT_BIT)) + +/* will this lib work? */ +#if (CHAR_BIT & 7) + #error CHAR_BIT must be a multiple of eight. +#endif +#if FP_MAX_BITS % CHAR_BIT + #error FP_MAX_BITS must be a multiple of CHAR_BIT +#endif + +#define FP_MASK (fp_digit)(-1) +#define FP_SIZE (FP_MAX_SIZE/DIGIT_BIT) + +/* signs */ +#define FP_ZPOS 0 +#define FP_NEG 1 + +/* return codes */ +#define FP_OKAY 0 +#define FP_VAL 1 +#define FP_MEM 2 + +/* equalities */ +#define FP_LT -1 /* less than */ +#define FP_EQ 0 /* equal to */ +#define FP_GT 1 /* greater than */ + +/* replies */ +#define FP_YES 1 /* yes response */ +#define FP_NO 0 /* no response */ + +/* a FP type */ +typedef struct { + fp_digit dp[FP_SIZE]; + int used, + sign; +} fp_int; + +/* externally define this symbol to ignore the default settings, useful for changing the build from the make process */ +#ifndef TFM_ALREADY_SET + +/* do we want the large set of small multiplications ? 
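/* Worked sizing example for the definitions above (rough figures, struct
 * padding ignored), using the default FP_MAX_BITS of 4096:
 *     64-bit digits: DIGIT_BIT = 64, FP_MAX_SIZE = 4096 + 8*64 = 4608,
 *                    FP_SIZE = 4608/64 = 72 digits  -> sizeof(fp_int) ~ 584 bytes
 *     32-bit digits: DIGIT_BIT = 32, FP_MAX_SIZE = 4096 + 256  = 4352,
 *                    FP_SIZE = 4352/32 = 136 digits -> sizeof(fp_int) ~ 552 bytes
 * Every fp_int carries the full FP_SIZE digit array, which is why fp_int
 * temporaries are comparatively large stack objects. */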
+ Enable these if you are going to be doing a lot of small (<= 16 digit) multiplications say in ECC + Or if you're on a 64-bit machine doing RSA as a 1024-bit integer == 16 digits ;-) + */ +/* need to refactor the function */ +/*#define TFM_SMALL_SET */ + +/* do we want huge code + Enable these if you are doing 20, 24, 28, 32, 48, 64 digit multiplications (useful for RSA) + Less important on 64-bit machines as 32 digits == 2048 bits + */ +#if 0 +#define TFM_MUL3 +#define TFM_MUL4 +#define TFM_MUL6 +#define TFM_MUL7 +#define TFM_MUL8 +#define TFM_MUL9 +#define TFM_MUL12 +#define TFM_MUL17 +#endif +#ifdef TFM_HUGE_SET +#define TFM_MUL20 +#define TFM_MUL24 +#define TFM_MUL28 +#define TFM_MUL32 +#if (FP_MAX_BITS >= 6144) && defined(FP_64BIT) + #define TFM_MUL48 +#endif +#if (FP_MAX_BITS >= 8192) && defined(FP_64BIT) + #define TFM_MUL64 +#endif +#endif + +#if 0 +#define TFM_SQR3 +#define TFM_SQR4 +#define TFM_SQR6 +#define TFM_SQR7 +#define TFM_SQR8 +#define TFM_SQR9 +#define TFM_SQR12 +#define TFM_SQR17 +#endif +#ifdef TFM_HUGE_SET +#define TFM_SQR20 +#define TFM_SQR24 +#define TFM_SQR28 +#define TFM_SQR32 +#define TFM_SQR48 +#define TFM_SQR64 +#endif + +/* do we want some overflow checks + Not required if you make sure your numbers are within range (e.g. by default a modulus for fp_exptmod() can only be upto 2048 bits long) + */ +/* #define TFM_CHECK */ + +/* Is the target a P4 Prescott + */ +/* #define TFM_PRESCOTT */ + +/* Do we want timing resistant fp_exptmod() ? + * This makes it slower but also timing invariant with respect to the exponent + */ +/* #define TFM_TIMING_RESISTANT */ + +#endif /* TFM_ALREADY_SET */ + +/* functions */ + +/* returns a TFM ident string useful for debugging... */ +/*const char *fp_ident(void);*/ + +/* initialize [or zero] an fp int */ +#define fp_init(a) (void)XMEMSET((a), 0, sizeof(fp_int)) +#define fp_zero(a) fp_init(a) + +/* zero/even/odd ? */ +#define fp_iszero(a) (((a)->used == 0) ? FP_YES : FP_NO) +#define fp_iseven(a) (((a)->used >= 0 && (((a)->dp[0] & 1) == 0)) ? FP_YES : FP_NO) +#define fp_isodd(a) (((a)->used > 0 && (((a)->dp[0] & 1) == 1)) ? FP_YES : FP_NO) + +/* set to a small digit */ +void fp_set(fp_int *a, fp_digit b); + +/* copy from a to b */ +#define fp_copy(a, b) (void)(((a) != (b)) ? ((void)XMEMCPY((b), (a), sizeof(fp_int))) : (void)0) +#define fp_init_copy(a, b) fp_copy(b, a) + +/* clamp digits */ +#define fp_clamp(a) { while ((a)->used && (a)->dp[(a)->used-1] == 0) --((a)->used); (a)->sign = (a)->used ? 
(a)->sign : FP_ZPOS; } + +/* negate and absolute */ +#define fp_neg(a, b) { fp_copy(a, b); (b)->sign ^= 1; fp_clamp(b); } +#define fp_abs(a, b) { fp_copy(a, b); (b)->sign = 0; } + +/* right shift x digits */ +void fp_rshd(fp_int *a, int x); + +/* right shift x bits */ +void fp_rshb(fp_int *a, int x); + +/* left shift x digits */ +void fp_lshd(fp_int *a, int x); + +/* signed comparison */ +int fp_cmp(fp_int *a, fp_int *b); + +/* unsigned comparison */ +int fp_cmp_mag(fp_int *a, fp_int *b); + +/* power of 2 operations */ +void fp_div_2d(fp_int *a, int b, fp_int *c, fp_int *d); +void fp_mod_2d(fp_int *a, int b, fp_int *c); +void fp_mul_2d(fp_int *a, int b, fp_int *c); +void fp_2expt (fp_int *a, int b); +void fp_mul_2(fp_int *a, fp_int *c); +void fp_div_2(fp_int *a, fp_int *c); + +/* Counts the number of lsbs which are zero before the first zero bit */ +int fp_cnt_lsb(fp_int *a); + +/* c = a + b */ +void fp_add(fp_int *a, fp_int *b, fp_int *c); + +/* c = a - b */ +void fp_sub(fp_int *a, fp_int *b, fp_int *c); + +/* c = a * b */ +void fp_mul(fp_int *a, fp_int *b, fp_int *c); + +/* b = a*a */ +void fp_sqr(fp_int *a, fp_int *b); + +/* a/b => cb + d == a */ +int fp_div(fp_int *a, fp_int *b, fp_int *c, fp_int *d); + +/* c = a mod b, 0 <= c < b */ +int fp_mod(fp_int *a, fp_int *b, fp_int *c); + +/* compare against a single digit */ +int fp_cmp_d(fp_int *a, fp_digit b); + +/* c = a + b */ +void fp_add_d(fp_int *a, fp_digit b, fp_int *c); + +/* c = a - b */ +void fp_sub_d(fp_int *a, fp_digit b, fp_int *c); + +/* c = a * b */ +void fp_mul_d(fp_int *a, fp_digit b, fp_int *c); + +/* a/b => cb + d == a */ +/*int fp_div_d(fp_int *a, fp_digit b, fp_int *c, fp_digit *d);*/ + +/* c = a mod b, 0 <= c < b */ +/*int fp_mod_d(fp_int *a, fp_digit b, fp_digit *c);*/ + +/* ---> number theory <--- */ +/* d = a + b (mod c) */ +/*int fp_addmod(fp_int *a, fp_int *b, fp_int *c, fp_int *d);*/ + +/* d = a - b (mod c) */ +/*int fp_submod(fp_int *a, fp_int *b, fp_int *c, fp_int *d);*/ + +/* d = a * b (mod c) */ +int fp_mulmod(fp_int *a, fp_int *b, fp_int *c, fp_int *d); + +/* c = a * a (mod b) */ +int fp_sqrmod(fp_int *a, fp_int *b, fp_int *c); + +/* c = 1/a (mod b) */ +int fp_invmod(fp_int *a, fp_int *b, fp_int *c); + +/* c = (a, b) */ +/*void fp_gcd(fp_int *a, fp_int *b, fp_int *c);*/ + +/* c = [a, b] */ +/*void fp_lcm(fp_int *a, fp_int *b, fp_int *c);*/ + +/* setups the montgomery reduction */ +int fp_montgomery_setup(fp_int *a, fp_digit *mp); + +/* computes a = B**n mod b without division or multiplication useful for + * normalizing numbers in a Montgomery system. 
+ */ +void fp_montgomery_calc_normalization(fp_int *a, fp_int *b); + +/* computes x/R == x (mod N) via Montgomery Reduction */ +void fp_montgomery_reduce(fp_int *a, fp_int *m, fp_digit mp); + +/* d = a**b (mod c) */ +int fp_exptmod(fp_int *a, fp_int *b, fp_int *c, fp_int *d); + +/* primality stuff */ + +/* perform a Miller-Rabin test of a to the base b and store result in "result" */ +/*void fp_prime_miller_rabin (fp_int * a, fp_int * b, int *result);*/ + +/* 256 trial divisions + 8 Miller-Rabins, returns FP_YES if probable prime */ +/*int fp_isprime(fp_int *a);*/ + +/* Primality generation flags */ +/*#define TFM_PRIME_BBS 0x0001 */ /* BBS style prime */ +/*#define TFM_PRIME_SAFE 0x0002 */ /* Safe prime (p-1)/2 == prime */ +/*#define TFM_PRIME_2MSB_OFF 0x0004 */ /* force 2nd MSB to 0 */ +/*#define TFM_PRIME_2MSB_ON 0x0008 */ /* force 2nd MSB to 1 */ + +/* callback for fp_prime_random, should fill dst with random bytes and return how many read [upto len] */ +/*typedef int tfm_prime_callback(unsigned char *dst, int len, void *dat);*/ + +/*#define fp_prime_random(a, t, size, bbs, cb, dat) fp_prime_random_ex(a, t, ((size) * 8) + 1, (bbs==1)?TFM_PRIME_BBS:0, cb, dat)*/ + +/*int fp_prime_random_ex(fp_int *a, int t, int size, int flags, tfm_prime_callback cb, void *dat);*/ + +/* radix conersions */ +int fp_count_bits(fp_int *a); +int fp_leading_bit(fp_int *a); + +int fp_unsigned_bin_size(fp_int *a); +void fp_read_unsigned_bin(fp_int *a, unsigned char *b, int c); +void fp_to_unsigned_bin(fp_int *a, unsigned char *b); + +/*int fp_signed_bin_size(fp_int *a);*/ +/*void fp_read_signed_bin(fp_int *a, unsigned char *b, int c);*/ +/*void fp_to_signed_bin(fp_int *a, unsigned char *b);*/ + +/*int fp_read_radix(fp_int *a, char *str, int radix);*/ +/*int fp_toradix(fp_int *a, char *str, int radix);*/ +/*int fp_toradix_n(fp_int * a, char *str, int radix, int maxlen);*/ + + +/* VARIOUS LOW LEVEL STUFFS */ +void s_fp_add(fp_int *a, fp_int *b, fp_int *c); +void s_fp_sub(fp_int *a, fp_int *b, fp_int *c); +void fp_reverse(unsigned char *s, int len); + +void fp_mul_comba(fp_int *a, fp_int *b, fp_int *c); + +#ifdef TFM_SMALL_SET +void fp_mul_comba_small(fp_int *a, fp_int *b, fp_int *c); +#endif + +#ifdef TFM_MUL3 +void fp_mul_comba3(fp_int *a, fp_int *b, fp_int *c); +#endif +#ifdef TFM_MUL4 +void fp_mul_comba4(fp_int *a, fp_int *b, fp_int *c); +#endif +#ifdef TFM_MUL6 +void fp_mul_comba6(fp_int *a, fp_int *b, fp_int *c); +#endif +#ifdef TFM_MUL7 +void fp_mul_comba7(fp_int *a, fp_int *b, fp_int *c); +#endif +#ifdef TFM_MUL8 +void fp_mul_comba8(fp_int *a, fp_int *b, fp_int *c); +#endif +#ifdef TFM_MUL9 +void fp_mul_comba9(fp_int *a, fp_int *b, fp_int *c); +#endif +#ifdef TFM_MUL12 +void fp_mul_comba12(fp_int *a, fp_int *b, fp_int *c); +#endif +#ifdef TFM_MUL17 +void fp_mul_comba17(fp_int *a, fp_int *b, fp_int *c); +#endif + +#ifdef TFM_MUL20 +void fp_mul_comba20(fp_int *a, fp_int *b, fp_int *c); +#endif +#ifdef TFM_MUL24 +void fp_mul_comba24(fp_int *a, fp_int *b, fp_int *c); +#endif +#ifdef TFM_MUL28 +void fp_mul_comba28(fp_int *a, fp_int *b, fp_int *c); +#endif +#ifdef TFM_MUL32 +void fp_mul_comba32(fp_int *a, fp_int *b, fp_int *c); +#endif +#ifdef TFM_MUL48 +void fp_mul_comba48(fp_int *a, fp_int *b, fp_int *c); +#endif +#ifdef TFM_MUL64 +void fp_mul_comba64(fp_int *a, fp_int *b, fp_int *c); +#endif + +void fp_sqr_comba(fp_int *a, fp_int *b); + +#ifdef TFM_SMALL_SET +void fp_sqr_comba_small(fp_int *a, fp_int *b); +#endif + +#ifdef TFM_SQR3 +void fp_sqr_comba3(fp_int *a, fp_int *b); +#endif +#ifdef TFM_SQR4 +void 
fp_sqr_comba4(fp_int *a, fp_int *b); +#endif +#ifdef TFM_SQR6 +void fp_sqr_comba6(fp_int *a, fp_int *b); +#endif +#ifdef TFM_SQR7 +void fp_sqr_comba7(fp_int *a, fp_int *b); +#endif +#ifdef TFM_SQR8 +void fp_sqr_comba8(fp_int *a, fp_int *b); +#endif +#ifdef TFM_SQR9 +void fp_sqr_comba9(fp_int *a, fp_int *b); +#endif +#ifdef TFM_SQR12 +void fp_sqr_comba12(fp_int *a, fp_int *b); +#endif +#ifdef TFM_SQR17 +void fp_sqr_comba17(fp_int *a, fp_int *b); +#endif + +#ifdef TFM_SQR20 +void fp_sqr_comba20(fp_int *a, fp_int *b); +#endif +#ifdef TFM_SQR24 +void fp_sqr_comba24(fp_int *a, fp_int *b); +#endif +#ifdef TFM_SQR28 +void fp_sqr_comba28(fp_int *a, fp_int *b); +#endif +#ifdef TFM_SQR32 +void fp_sqr_comba32(fp_int *a, fp_int *b); +#endif +#ifdef TFM_SQR48 +void fp_sqr_comba48(fp_int *a, fp_int *b); +#endif +#ifdef TFM_SQR64 +void fp_sqr_comba64(fp_int *a, fp_int *b); +#endif +/*extern const char *fp_s_rmap;*/ + + +/** + * Used by CyaSSL + */ + +/* Types */ + typedef fp_digit mp_digit; + typedef fp_word mp_word; + typedef fp_int mp_int; + +/* Constants */ + #define MP_LT FP_LT /* less than */ + #define MP_EQ FP_EQ /* equal to */ + #define MP_GT FP_GT /* greater than */ + #define MP_VAL FP_VAL /* invalid */ + #define MP_OKAY FP_OKAY /* ok result */ + #define MP_NO FP_NO /* yes/no result */ + #define MP_YES FP_YES /* yes/no result */ + +/* Prototypes */ +#define mp_zero(a) fp_zero(a) +#define mp_iseven(a) fp_iseven(a) +int mp_init (mp_int * a); +void mp_clear (mp_int * a); +int mp_init_multi(mp_int* a, mp_int* b, mp_int* c, mp_int* d, mp_int* e, mp_int* f); + +int mp_add (mp_int * a, mp_int * b, mp_int * c); +int mp_sub (mp_int * a, mp_int * b, mp_int * c); +int mp_add_d (mp_int * a, mp_digit b, mp_int * c); + +int mp_mul (mp_int * a, mp_int * b, mp_int * c); +int mp_mulmod (mp_int * a, mp_int * b, mp_int * c, mp_int * d); +int mp_mod(mp_int *a, mp_int *b, mp_int *c); +int mp_invmod(mp_int *a, mp_int *b, mp_int *c); +int mp_exptmod (mp_int * g, mp_int * x, mp_int * p, mp_int * y); + +int mp_cmp(mp_int *a, mp_int *b); +int mp_cmp_d(mp_int *a, mp_digit b); + +int mp_unsigned_bin_size(mp_int * a); +int mp_read_unsigned_bin (mp_int * a, const unsigned char *b, int c); +int mp_to_unsigned_bin (mp_int * a, unsigned char *b); + +int mp_sub_d(fp_int *a, fp_digit b, fp_int *c); +int mp_copy(fp_int* a, fp_int* b); +int mp_isodd(mp_int* a); +int mp_iszero(mp_int* a); +int mp_count_bits(mp_int *a); +int mp_leading_bit(mp_int *a); +int mp_set_int(fp_int *a, fp_digit b); +void mp_rshb(mp_int *a, int x); + +#ifdef HAVE_ECC + int mp_read_radix(mp_int* a, const char* str, int radix); + int mp_set(fp_int *a, fp_digit b); + int mp_sqr(fp_int *a, fp_int *b); + int mp_montgomery_reduce(fp_int *a, fp_int *m, fp_digit mp); + int mp_montgomery_setup(fp_int *a, fp_digit *rho); + int mp_div_2(fp_int * a, fp_int * b); + int mp_init_copy(fp_int * a, fp_int * b); +#endif + +#if defined(HAVE_ECC) || defined(WOLFSSL_KEY_GEN) + int mp_sqrmod(mp_int* a, mp_int* b, mp_int* c); + int mp_montgomery_calc_normalization(mp_int *a, mp_int *b); +#endif + +#ifdef WOLFSSL_KEY_GEN +int mp_gcd(fp_int *a, fp_int *b, fp_int *c); +int mp_lcm(fp_int *a, fp_int *b, fp_int *c); +int mp_prime_is_prime(mp_int* a, int t, int* result); +#endif /* WOLFSSL_KEY_GEN */ + +int mp_cnt_lsb(fp_int *a); +int mp_div_2d(fp_int *a, int b, fp_int *c, fp_int *d); +int mp_mod_d(fp_int* a, fp_digit b, fp_digit* c); + +WOLFSSL_API word32 CheckRunTimeFastMath(void); + +/* If user uses RSA, DH, DSA, or ECC math lib directly then fast math FP_SIZE + must match, return 1 if a 
 match, otherwise 0 */ +#define CheckFastMathSettings() (FP_SIZE == CheckRunTimeFastMath()) +#ifdef __cplusplus + } +#endif + +#endif /* HAVE_FIPS */ +#endif /* WOLF_CRYPT_TFM_H */
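A quick worked example of the TFM_ECCxxx blocks near the top of the header: they choose the unrolled comba size from the curve size expressed in digits. With TFM_ECC256 and FP_64BIT a 256-bit operand is 256/64 = 4 digits, so TFM_MUL4/TFM_SQR4 are enabled; without FP_64BIT the same operand is 256/32 = 8 digits, hence TFM_MUL8/TFM_SQR8. The same arithmetic explains the odd-looking 521-bit case: ceil(521/64) = 9 and ceil(521/32) = 17.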
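The size macros resolve to concrete numbers once a digit type is fixed. Assuming the common 32-bit path (FP_64BIT not set, so fp_digit is unsigned int and DIGIT_BIT is 32) and the default FP_MAX_BITS of 4096: FP_MAX_SIZE = 4096 + 8*32 = 4352 bits and FP_SIZE = 4352/32 = 136 digits, so every fp_int carries a fixed 136-digit array on the stack; the 8 digits of head-room are what make multiplications up to FP_MAX_BITS/2 x FP_MAX_BITS/2 (2048x2048 by default) safe. A minimal sketch that re-derives those numbers with compile-time checks; the EX_* names are illustrative, not part of the header:

#include <limits.h>

/* illustrative re-derivation of the tfm.h size arithmetic, assuming a 32-bit digit */
typedef unsigned int ex_fp_digit;

#define EX_DIGIT_BIT   ((int)(CHAR_BIT * sizeof(ex_fp_digit)))     /* 32   */
#define EX_FP_MAX_BITS 4096
#define EX_FP_MAX_SIZE (EX_FP_MAX_BITS + (8 * EX_DIGIT_BIT))       /* 4352 */
#define EX_FP_SIZE     (EX_FP_MAX_SIZE / EX_DIGIT_BIT)             /* 136  */

/* negative array size on mismatch: a C89-friendly static assertion */
typedef char ex_assert_digit_bit[EX_DIGIT_BIT == 32 ? 1 : -1];
typedef char ex_assert_fp_size [EX_FP_SIZE   == 136 ? 1 : -1];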
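A hedged usage sketch of the fp_* primitives declared above. Because fp_int is a fixed-size struct, fp_init is just a memset and the basic add/sub/mul calls have no FP_MEM failure path. The sketch assumes the surrounding wolfSSL headers are included (the fp_init macro uses XMEMSET) and uses small values so the result, (3 + 4) * 5 = 35, is easy to check by hand:

/* sketch: basic fast-math arithmetic with the API declared in tfm.h */
int fp_basic_demo(void)
{
    fp_int a, b, sum, prod;
    unsigned char out[4];

    fp_init(&a); fp_init(&b); fp_init(&sum); fp_init(&prod);

    fp_set(&a, 3);
    fp_set(&b, 4);
    fp_add(&a, &b, &sum);            /* sum  = 7  */

    fp_set(&b, 5);
    fp_mul(&sum, &b, &prod);         /* prod = 35 */

    /* export as a big-endian magnitude: 35 fits in one byte */
    if (fp_unsigned_bin_size(&prod) != 1)
        return FP_VAL;
    fp_to_unsigned_bin(&prod, out);

    return (out[0] == 35 && fp_cmp_d(&prod, 35) == FP_EQ) ? FP_OKAY : FP_VAL;
}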
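Modular exponentiation goes through fp_exptmod, which builds on the Montgomery helpers (fp_montgomery_setup, fp_montgomery_calc_normalization, fp_montgomery_reduce); most callers never touch those directly, and the underlying tomsfastmath code historically expects an odd modulus. A small sketch with values picked so the answer is checkable by hand (5^3 mod 13 = 125 mod 13 = 8):

/* sketch: d = g^x mod p with fp_exptmod; 5^3 mod 13 should be 8 */
int fp_exptmod_demo(void)
{
    fp_int g, x, p, d;

    fp_init(&g); fp_init(&x); fp_init(&p); fp_init(&d);

    fp_set(&g, 5);
    fp_set(&x, 3);
    fp_set(&p, 13);                       /* odd modulus */

    if (fp_exptmod(&g, &x, &p, &d) != FP_OKAY)
        return FP_VAL;

    return (fp_cmp_d(&d, 8) == FP_EQ) ? FP_OKAY : FP_VAL;
}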
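Finally, the mp_* section exists so that code written against the heap-based integer.c API (added earlier in this patch) compiles unchanged against fast math: mp_int, mp_digit and mp_word are plain typedefs of the fp_ types, and mp_init reduces to zeroing a fixed-size struct. The one build-time hazard is an application compiled with a different FP_MAX_BITS than the library it links against, which is what CheckFastMathSettings()/CheckRunTimeFastMath() are meant to catch. A hedged sketch of such a check at start-up; the function name and error handling are illustrative:

#include <stdio.h>

/* sketch: fail fast if the app and the wolfSSL library disagree on FP_SIZE */
int fast_math_sanity_check(void)
{
    if (!CheckFastMathSettings()) {
        fprintf(stderr, "fast math mismatch: app FP_SIZE %d, library FP_SIZE %u\n",
                FP_SIZE, (unsigned)CheckRunTimeFastMath());
        return -1;   /* rebuild app and library with the same FP_MAX_BITS */
    }
    return 0;
}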