diff --git a/wolfcrypt/src/asm.c b/wolfcrypt/src/asm.c index 2d00bd08c..d0eb6aa61 100644 --- a/wolfcrypt/src/asm.c +++ b/wolfcrypt/src/asm.c @@ -59,9 +59,9 @@ #define EAX 0 #define EBX 1 -#define ECX 2 +#define ECX 2 #define EDX 3 - + #define CPUID_AVX1 0x1 #define CPUID_AVX2 0x2 #define CPUID_RDRAND 0x4 @@ -75,22 +75,22 @@ #define IS_INTEL_ADX (cpuid_flags&CPUID_ADX) #define IS_INTEL_RDRAND (cpuid_flags&CPUID_RDRAND) #define IS_INTEL_RDSEED (cpuid_flags&CPUID_RDSEED) -#define SET_FLAGS +#define SET_FLAGS static word32 cpuid_check = 0 ; static word32 cpuid_flags = 0 ; static word32 cpuid_flag(word32 leaf, word32 sub, word32 num, word32 bit) { int got_intel_cpu=0; - unsigned int reg[5]; - + unsigned int reg[5]; + reg[4] = '\0' ; - cpuid(reg, 0, 0); - if(memcmp((char *)&(reg[EBX]), "Genu", 4) == 0 && - memcmp((char *)&(reg[EDX]), "ineI", 4) == 0 && - memcmp((char *)&(reg[ECX]), "ntel", 4) == 0) { - got_intel_cpu = 1; - } + cpuid(reg, 0, 0); + if(memcmp((char *)&(reg[EBX]), "Genu", 4) == 0 && + memcmp((char *)&(reg[EDX]), "ineI", 4) == 0 && + memcmp((char *)&(reg[ECX]), "ntel", 4) == 0) { + got_intel_cpu = 1; + } if (got_intel_cpu) { cpuid(reg, leaf, sub); return((reg[num]>>bit)&0x1) ; @@ -98,7 +98,7 @@ static word32 cpuid_flag(word32 leaf, word32 sub, word32 num, word32 bit) { return 0 ; } -INLINE static int set_cpuid_flags(void) { +INLINE static int set_cpuid_flags(void) { if(cpuid_check == 0) { if(cpuid_flag(7, 0, EBX, 8)){ cpuid_flags |= CPUID_BMI2 ; } if(cpuid_flag(7, 0, EBX,19)){ cpuid_flags |= CPUID_ADX ; } @@ -117,17 +117,17 @@ INLINE static int set_cpuid_flags(void) { #define IF_HAVE_INTEL_MULX(func, ret) #endif -#if defined(TFM_X86) && !defined(TFM_SSE2) +#if defined(TFM_X86) && !defined(TFM_SSE2) /* x86-32 code */ -#define MONT_START +#define MONT_START #define MONT_FINI #define LOOP_END #define LOOP_START \ mu = c[x] * mp #define INNERMUL \ -__asm__( \ +__asm__( \ "movl %5,%%eax \n\t" \ "mull %4 \n\t" \ "addl %1,%%eax \n\t" \ @@ -140,7 +140,7 @@ __asm__( \ : "%eax", "%edx", "cc") #define PROPCARRY \ -__asm__( \ +__asm__( \ "addl %1,%0 \n\t" \ "setb %%al \n\t" \ "movzbl %%al,%1 \n\t" \ @@ -152,14 +152,14 @@ __asm__( \ #elif defined(TFM_X86_64) /* x86-64 code */ -#define MONT_START +#define MONT_START #define MONT_FINI #define LOOP_END #define LOOP_START \ - mu = c[x] * mp; + mu = c[x] * mp #define INNERMUL \ -__asm__( \ +__asm__( \ "movq %5,%%rax \n\t" \ "mulq %4 \n\t" \ "addq %1,%%rax \n\t" \ @@ -190,7 +190,7 @@ __asm__( \ "adcxq %%r8,%1 \n\t" \ :"+r"(c0),"+r"(c1):"m"(pre),"r"(rdx):"%r8","%r9", "%r10", "%r11","%r12","%rdx" \ ); } - + #define MULX_INNERMUL_R2(c0, c1, pre, rdx)\ { \ @@ -243,7 +243,7 @@ __asm__( \ #endif #define INNERMUL8 \ - __asm__( \ + __asm__( \ "movq 0(%5),%%rax \n\t" \ "movq 0(%2),%%r10 \n\t" \ "movq 0x8(%5),%%r11 \n\t" \ @@ -333,10 +333,10 @@ __asm__( \ \ :"=r"(_c), "=r"(cy) \ : "0"(_c), "1"(cy), "g"(mu), "r"(tmpm)\ -: "%rax", "%rdx", "%r10", "%r11", "cc")\ +: "%rax", "%rdx", "%r10", "%r11", "cc") #define PROPCARRY \ -__asm__( \ +__asm__( \ "addq %1,%0 \n\t" \ "setb %%al \n\t" \ "movzbq %%al,%1 \n\t" \ @@ -345,7 +345,7 @@ __asm__( \ : "%rax", "cc") /******************************************************************/ -#elif defined(TFM_SSE2) +#elif defined(TFM_SSE2) /* SSE2 code (assumes 32-bit fp_digits) */ /* XMM register assignments: * xmm0 *tmpm++, then Mu * (*tmpm++) @@ -362,7 +362,7 @@ __asm__( \ __asm__("emms") #define LOOP_START \ -__asm__( \ +__asm__( \ "movd %0,%%mm1 \n\t" \ "pxor %%mm3,%%mm3 \n\t" \ "pmuludq %%mm2,%%mm1 \n\t" \ @@ -370,7 +370,7 @@ 
__asm__( \ /* pmuludq on mmx registers does a 32x32->64 multiply. */ #define INNERMUL \ -__asm__( \ +__asm__( \ "movd %1,%%mm4 \n\t" \ "movd %2,%%mm0 \n\t" \ "paddq %%mm4,%%mm3 \n\t" \ @@ -381,7 +381,7 @@ __asm__( \ :"=g"(_c[LO]) : "0"(_c[LO]), "g"(*tmpm++) ); #define INNERMUL8 \ -__asm__( \ +__asm__( \ "movd 0(%1),%%mm4 \n\t" \ "movd 0(%2),%%mm0 \n\t" \ "paddq %%mm4,%%mm3 \n\t" \ @@ -454,7 +454,7 @@ __asm__( \ __asm__( "movd %%mm3,%0 \n" :"=r"(cy)) #define PROPCARRY \ -__asm__( \ +__asm__( \ "addl %1,%0 \n\t" \ "setb %%al \n\t" \ "movzbl %%al,%1 \n\t" \ @@ -466,7 +466,7 @@ __asm__( \ #elif defined(TFM_ARM) /* ARMv4 code */ -#define MONT_START +#define MONT_START #define MONT_FINI #define LOOP_END #define LOOP_START \ @@ -476,7 +476,7 @@ __asm__( \ #ifdef __thumb__ #define INNERMUL \ -__asm__( \ +__asm__( \ " LDR r0,%1 \n\t" \ " ADDS r0,r0,%0 \n\t" \ " ITE CS \n\t" \ @@ -487,7 +487,7 @@ __asm__( \ :"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"m"(_c[0]):"r0","cc"); #define PROPCARRY \ -__asm__( \ +__asm__( \ " LDR r0,%1 \n\t" \ " ADDS r0,r0,%0 \n\t" \ " STR r0,%1 \n\t" \ @@ -503,7 +503,7 @@ __asm__( \ #else /* __thumb__ */ #define INNERMUL \ -__asm__( \ +__asm__( \ " LDR r0,%1 \n\t" \ " ADDS r0,r0,%0 \n\t" \ " MOVCS %0,#1 \n\t" \ @@ -513,7 +513,7 @@ __asm__( \ :"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c[0]):"r0","cc"); #define PROPCARRY \ -__asm__( \ +__asm__( \ " LDR r0,%1 \n\t" \ " ADDS r0,r0,%0 \n\t" \ " STR r0,%1 \n\t" \ @@ -526,7 +526,7 @@ __asm__( \ #elif defined(TFM_PPC32) /* PPC32 */ -#define MONT_START +#define MONT_START #define MONT_FINI #define LOOP_END #define LOOP_START \ @@ -552,46 +552,46 @@ __asm__( \ #elif defined(TFM_PPC64) /* PPC64 */ -#define MONT_START +#define MONT_START #define MONT_FINI #define LOOP_END #define LOOP_START \ mu = c[x] * mp -#define INNERMUL \ -__asm__( \ - " mulld 16,%3,%4 \n\t" \ - " mulhdu 17,%3,%4 \n\t" \ - " addc 16,16,%0 \n\t" \ - " addze 17,17 \n\t" \ - " ldx 18,0,%1 \n\t" \ - " addc 16,16,18 \n\t" \ - " addze %0,17 \n\t" \ - " sdx 16,0,%1 \n\t" \ -:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"16", "17", "18","cc"); ++tmpm; +#define INNERMUL \ +__asm__( \ + " mulld r16,%3,%4 \n\t" \ + " mulhdu r17,%3,%4 \n\t" \ + " addc r16,16,%0 \n\t" \ + " addze r17,r17 \n\t" \ + " ldx r18,0,%1 \n\t" \ + " addc r16,r16,r18 \n\t" \ + " addze %0,r17 \n\t" \ + " sdx r16,0,%1 \n\t" \ +:"=r"(cy),"=m"(_c[0]):"0"(cy),"r"(mu),"r"(tmpm[0]),"1"(_c[0]):"r16", "r17", "r18","cc"); ++tmpm; -#define PROPCARRY \ -__asm__( \ - " ldx 16,0,%1 \n\t" \ - " addc 16,16,%0 \n\t" \ - " sdx 16,0,%1 \n\t" \ - " xor %0,%0,%0 \n\t" \ - " addze %0,%0 \n\t" \ -:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"16","cc"); +#define PROPCARRY \ +__asm__( \ + " ldx r16,0,%1 \n\t" \ + " addc r16,r16,%0 \n\t" \ + " sdx r16,0,%1 \n\t" \ + " xor %0,%0,%0 \n\t" \ + " addze %0,%0 \n\t" \ +:"=r"(cy),"=m"(_c[0]):"0"(cy),"1"(_c[0]):"r16","cc"); /******************************************************************/ #elif defined(TFM_AVR32) /* AVR32 */ -#define MONT_START +#define MONT_START #define MONT_FINI #define LOOP_END #define LOOP_START \ mu = c[x] * mp #define INNERMUL \ -__asm__( \ +__asm__( \ " ld.w r2,%1 \n\t" \ " add r2,%0 \n\t" \ " eor r3,r3 \n\t" \ @@ -602,7 +602,7 @@ __asm__( \ :"=r"(cy),"=r"(_c):"0"(cy),"r"(mu),"r"(*tmpm++),"1"(_c):"r2","r3"); #define PROPCARRY \ -__asm__( \ +__asm__( \ " ld.w r2,%1 \n\t" \ " add r2,%0 \n\t" \ " st.w %1,r2 \n\t" \ @@ -610,10 +610,44 @@ __asm__( \ " acr %0 \n\t" \ :"=r"(cy),"=r"(&_c[0]):"0"(cy),"1"(&_c[0]):"r2","cc"); 
+/******************************************************************/ +#elif defined(TFM_MIPS) + +/* MIPS */ +#define MONT_START +#define MONT_FINI +#define LOOP_END +#define LOOP_START \ + mu = c[x] * mp + +#define INNERMUL \ +__asm__( \ + " multu %3,%4 \n\t" \ + " mflo $12 \n\t" \ + " mfhi $13 \n\t" \ + " addu $12,$12,%0 \n\t" \ + " sltu $10,$12,%0 \n\t" \ + " addu $13,$13,$10 \n\t" \ + " lw $10,%1 \n\t" \ + " addu $12,$12,$10 \n\t" \ + " sltu $10,$12,$10 \n\t" \ + " addu %0,$13,$10 \n\t" \ + " sw $12,%1 \n\t" \ +:"+r"(cy),"+m"(_c[0]):""(cy),"r"(mu),"r"(tmpm[0]),""(_c[0]):"$10","$12","$13"); ++tmpm; + +#define PROPCARRY \ +__asm__( \ + " lw $10,%1 \n\t" \ + " addu $10,$10,%0 \n\t" \ + " sw $10,%1 \n\t" \ + " sltu %0,$10,%0 \n\t" \ +:"+r"(cy),"+m"(_c[0]):""(cy),""(_c[0]):"$10"); + +/******************************************************************/ #else /* ISO C code */ -#define MONT_START +#define MONT_START #define MONT_FINI #define LOOP_END #define LOOP_START \ @@ -660,7 +694,7 @@ __asm__( \ #define COMBA_FINI #define SQRADD(i, j) \ -__asm__( \ +__asm__( \ "movl %6,%%eax \n\t" \ "mull %%eax \n\t" \ "addl %%eax,%0 \n\t" \ @@ -669,7 +703,7 @@ __asm__( \ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","%edx","cc"); #define SQRADD2(i, j) \ -__asm__( \ +__asm__( \ "movl %6,%%eax \n\t" \ "mull %7 \n\t" \ "addl %%eax,%0 \n\t" \ @@ -689,10 +723,8 @@ __asm__( \ "xorl %2,%2 \n\t" \ :"=r"(sc0), "=r"(sc1), "=r"(sc2): "g"(i), "g"(j) :"%eax","%edx","cc"); -/* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */ - #define SQRADDAC(i, j) \ -__asm__( \ +__asm__( \ "movl %6,%%eax \n\t" \ "mull %7 \n\t" \ "addl %%eax,%0 \n\t" \ @@ -701,7 +733,7 @@ __asm__( \ :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%eax","%edx","cc"); #define SQRADDDB \ -__asm__( \ +__asm__( \ "addl %6,%0 \n\t" \ "adcl %7,%1 \n\t" \ "adcl %8,%2 \n\t" \ @@ -730,7 +762,7 @@ __asm__( \ #define COMBA_FINI #define SQRADD(i, j) \ -__asm__( \ +__asm__( \ "movq %6,%%rax \n\t" \ "mulq %%rax \n\t" \ "addq %%rax,%0 \n\t" \ @@ -739,7 +771,7 @@ __asm__( \ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "x"(i) :"%rax","%rdx","cc"); #define SQRADD2(i, j) \ -__asm__( \ +__asm__( \ "movq %6,%%rax \n\t" \ "mulq %7 \n\t" \ "addq %%rax,%0 \n\t" \ @@ -751,7 +783,7 @@ __asm__( \ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "g"(i), "g"(j) :"%rax","%rdx","cc"); #define SQRADDSC(i, j) \ -__asm__( \ +__asm__( \ "movq %3,%%rax \n\t" \ "mulq %4 \n\t" \ "movq %%rax,%0 \n\t" \ @@ -759,10 +791,8 @@ __asm__( \ "xorq %2,%2 \n\t" \ :"=r"(sc0), "=r"(sc1), "=r"(sc2): "g"(i), "g"(j) :"%rax","%rdx","cc"); -/* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */ - #define SQRADDAC(i, j) \ -__asm__( \ +__asm__( \ "movq %6,%%rax \n\t" \ "mulq %7 \n\t" \ "addq %%rax,%0 \n\t" \ @@ -771,7 +801,7 @@ __asm__( \ :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "g"(i), "g"(j) :"%rax","%rdx","cc"); #define SQRADDDB \ -__asm__( \ +__asm__( \ "addq %6,%0 \n\t" \ "adcq %7,%1 \n\t" \ "adcq %8,%2 \n\t" \ @@ -801,7 +831,7 @@ __asm__( \ __asm__("emms"); #define SQRADD(i, j) \ -__asm__( \ +__asm__( \ "movd %6,%%mm0 \n\t" \ "pmuludq %%mm0,%%mm0\n\t" \ "movd %%mm0,%%eax \n\t" \ @@ -813,7 +843,7 @@ __asm__( \ :"=r"(c0), "=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i) :"%eax","cc"); #define SQRADD2(i, j) \ -__asm__( \ +__asm__( \ "movd %6,%%mm0 \n\t" \ "movd %7,%%mm1 \n\t" \ "pmuludq %%mm1,%%mm0\n\t" \ @@ -829,7 +859,7 @@ __asm__( \ :"=r"(c0), 
"=r"(c1), "=r"(c2): "0"(c0), "1"(c1), "2"(c2), "m"(i), "m"(j) :"%eax","%edx","cc"); #define SQRADDSC(i, j) \ -__asm__( \ +__asm__( \ "movd %3,%%mm0 \n\t" \ "movd %4,%%mm1 \n\t" \ "pmuludq %%mm1,%%mm0\n\t" \ @@ -842,7 +872,7 @@ __asm__( \ /* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */ #define SQRADDAC(i, j) \ -__asm__( \ +__asm__( \ "movd %6,%%mm0 \n\t" \ "movd %7,%%mm1 \n\t" \ "pmuludq %%mm1,%%mm0\n\t" \ @@ -855,7 +885,7 @@ __asm__( \ :"=r"(sc0), "=r"(sc1), "=r"(sc2): "0"(sc0), "1"(sc1), "2"(sc2), "m"(i), "m"(j) :"%eax","%edx","cc"); #define SQRADDDB \ -__asm__( \ +__asm__( \ "addl %6,%0 \n\t" \ "adcl %7,%1 \n\t" \ "adcl %8,%2 \n\t" \ @@ -886,16 +916,16 @@ __asm__( \ /* multiplies point i and j, updates carry "c1" and digit c2 */ #define SQRADD(i, j) \ -__asm__( \ +__asm__( \ " UMULL r0,r1,%6,%6 \n\t" \ " ADDS %0,%0,r0 \n\t" \ " ADCS %1,%1,r1 \n\t" \ " ADC %2,%2,#0 \n\t" \ :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i) : "r0", "r1", "cc"); - + /* for squaring some of the terms are doubled... */ #define SQRADD2(i, j) \ -__asm__( \ +__asm__( \ " UMULL r0,r1,%6,%7 \n\t" \ " ADDS %0,%0,r0 \n\t" \ " ADCS %1,%1,r1 \n\t" \ @@ -906,7 +936,7 @@ __asm__( \ :"=r"(c0), "=r"(c1), "=r"(c2) : "0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j) : "r0", "r1", "cc"); #define SQRADDSC(i, j) \ -__asm__( \ +__asm__( \ " UMULL %0,%1,%3,%4 \n\t" \ " SUB %2,%2,%2 \n\t" \ :"=r"(sc0), "=r"(sc1), "=r"(sc2) : "r"(i), "r"(j) : "cc"); @@ -914,7 +944,7 @@ __asm__( \ /* TAO removed sc0,1,2 as input to remove warning so %6,%7 become %3,%4 */ #define SQRADDAC(i, j) \ -__asm__( \ +__asm__( \ " UMULL r0,r1,%6,%7 \n\t" \ " ADDS %0,%0,r0 \n\t" \ " ADCS %1,%1,r1 \n\t" \ @@ -922,7 +952,7 @@ __asm__( \ :"=r"(sc0), "=r"(sc1), "=r"(sc2) : "0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j) : "r0", "r1", "cc"); #define SQRADDDB \ -__asm__( \ +__asm__( \ " ADDS %0,%0,%3 \n\t" \ " ADCS %1,%1,%4 \n\t" \ " ADC %2,%2,%5 \n\t" \ @@ -953,7 +983,7 @@ __asm__( \ /* multiplies point i and j, updates carry "c1" and digit c2 */ #define SQRADD(i, j) \ -__asm__( \ +__asm__( \ " mullw 16,%6,%6 \n\t" \ " addc %0,%0,16 \n\t" \ " mulhwu 16,%6,%6 \n\t" \ @@ -963,7 +993,7 @@ __asm__( \ /* for squaring some of the terms are doubled... 
*/ #define SQRADD2(i, j) \ -__asm__( \ +__asm__( \ " mullw 16,%6,%7 \n\t" \ " mulhwu 17,%6,%7 \n\t" \ " addc %0,%0,16 \n\t" \ @@ -975,14 +1005,14 @@ __asm__( \ :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16", "17","cc"); #define SQRADDSC(i, j) \ -__asm__( \ +__asm__( \ " mullw %0,%6,%7 \n\t" \ " mulhwu %1,%6,%7 \n\t" \ " xor %2,%2,%2 \n\t" \ :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc"); #define SQRADDAC(i, j) \ -__asm__( \ +__asm__( \ " mullw 16,%6,%7 \n\t" \ " addc %0,%0,16 \n\t" \ " mulhwu 16,%6,%7 \n\t" \ @@ -991,7 +1021,7 @@ __asm__( \ :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"16", "cc"); #define SQRADDDB \ -__asm__( \ +__asm__( \ " addc %0,%0,%3 \n\t" \ " adde %1,%1,%4 \n\t" \ " adde %2,%2,%5 \n\t" \ @@ -1020,46 +1050,46 @@ __asm__( \ #define COMBA_FINI /* multiplies point i and j, updates carry "c1" and digit c2 */ -#define SQRADD(i, j) \ -__asm__( \ - " mulld 16,%6,%6 \n\t" \ - " addc %0,%0,16 \n\t" \ - " mulhdu 16,%6,%6 \n\t" \ - " adde %1,%1,16 \n\t" \ - " addze %2,%2 \n\t" \ -:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"16","cc"); +#define SQRADD(i, j) \ +__asm__( \ + " mulld r16,%6,%6 \n\t" \ + " addc %0,%0,r16 \n\t" \ + " mulhdu r16,%6,%6 \n\t" \ + " adde %1,%1,r16 \n\t" \ + " addze %2,%2 \n\t" \ +:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"r16","cc"); /* for squaring some of the terms are doubled... */ -#define SQRADD2(i, j) \ -__asm__( \ - " mulld 16,%6,%7 \n\t" \ - " mulhdu 17,%6,%7 \n\t" \ - " addc %0,%0,16 \n\t" \ - " adde %1,%1,17 \n\t" \ - " addze %2,%2 \n\t" \ - " addc %0,%0,16 \n\t" \ - " adde %1,%1,17 \n\t" \ - " addze %2,%2 \n\t" \ -:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16", "17","cc"); +#define SQRADD2(i, j) \ +__asm__( \ + " mulld r16,%6,%7 \n\t" \ + " mulhdu r17,%6,%7 \n\t" \ + " addc %0,%0,r16 \n\t" \ + " adde %1,%1,r17 \n\t" \ + " addze %2,%2 \n\t" \ + " addc %0,%0,r16 \n\t" \ + " adde %1,%1,r17 \n\t" \ + " addze %2,%2 \n\t" \ +:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r16", "r17","cc"); #define SQRADDSC(i, j) \ -__asm__( \ +__asm__( \ " mulld %0,%6,%7 \n\t" \ " mulhdu %1,%6,%7 \n\t" \ " xor %2,%2,%2 \n\t" \ :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc"); -#define SQRADDAC(i, j) \ -__asm__( \ - " mulld 16,%6,%7 \n\t" \ - " addc %0,%0,16 \n\t" \ - " mulhdu 16,%6,%7 \n\t" \ - " adde %1,%1,16 \n\t" \ - " addze %2,%2 \n\t" \ -:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"16", "cc"); +#define SQRADDAC(i, j) \ +__asm__( \ + " mulld r16,%6,%7 \n\t" \ + " addc %0,%0,r16 \n\t" \ + " mulhdu r16,%6,%7 \n\t" \ + " adde %1,%1,r16 \n\t" \ + " addze %2,%2 \n\t" \ +:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"r16", "cc"); #define SQRADDDB \ -__asm__( \ +__asm__( \ " addc %0,%0,%3 \n\t" \ " adde %1,%1,%4 \n\t" \ " adde %2,%2,%5 \n\t" \ @@ -1091,7 +1121,7 @@ __asm__( \ /* multiplies point i and j, updates carry "c1" and digit c2 */ #define SQRADD(i, j) \ -__asm__( \ +__asm__( \ " mulu.d r2,%6,%6 \n\t" \ " add %0,%0,r2 \n\t" \ " adc %1,%1,r3 \n\t" \ @@ -1100,7 +1130,7 @@ __asm__( \ /* for squaring some of the terms are doubled... 
*/ #define SQRADD2(i, j) \ -__asm__( \ +__asm__( \ " mulu.d r2,%6,%7 \n\t" \ " add %0,%0,r2 \n\t" \ " adc %1,%1,r3 \n\t" \ @@ -1111,7 +1141,7 @@ __asm__( \ :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r2", "r3"); #define SQRADDSC(i, j) \ -__asm__( \ +__asm__( \ " mulu.d r2,%6,%7 \n\t" \ " mov %0,r2 \n\t" \ " mov %1,r3 \n\t" \ @@ -1119,7 +1149,7 @@ __asm__( \ :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "r2", "r3"); #define SQRADDAC(i, j) \ -__asm__( \ +__asm__( \ " mulu.d r2,%6,%7 \n\t" \ " add %0,%0,r2 \n\t" \ " adc %1,%1,r3 \n\t" \ @@ -1127,7 +1157,7 @@ __asm__( \ :"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"r2", "r3"); #define SQRADDDB \ -__asm__( \ +__asm__( \ " add %0,%0,%3 \n\t" \ " adc %1,%1,%4 \n\t" \ " adc %2,%2,%5 \n\t" \ @@ -1136,6 +1166,112 @@ __asm__( \ " adc %2,%2,%5 \n\t" \ :"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "cc"); +#elif defined(TFM_MIPS) + +/* MIPS */ +#define COMBA_START + +#define CLEAR_CARRY \ + c0 = c1 = c2 = 0; + +#define COMBA_STORE(x) \ + x = c0; + +#define COMBA_STORE2(x) \ + x = c1; + +#define CARRY_FORWARD \ + do { c0 = c1; c1 = c2; c2 = 0; } while (0); + +#define COMBA_FINI + +/* multiplies point i and j, updates carry "c1" and digit c2 */ +#define SQRADD(i, j) \ +__asm__( \ + " multu %6,%6 \n\t" \ + " mflo $12 \n\t" \ + " mfhi $13 \n\t" \ + " addu %0,%0,$12 \n\t" \ + " sltu $12,%0,$12 \n\t" \ + " addu %1,%1,$13 \n\t" \ + " sltu $13,%1,$13 \n\t" \ + " addu %1,%1,$12 \n\t" \ + " sltu $12,%1,$12 \n\t" \ + " addu %2,%2,$13 \n\t" \ + " addu %2,%2,$12 \n\t" \ +:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i):"$12","$13"); + +/* for squaring some of the terms are doubled... 
*/ +#define SQRADD2(i, j) \ +__asm__( \ + " multu %6,%7 \n\t" \ + " mflo $12 \n\t" \ + " mfhi $13 \n\t" \ + \ + " addu %0,%0,$12 \n\t" \ + " sltu $14,%0,$12 \n\t" \ + " addu %1,%1,$13 \n\t" \ + " sltu $15,%1,$13 \n\t" \ + " addu %1,%1,$14 \n\t" \ + " sltu $14,%1,$14 \n\t" \ + " addu %2,%2,$15 \n\t" \ + " addu %2,%2,$14 \n\t" \ + \ + " addu %0,%0,$12 \n\t" \ + " sltu $14,%0,$12 \n\t" \ + " addu %1,%1,$13 \n\t" \ + " sltu $15,%1,$13 \n\t" \ + " addu %1,%1,$14 \n\t" \ + " sltu $14,%1,$14 \n\t" \ + " addu %2,%2,$15 \n\t" \ + " addu %2,%2,$14 \n\t" \ +:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"$12", "$13", "$14", "$15"); + +#define SQRADDSC(i, j) \ +__asm__( \ + " multu %6,%7 \n\t" \ + " mflo %0 \n\t" \ + " mfhi %1 \n\t" \ + " xor %2,%2,%2 \n\t" \ +:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i),"r"(j) : "cc"); + +#define SQRADDAC(i, j) \ +__asm__( \ + " multu %6,%7 \n\t" \ + " mflo $12 \n\t" \ + " mfhi $13 \n\t" \ + " addu %0,%0,$12 \n\t" \ + " sltu $12,%0,$12 \n\t" \ + " addu %1,%1,$13 \n\t" \ + " sltu $13,%1,$13 \n\t" \ + " addu %1,%1,$12 \n\t" \ + " sltu $12,%1,$12 \n\t" \ + " addu %2,%2,$13 \n\t" \ + " addu %2,%2,$12 \n\t" \ +:"=r"(sc0), "=r"(sc1), "=r"(sc2):"0"(sc0), "1"(sc1), "2"(sc2), "r"(i), "r"(j):"$12", "$13", "$14"); + +#define SQRADDDB \ +__asm__( \ + " addu %0,%0,%3 \n\t" \ + " sltu $10,%0,%3 \n\t" \ + " addu %1,%1,$10 \n\t" \ + " sltu $10,%1,$10 \n\t" \ + " addu %1,%1,%4 \n\t" \ + " sltu $11,%1,%4 \n\t" \ + " addu %2,%2,$10 \n\t" \ + " addu %2,%2,$11 \n\t" \ + " addu %2,%2,%5 \n\t" \ + \ + " addu %0,%0,%3 \n\t" \ + " sltu $10,%0,%3 \n\t" \ + " addu %1,%1,$10 \n\t" \ + " sltu $10,%1,$10 \n\t" \ + " addu %1,%1,%4 \n\t" \ + " sltu $11,%1,%4 \n\t" \ + " addu %2,%2,$10 \n\t" \ + " addu %2,%2,$11 \n\t" \ + " addu %2,%2,%5 \n\t" \ +:"=r"(c0), "=r"(c1), "=r"(c2) : "r"(sc0), "r"(sc1), "r"(sc2), "0"(c0), "1"(c1), "2"(c2) : "$10", "$11"); #else @@ -1166,7 +1302,7 @@ __asm__( \ t = c1 + (t >> DIGIT_BIT); c1 = (fp_digit)t; \ c2 +=(fp_digit) (t >> DIGIT_BIT); \ } while (0); - + /* for squaring some of the terms are doubled... 
*/ #define SQRADD2(i, j) \ @@ -1174,10 +1310,10 @@ __asm__( \ t = ((fp_word)i) * ((fp_word)j); \ tt = (fp_word)c0 + t; c0 = (fp_digit)tt; \ tt = (fp_word)c1 + (tt >> DIGIT_BIT); c1 = (fp_digit)tt; \ - c2 +=(fp_digit)( tt >> DIGIT_BIT); \ + c2 +=(fp_digit)(tt >> DIGIT_BIT); \ tt = (fp_word)c0 + t; c0 = (fp_digit)tt; \ tt = (fp_word)c1 + (tt >> DIGIT_BIT); c1 = (fp_digit)tt; \ - c2 +=(fp_digit) (tt >> DIGIT_BIT); \ + c2 +=(fp_digit)(tt >> DIGIT_BIT); \ } while (0); #define SQRADDSC(i, j) \ @@ -1280,7 +1416,7 @@ __asm__( \ /* this should multiply i and j */ #define MULADD(i, j) \ -__asm__( \ +__asm__( \ "movl %6,%%eax \n\t" \ "mull %7 \n\t" \ "addl %%eax,%0 \n\t" \ @@ -1315,7 +1451,7 @@ __asm__( \ /* this should multiply i and j */ #define MULADD(i, j) \ -__asm__ ( \ +__asm__ ( \ "movq %6,%%rax \n\t" \ "mulq %7 \n\t" \ "addq %%rax,%0 \n\t" \ @@ -1410,7 +1546,7 @@ __asm__ ( \ /* this should multiply i and j */ #define MULADD(i, j) \ -__asm__( \ +__asm__( \ "movd %6,%%mm0 \n\t" \ "movd %7,%%mm1 \n\t" \ "pmuludq %%mm1,%%mm0\n\t" \ @@ -1425,7 +1561,7 @@ __asm__( \ #elif defined(TFM_ARM) /* ARM code */ -#define COMBA_START +#define COMBA_START #define COMBA_CLEAR \ c0 = c1 = c2 = 0; @@ -1442,7 +1578,7 @@ __asm__( \ #define COMBA_FINI #define MULADD(i, j) \ -__asm__( \ +__asm__( \ " UMULL r0,r1,%6,%7 \n\t" \ " ADDS %0,%0,r0 \n\t" \ " ADCS %1,%1,r1 \n\t" \ @@ -1466,11 +1602,11 @@ __asm__( \ #define COMBA_STORE2(x) \ x = c1; -#define COMBA_FINI - +#define COMBA_FINI + /* untested: will mulhwu change the flags? Docs say no */ -#define MULADD(i, j) \ -__asm__( \ +#define MULADD(i, j) \ +__asm__( \ " mullw 16,%6,%7 \n\t" \ " addc %0,%0,16 \n\t" \ " mulhwu 16,%6,%7 \n\t" \ @@ -1495,17 +1631,17 @@ __asm__( \ #define COMBA_STORE2(x) \ x = c1; -#define COMBA_FINI - -/* untested: will mulhwu change the flags? Docs say no */ +#define COMBA_FINI + +/* untested: will mulhdu change the flags? 
Docs say no */ #define MULADD(i, j) \ -____asm__( \ - " mulld 16,%6,%7 \n\t" \ - " addc %0,%0,16 \n\t" \ - " mulhdu 16,%6,%7 \n\t" \ - " adde %1,%1,16 \n\t" \ - " addze %2,%2 \n\t" \ -:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"16"); +____asm__( \ + " mulld r16,%6,%7 \n\t" \ + " addc %0,%0,16 \n\t" \ + " mulhdu r16,%6,%7 \n\t" \ + " adde %1,%1,16 \n\t" \ + " addze %2,%2 \n\t" \ +:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r16"); #elif defined(TFM_AVR32) @@ -1525,16 +1661,50 @@ ____asm__( \ #define COMBA_STORE2(x) \ x = c1; -#define COMBA_FINI - +#define COMBA_FINI + #define MULADD(i, j) \ -____asm__( \ +____asm__( \ " mulu.d r2,%6,%7 \n\t"\ " add %0,r2 \n\t"\ " adc %1,%1,r3 \n\t"\ " acr %2 \n\t"\ :"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"r2","r3"); +#elif defined(TFM_MIPS) + +/* MIPS */ +#define COMBA_START + +#define COMBA_CLEAR \ + c0 = c1 = c2 = 0; + +#define COMBA_FORWARD \ + do { c0 = c1; c1 = c2; c2 = 0; } while (0); + +#define COMBA_STORE(x) \ + x = c0; + +#define COMBA_STORE2(x) \ + x = c1; + +#define COMBA_FINI + +#define MULADD(i, j) \ +__asm__( \ + " multu %6,%7 \n\t" \ + " mflo $12 \n\t" \ + " mfhi $13 \n\t" \ + " addu %0,%0,$12 \n\t" \ + " sltu $12,%0,$12 \n\t" \ + " addu %1,%1,$13 \n\t" \ + " sltu $13,%1,$13 \n\t" \ + " addu %1,%1,$12 \n\t" \ + " sltu $12,%1,$12 \n\t" \ + " addu %2,%2,$13 \n\t" \ + " addu %2,%2,$12 \n\t" \ +:"=r"(c0), "=r"(c1), "=r"(c2):"0"(c0), "1"(c1), "2"(c2), "r"(i), "r"(j):"$12","$13"); + #else /* ISO C code */ @@ -1552,13 +1722,15 @@ ____asm__( \ #define COMBA_STORE2(x) \ x = c1; -#define COMBA_FINI - +#define COMBA_FINI + #define MULADD(i, j) \ - do { fp_word t; \ - t = (fp_word)c0 + ((fp_word)i) * ((fp_word)j); c0 = (fp_digit)t; \ - t = (fp_word)c1 + (t >> DIGIT_BIT); \ - c1 = (fp_digit)t; c2 += (fp_digit)(t >> DIGIT_BIT); \ + do { fp_word t; \ + t = (fp_word)c0 + ((fp_word)i) * ((fp_word)j); \ + c0 = (fp_digit)t; \ + t = (fp_word)c1 + (t >> DIGIT_BIT); \ + c1 = (fp_digit)t; \ + c2 += (fp_digit)(t >> DIGIT_BIT); \ } while (0); #endif diff --git a/wolfcrypt/src/fp_mont_small.i b/wolfcrypt/src/fp_mont_small.i index 78ff5b52e..c39925e20 100644 --- a/wolfcrypt/src/fp_mont_small.i +++ b/wolfcrypt/src/fp_mont_small.i @@ -29,7 +29,7 @@ void fp_montgomery_reduce_small(fp_int *a, fp_int *m, fp_digit mp) int oldused, x, y, pa; /* now zero the buff */ - XMEMSET(c, 0, sizeof c); + XMEMSET(c, 0, sizeof(c)); pa = m->used; diff --git a/wolfcrypt/src/integer.c b/wolfcrypt/src/integer.c index 3c948917e..4122166e3 100644 --- a/wolfcrypt/src/integer.c +++ b/wolfcrypt/src/integer.c @@ -325,10 +325,15 @@ int mp_init_copy (mp_int * a, mp_int * b) { int res; - if ((res = mp_init (a)) != MP_OKAY) { + if ((res = mp_init_size (a, b->used)) != MP_OKAY) { return res; } - return mp_copy (b, a); + + if((res = mp_copy (b, a)) != MP_OKAY) { + mp_clear(a); + } + + return res; } @@ -929,13 +934,15 @@ int mp_invmod (mp_int * a, mp_int * b, mp_int * c) #ifdef BN_FAST_MP_INVMOD_C /* if the modulus is odd we can use a faster routine instead */ - if (mp_isodd (b) == MP_YES) { + if ((mp_isodd(b) == MP_YES) && (mp_cmp_d(b, 1) != MP_EQ)) { return fast_mp_invmod (a, b, c); } #endif #ifdef BN_MP_INVMOD_SLOW_C return mp_invmod_slow(a, b, c); +#else + return MP_VAL; #endif } @@ -1379,7 +1386,7 @@ int mp_mod (mp_int * a, mp_int * b, mp_int * c) mp_int t; int res; - if ((res = mp_init (&t)) != MP_OKAY) { + if ((res = mp_init_size (&t, b->used)) != MP_OKAY) { return res; } @@ -1388,11 +1395,11 @@ int mp_mod 
(mp_int * a, mp_int * b, mp_int * c) return res; } - if (t.sign != b->sign) { - res = mp_add (b, &t, c); - } else { + if ((mp_iszero(&t) != MP_NO) || (t.sign == b->sign)) { res = MP_OKAY; mp_exch (&t, c); + } else { + res = mp_add (b, &t, c); } mp_clear (&t); @@ -1891,7 +1898,7 @@ int mp_exptmod_fast (mp_int * G, mp_int * X, mp_int * P, mp_int * Y, /* init M array */ /* init first cell */ - if ((err = mp_init(&M[1])) != MP_OKAY) { + if ((err = mp_init_size(&M[1], P->alloc)) != MP_OKAY) { #ifdef WOLFSSL_SMALL_STACK XFREE(M, NULL, DYNAMIC_TYPE_TMP_BUFFER); #endif @@ -1901,7 +1908,7 @@ int mp_exptmod_fast (mp_int * G, mp_int * X, mp_int * P, mp_int * Y, /* now init the second half of the array */ for (x = 1<<(winsize-1); x < (1 << winsize); x++) { - if ((err = mp_init(&M[x])) != MP_OKAY) { + if ((err = mp_init_size(&M[x], P->alloc)) != MP_OKAY) { for (y = 1<<(winsize-1); y < x; y++) { mp_clear (&M[y]); } @@ -1967,7 +1974,7 @@ int mp_exptmod_fast (mp_int * G, mp_int * X, mp_int * P, mp_int * Y, } /* setup result */ - if ((err = mp_init (&res)) != MP_OKAY) { + if ((err = mp_init_size (&res, P->alloc)) != MP_OKAY) { goto LBL_M; } @@ -1984,15 +1991,15 @@ int mp_exptmod_fast (mp_int * G, mp_int * X, mp_int * P, mp_int * Y, if ((err = mp_montgomery_calc_normalization (&res, P)) != MP_OKAY) { goto LBL_RES; } -#else - err = MP_VAL; - goto LBL_RES; -#endif /* now set M[1] to G * R mod m */ if ((err = mp_mulmod (G, &res, P, &M[1])) != MP_OKAY) { goto LBL_RES; } +#else + err = MP_VAL; + goto LBL_RES; +#endif } else { if ((err = mp_set(&res, 1)) != MP_OKAY) { goto LBL_RES; @@ -2536,7 +2543,9 @@ top: * Each successive "recursion" makes the input smaller and smaller. */ if (mp_cmp_mag (x, n) != MP_LT) { - s_mp_sub(x, n, x); + if ((err = s_mp_sub(x, n, x)) != MP_OKAY) { + return err; + } goto top; } return MP_OKAY; @@ -2573,7 +2582,9 @@ top: } if (mp_cmp_mag(a, n) != MP_LT) { - s_mp_sub(a, n, a); + if ((res = s_mp_sub(a, n, a)) != MP_OKAY) { + goto ERR; + } goto top; } @@ -2711,7 +2722,7 @@ int mp_mulmod (mp_int * a, mp_int * b, mp_int * c, mp_int * d) int res; mp_int t; - if ((res = mp_init (&t)) != MP_OKAY) { + if ((res = mp_init_size (&t, c->used)) != MP_OKAY) { return res; } @@ -3732,7 +3743,9 @@ top: } if (mp_cmp_mag(a, n) != MP_LT) { - s_mp_sub(a, n, a); + if ((res = s_mp_sub(a, n, a)) != MP_OKAY) { + goto ERR; + } goto top; } @@ -4751,7 +4764,7 @@ int mp_read_radix (mp_int * a, const char *str, int radix) mp_zero (a); /* process each digit of the string */ - while (*str) { + while (*str != '\0') { /* if the radix <= 36 the conversion is case insensitive * this allows numbers like 1AB and 1ab to represent the same value * [e.g. in hex] @@ -4780,6 +4793,12 @@ int mp_read_radix (mp_int * a, const char *str, int radix) ++str; } + /* if digit in isn't null term, then invalid character was found */ + if (*str != '\0') { + mp_zero (a); + return MP_VAL; + } + /* set the sign only if a != 0 */ if (mp_iszero(a) != MP_YES) { a->sign = neg;
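
The TFM_MIPS blocks added above all lean on the same idiom: MIPS has no carry flag, so each addu is followed by an sltu ("set on less than unsigned") that recovers the carry by comparing the sum with one of its operands. Below is a minimal C model of the MULADD column accumulation, assuming a 32-bit fp_digit and 64-bit fp_word; the helper name muladd_model is illustrative only and does not appear in the patch.

    #include <stdint.h>

    typedef uint32_t fp_digit;   /* assumed 32-bit digit on MIPS32 */
    typedef uint64_t fp_word;    /* double-width product type      */

    /* Accumulate i*j into the three-digit column accumulator c0 (low),
     * c1 (middle), c2 (overflow), mirroring the sltu-based carry
     * handling used by the TFM_MIPS MULADD macro. */
    static void muladd_model(fp_digit i, fp_digit j,
                             fp_digit *c0, fp_digit *c1, fp_digit *c2)
    {
        fp_word  t  = (fp_word)i * (fp_word)j;  /* multu %6,%7        */
        fp_digit lo = (fp_digit)t;              /* mflo  $12          */
        fp_digit hi = (fp_digit)(t >> 32);      /* mfhi  $13          */
        fp_digit c;

        *c0 += lo;                              /* addu %0,%0,$12     */
        c    = (*c0 < lo);                      /* sltu $12,%0,$12    */
        *c1 += hi;                              /* addu %1,%1,$13     */
        *c2 += (*c1 < hi);                      /* sltu $13 + addu %2 */
        *c1 += c;                               /* addu %1,%1,$12     */
        *c2 += (*c1 < c);                       /* sltu $12 + addu %2 */
    }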