Merge pull request #6971 from SparkiDev/iar_thumb2_asm

IAR Thumb2 ASM: fixes
Authored by JacobBarthelmeh on 2023-11-21 09:46:31 -07:00; committed by GitHub.
15 changed files with 4029 additions and 2451 deletions
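
Two patterns recur throughout the inline-assembly hunks below: local labels drop the GCC-specific `%=` suffix (IAR's inline assembler does not expand it), and the explicit Thumb-2 branch-width suffix is now emitted only for non-GCC compilers, with some backward branches widened from `.N` to `.W` where the target sits outside narrow-branch range. IAR builds additionally define WOLFSSL_NO_VAR_ASSIGN_REG so the constant-table pointer is passed as a plain input operand rather than a register-pinned local. The following is a minimal sketch of the label/branch pattern only, using a made-up count_down loop rather than any actual wolfSSL routine:

/* Illustrative sketch only: shows the label and branch-width pattern this PR
 * converges on, not an actual wolfSSL function. */
static int count_down(int loop_count)
{
    __asm__ __volatile__ (
        "\n"
        "L_count_down_loop:\n\t"           /* fixed label: IAR cannot expand %= */
        "SUBS %[loop_count], %[loop_count], #0x1\n\t"
#ifdef __GNUC__
        "BNE L_count_down_loop\n\t"        /* GCC chooses the branch encoding */
#else
        "BNE.W L_count_down_loop\n\t"      /* other compilers: force wide encoding */
#endif
        : [loop_count] "+r" (loop_count)
        :
        : "cc"                             /* SUBS updates the flags */
    );
    return loop_count;
}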

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -39,7 +39,7 @@
#ifdef WOLFSSL_ARMASM_INLINE
#ifdef WOLFSSL_ARMASM
#if !defined(__aarch64__) && defined(__arm__)
#if !defined(__aarch64__) && defined(__thumb__)
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
@ -2796,9 +2796,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"MOV %[a], #0x1c\n\t"
"STR %[a], [sp, #176]\n\t"
"\n"
"L_curve25519_words_%=:\n\t"
"L_curve25519_words:\n\t"
"\n"
"L_curve25519_bits_%=:\n\t"
"L_curve25519_bits:\n\t"
"LDR %[n], [sp, #164]\n\t"
"LDR %[a], [%[n], r2]\n\t"
"LDR %[n], [sp, #180]\n\t"
@ -2978,19 +2978,19 @@ int curve25519(byte* r, const byte* n, const byte* a)
"LDR %[n], [sp, #180]\n\t"
"SUBS %[n], %[n], #0x1\n\t"
"STR %[n], [sp, #180]\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BGE L_curve25519_bits_%=\n\t"
#ifdef __GNUC__
"BGE L_curve25519_bits\n\t"
#else
"BGE.N L_curve25519_bits_%=\n\t"
"BGE.W L_curve25519_bits\n\t"
#endif
"MOV %[n], #0x1f\n\t"
"STR %[n], [sp, #180]\n\t"
"SUBS %[a], %[a], #0x4\n\t"
"STR %[a], [sp, #176]\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BGE L_curve25519_words_%=\n\t"
#ifdef __GNUC__
"BGE L_curve25519_words\n\t"
#else
"BGE.N L_curve25519_words_%=\n\t"
"BGE.W L_curve25519_words\n\t"
#endif
/* Invert */
"ADD r1, sp, #0x0\n\t"
@ -3022,7 +3022,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_sq_op\n\t"
"MOV r12, #0x4\n\t"
"\n"
"L_curve25519_inv_1_%=:\n\t"
"L_curve25519_inv_1:\n\t"
"ADD r1, sp, #0x60\n\t"
"ADD r0, sp, #0x60\n\t"
"PUSH {r12}\n\t"
@ -3030,9 +3030,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_1_%=\n\t"
"BNE L_curve25519_inv_1\n\t"
#else
"BNE.N L_curve25519_inv_1_%=\n\t"
"BNE.N L_curve25519_inv_1\n\t"
#endif
"ADD r2, sp, #0x40\n\t"
"ADD r1, sp, #0x60\n\t"
@ -3043,7 +3043,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_sq_op\n\t"
"MOV r12, #0x9\n\t"
"\n"
"L_curve25519_inv_2_%=:\n\t"
"L_curve25519_inv_2:\n\t"
"ADD r1, sp, #0x60\n\t"
"ADD r0, sp, #0x60\n\t"
"PUSH {r12}\n\t"
@ -3051,9 +3051,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_2_%=\n\t"
"BNE L_curve25519_inv_2\n\t"
#else
"BNE.N L_curve25519_inv_2_%=\n\t"
"BNE.N L_curve25519_inv_2\n\t"
#endif
"ADD r2, sp, #0x40\n\t"
"ADD r1, sp, #0x60\n\t"
@ -3064,7 +3064,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_sq_op\n\t"
"MOV r12, #0x13\n\t"
"\n"
"L_curve25519_inv_3_%=:\n\t"
"L_curve25519_inv_3:\n\t"
"ADD r1, sp, #0x80\n\t"
"ADD r0, sp, #0x80\n\t"
"PUSH {r12}\n\t"
@ -3072,9 +3072,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_3_%=\n\t"
"BNE L_curve25519_inv_3\n\t"
#else
"BNE.N L_curve25519_inv_3_%=\n\t"
"BNE.N L_curve25519_inv_3\n\t"
#endif
"ADD r2, sp, #0x60\n\t"
"ADD r1, sp, #0x80\n\t"
@ -3082,7 +3082,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_mul_op\n\t"
"MOV r12, #0xa\n\t"
"\n"
"L_curve25519_inv_4_%=:\n\t"
"L_curve25519_inv_4:\n\t"
"ADD r1, sp, #0x60\n\t"
"ADD r0, sp, #0x60\n\t"
"PUSH {r12}\n\t"
@ -3090,9 +3090,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_4_%=\n\t"
"BNE L_curve25519_inv_4\n\t"
#else
"BNE.N L_curve25519_inv_4_%=\n\t"
"BNE.N L_curve25519_inv_4\n\t"
#endif
"ADD r2, sp, #0x40\n\t"
"ADD r1, sp, #0x60\n\t"
@ -3103,7 +3103,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_sq_op\n\t"
"MOV r12, #0x31\n\t"
"\n"
"L_curve25519_inv_5_%=:\n\t"
"L_curve25519_inv_5:\n\t"
"ADD r1, sp, #0x60\n\t"
"ADD r0, sp, #0x60\n\t"
"PUSH {r12}\n\t"
@ -3111,9 +3111,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_5_%=\n\t"
"BNE L_curve25519_inv_5\n\t"
#else
"BNE.N L_curve25519_inv_5_%=\n\t"
"BNE.N L_curve25519_inv_5\n\t"
#endif
"ADD r2, sp, #0x40\n\t"
"ADD r1, sp, #0x60\n\t"
@ -3124,7 +3124,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_sq_op\n\t"
"MOV r12, #0x63\n\t"
"\n"
"L_curve25519_inv_6_%=:\n\t"
"L_curve25519_inv_6:\n\t"
"ADD r1, sp, #0x80\n\t"
"ADD r0, sp, #0x80\n\t"
"PUSH {r12}\n\t"
@ -3132,9 +3132,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_6_%=\n\t"
"BNE L_curve25519_inv_6\n\t"
#else
"BNE.N L_curve25519_inv_6_%=\n\t"
"BNE.N L_curve25519_inv_6\n\t"
#endif
"ADD r2, sp, #0x60\n\t"
"ADD r1, sp, #0x80\n\t"
@ -3142,7 +3142,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_mul_op\n\t"
"MOV r12, #0x32\n\t"
"\n"
"L_curve25519_inv_7_%=:\n\t"
"L_curve25519_inv_7:\n\t"
"ADD r1, sp, #0x60\n\t"
"ADD r0, sp, #0x60\n\t"
"PUSH {r12}\n\t"
@ -3150,9 +3150,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_7_%=\n\t"
"BNE L_curve25519_inv_7\n\t"
#else
"BNE.N L_curve25519_inv_7_%=\n\t"
"BNE.N L_curve25519_inv_7\n\t"
#endif
"ADD r2, sp, #0x40\n\t"
"ADD r1, sp, #0x60\n\t"
@ -3160,7 +3160,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_mul_op\n\t"
"MOV r12, #0x5\n\t"
"\n"
"L_curve25519_inv_8_%=:\n\t"
"L_curve25519_inv_8:\n\t"
"ADD r1, sp, #0x40\n\t"
"ADD r0, sp, #0x40\n\t"
"PUSH {r12}\n\t"
@ -3168,9 +3168,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_8_%=\n\t"
"BNE L_curve25519_inv_8\n\t"
#else
"BNE.N L_curve25519_inv_8_%=\n\t"
"BNE.N L_curve25519_inv_8\n\t"
#endif
"ADD r2, sp, #0x20\n\t"
"ADD r1, sp, #0x40\n\t"
@ -3234,7 +3234,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"STM r3, {r4, r5, r6, r7, r8, r9, r10, r11}\n\t"
"MOV %[a], #0xfe\n\t"
"\n"
"L_curve25519_bits_%=:\n\t"
"L_curve25519_bits:\n\t"
"STR %[a], [sp, #168]\n\t"
"LDR %[n], [sp, #160]\n\t"
"AND r4, %[a], #0x1f\n\t"
@ -3320,9 +3320,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"LDR %[a], [sp, #168]\n\t"
"SUBS %[a], %[a], #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BGE L_curve25519_bits_%=\n\t"
"BGE L_curve25519_bits\n\t"
#else
"BGE.N L_curve25519_bits_%=\n\t"
"BGE.N L_curve25519_bits\n\t"
#endif
/* Cycle Count: 171 */
"LDR %[n], [sp, #184]\n\t"
@ -3359,7 +3359,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_sq_op\n\t"
"MOV r12, #0x4\n\t"
"\n"
"L_curve25519_inv_1_%=:\n\t"
"L_curve25519_inv_1:\n\t"
"ADD r1, sp, #0x60\n\t"
"ADD r0, sp, #0x60\n\t"
"PUSH {r12}\n\t"
@ -3367,9 +3367,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_1_%=\n\t"
"BNE L_curve25519_inv_1\n\t"
#else
"BNE.N L_curve25519_inv_1_%=\n\t"
"BNE.N L_curve25519_inv_1\n\t"
#endif
"ADD r2, sp, #0x40\n\t"
"ADD r1, sp, #0x60\n\t"
@ -3380,7 +3380,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_sq_op\n\t"
"MOV r12, #0x9\n\t"
"\n"
"L_curve25519_inv_2_%=:\n\t"
"L_curve25519_inv_2:\n\t"
"ADD r1, sp, #0x60\n\t"
"ADD r0, sp, #0x60\n\t"
"PUSH {r12}\n\t"
@ -3388,9 +3388,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_2_%=\n\t"
"BNE L_curve25519_inv_2\n\t"
#else
"BNE.N L_curve25519_inv_2_%=\n\t"
"BNE.N L_curve25519_inv_2\n\t"
#endif
"ADD r2, sp, #0x40\n\t"
"ADD r1, sp, #0x60\n\t"
@ -3401,7 +3401,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_sq_op\n\t"
"MOV r12, #0x13\n\t"
"\n"
"L_curve25519_inv_3_%=:\n\t"
"L_curve25519_inv_3:\n\t"
"ADD r1, sp, #0x80\n\t"
"ADD r0, sp, #0x80\n\t"
"PUSH {r12}\n\t"
@ -3409,9 +3409,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_3_%=\n\t"
"BNE L_curve25519_inv_3\n\t"
#else
"BNE.N L_curve25519_inv_3_%=\n\t"
"BNE.N L_curve25519_inv_3\n\t"
#endif
"ADD r2, sp, #0x60\n\t"
"ADD r1, sp, #0x80\n\t"
@ -3419,7 +3419,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_mul_op\n\t"
"MOV r12, #0xa\n\t"
"\n"
"L_curve25519_inv_4_%=:\n\t"
"L_curve25519_inv_4:\n\t"
"ADD r1, sp, #0x60\n\t"
"ADD r0, sp, #0x60\n\t"
"PUSH {r12}\n\t"
@ -3427,9 +3427,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_4_%=\n\t"
"BNE L_curve25519_inv_4\n\t"
#else
"BNE.N L_curve25519_inv_4_%=\n\t"
"BNE.N L_curve25519_inv_4\n\t"
#endif
"ADD r2, sp, #0x40\n\t"
"ADD r1, sp, #0x60\n\t"
@ -3440,7 +3440,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_sq_op\n\t"
"MOV r12, #0x31\n\t"
"\n"
"L_curve25519_inv_5_%=:\n\t"
"L_curve25519_inv_5:\n\t"
"ADD r1, sp, #0x60\n\t"
"ADD r0, sp, #0x60\n\t"
"PUSH {r12}\n\t"
@ -3448,9 +3448,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_5_%=\n\t"
"BNE L_curve25519_inv_5\n\t"
#else
"BNE.N L_curve25519_inv_5_%=\n\t"
"BNE.N L_curve25519_inv_5\n\t"
#endif
"ADD r2, sp, #0x40\n\t"
"ADD r1, sp, #0x60\n\t"
@ -3461,7 +3461,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_sq_op\n\t"
"MOV r12, #0x63\n\t"
"\n"
"L_curve25519_inv_6_%=:\n\t"
"L_curve25519_inv_6:\n\t"
"ADD r1, sp, #0x80\n\t"
"ADD r0, sp, #0x80\n\t"
"PUSH {r12}\n\t"
@ -3469,9 +3469,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_6_%=\n\t"
"BNE L_curve25519_inv_6\n\t"
#else
"BNE.N L_curve25519_inv_6_%=\n\t"
"BNE.N L_curve25519_inv_6\n\t"
#endif
"ADD r2, sp, #0x60\n\t"
"ADD r1, sp, #0x80\n\t"
@ -3479,7 +3479,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_mul_op\n\t"
"MOV r12, #0x32\n\t"
"\n"
"L_curve25519_inv_7_%=:\n\t"
"L_curve25519_inv_7:\n\t"
"ADD r1, sp, #0x60\n\t"
"ADD r0, sp, #0x60\n\t"
"PUSH {r12}\n\t"
@ -3487,9 +3487,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_7_%=\n\t"
"BNE L_curve25519_inv_7\n\t"
#else
"BNE.N L_curve25519_inv_7_%=\n\t"
"BNE.N L_curve25519_inv_7\n\t"
#endif
"ADD r2, sp, #0x40\n\t"
"ADD r1, sp, #0x60\n\t"
@ -3497,7 +3497,7 @@ int curve25519(byte* r, const byte* n, const byte* a)
"BL fe_mul_op\n\t"
"MOV r12, #0x5\n\t"
"\n"
"L_curve25519_inv_8_%=:\n\t"
"L_curve25519_inv_8:\n\t"
"ADD r1, sp, #0x40\n\t"
"ADD r0, sp, #0x40\n\t"
"PUSH {r12}\n\t"
@ -3505,9 +3505,9 @@ int curve25519(byte* r, const byte* n, const byte* a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_curve25519_inv_8_%=\n\t"
"BNE L_curve25519_inv_8\n\t"
#else
"BNE.N L_curve25519_inv_8_%=\n\t"
"BNE.N L_curve25519_inv_8\n\t"
#endif
"ADD r2, sp, #0x20\n\t"
"ADD r1, sp, #0x40\n\t"
@ -3589,7 +3589,7 @@ void fe_invert(fe r, const fe a)
"BL fe_sq_op\n\t"
"MOV r12, #0x4\n\t"
"\n"
"L_fe_invert1_%=:\n\t"
"L_fe_invert1:\n\t"
"ADD r1, sp, #0x40\n\t"
"ADD r0, sp, #0x40\n\t"
"PUSH {r12}\n\t"
@ -3597,9 +3597,9 @@ void fe_invert(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_invert1_%=\n\t"
"BNE L_fe_invert1\n\t"
#else
"BNE.N L_fe_invert1_%=\n\t"
"BNE.N L_fe_invert1\n\t"
#endif
"ADD r2, sp, #0x20\n\t"
"ADD r1, sp, #0x40\n\t"
@ -3610,7 +3610,7 @@ void fe_invert(fe r, const fe a)
"BL fe_sq_op\n\t"
"MOV r12, #0x9\n\t"
"\n"
"L_fe_invert2_%=:\n\t"
"L_fe_invert2:\n\t"
"ADD r1, sp, #0x40\n\t"
"ADD r0, sp, #0x40\n\t"
"PUSH {r12}\n\t"
@ -3618,9 +3618,9 @@ void fe_invert(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_invert2_%=\n\t"
"BNE L_fe_invert2\n\t"
#else
"BNE.N L_fe_invert2_%=\n\t"
"BNE.N L_fe_invert2\n\t"
#endif
"ADD r2, sp, #0x20\n\t"
"ADD r1, sp, #0x40\n\t"
@ -3631,7 +3631,7 @@ void fe_invert(fe r, const fe a)
"BL fe_sq_op\n\t"
"MOV r12, #0x13\n\t"
"\n"
"L_fe_invert3_%=:\n\t"
"L_fe_invert3:\n\t"
"ADD r1, sp, #0x60\n\t"
"ADD r0, sp, #0x60\n\t"
"PUSH {r12}\n\t"
@ -3639,9 +3639,9 @@ void fe_invert(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_invert3_%=\n\t"
"BNE L_fe_invert3\n\t"
#else
"BNE.N L_fe_invert3_%=\n\t"
"BNE.N L_fe_invert3\n\t"
#endif
"ADD r2, sp, #0x40\n\t"
"ADD r1, sp, #0x60\n\t"
@ -3649,7 +3649,7 @@ void fe_invert(fe r, const fe a)
"BL fe_mul_op\n\t"
"MOV r12, #0xa\n\t"
"\n"
"L_fe_invert4_%=:\n\t"
"L_fe_invert4:\n\t"
"ADD r1, sp, #0x40\n\t"
"ADD r0, sp, #0x40\n\t"
"PUSH {r12}\n\t"
@ -3657,9 +3657,9 @@ void fe_invert(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_invert4_%=\n\t"
"BNE L_fe_invert4\n\t"
#else
"BNE.N L_fe_invert4_%=\n\t"
"BNE.N L_fe_invert4\n\t"
#endif
"ADD r2, sp, #0x20\n\t"
"ADD r1, sp, #0x40\n\t"
@ -3670,7 +3670,7 @@ void fe_invert(fe r, const fe a)
"BL fe_sq_op\n\t"
"MOV r12, #0x31\n\t"
"\n"
"L_fe_invert5_%=:\n\t"
"L_fe_invert5:\n\t"
"ADD r1, sp, #0x40\n\t"
"ADD r0, sp, #0x40\n\t"
"PUSH {r12}\n\t"
@ -3678,9 +3678,9 @@ void fe_invert(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_invert5_%=\n\t"
"BNE L_fe_invert5\n\t"
#else
"BNE.N L_fe_invert5_%=\n\t"
"BNE.N L_fe_invert5\n\t"
#endif
"ADD r2, sp, #0x20\n\t"
"ADD r1, sp, #0x40\n\t"
@ -3691,7 +3691,7 @@ void fe_invert(fe r, const fe a)
"BL fe_sq_op\n\t"
"MOV r12, #0x63\n\t"
"\n"
"L_fe_invert6_%=:\n\t"
"L_fe_invert6:\n\t"
"ADD r1, sp, #0x60\n\t"
"ADD r0, sp, #0x60\n\t"
"PUSH {r12}\n\t"
@ -3699,9 +3699,9 @@ void fe_invert(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_invert6_%=\n\t"
"BNE L_fe_invert6\n\t"
#else
"BNE.N L_fe_invert6_%=\n\t"
"BNE.N L_fe_invert6\n\t"
#endif
"ADD r2, sp, #0x40\n\t"
"ADD r1, sp, #0x60\n\t"
@ -3709,7 +3709,7 @@ void fe_invert(fe r, const fe a)
"BL fe_mul_op\n\t"
"MOV r12, #0x32\n\t"
"\n"
"L_fe_invert7_%=:\n\t"
"L_fe_invert7:\n\t"
"ADD r1, sp, #0x40\n\t"
"ADD r0, sp, #0x40\n\t"
"PUSH {r12}\n\t"
@ -3717,9 +3717,9 @@ void fe_invert(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_invert7_%=\n\t"
"BNE L_fe_invert7\n\t"
#else
"BNE.N L_fe_invert7_%=\n\t"
"BNE.N L_fe_invert7\n\t"
#endif
"ADD r2, sp, #0x20\n\t"
"ADD r1, sp, #0x40\n\t"
@ -3727,7 +3727,7 @@ void fe_invert(fe r, const fe a)
"BL fe_mul_op\n\t"
"MOV r12, #0x5\n\t"
"\n"
"L_fe_invert8_%=:\n\t"
"L_fe_invert8:\n\t"
"ADD r1, sp, #0x20\n\t"
"ADD r0, sp, #0x20\n\t"
"PUSH {r12}\n\t"
@ -3735,9 +3735,9 @@ void fe_invert(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_invert8_%=\n\t"
"BNE L_fe_invert8\n\t"
#else
"BNE.N L_fe_invert8_%=\n\t"
"BNE.N L_fe_invert8\n\t"
#endif
"MOV r2, sp\n\t"
"ADD r1, sp, #0x20\n\t"
@ -4275,7 +4275,7 @@ void fe_pow22523(fe r, const fe a)
"BL fe_sq_op\n\t"
"MOV r12, #0x4\n\t"
"\n"
"L_fe_pow22523_1_%=:\n\t"
"L_fe_pow22523_1:\n\t"
"ADD r1, sp, #0x20\n\t"
"ADD r0, sp, #0x20\n\t"
"PUSH {r12}\n\t"
@ -4283,9 +4283,9 @@ void fe_pow22523(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_pow22523_1_%=\n\t"
"BNE L_fe_pow22523_1\n\t"
#else
"BNE.N L_fe_pow22523_1_%=\n\t"
"BNE.N L_fe_pow22523_1\n\t"
#endif
"MOV r2, sp\n\t"
"ADD r1, sp, #0x20\n\t"
@ -4296,7 +4296,7 @@ void fe_pow22523(fe r, const fe a)
"BL fe_sq_op\n\t"
"MOV r12, #0x9\n\t"
"\n"
"L_fe_pow22523_2_%=:\n\t"
"L_fe_pow22523_2:\n\t"
"ADD r1, sp, #0x20\n\t"
"ADD r0, sp, #0x20\n\t"
"PUSH {r12}\n\t"
@ -4304,9 +4304,9 @@ void fe_pow22523(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_pow22523_2_%=\n\t"
"BNE L_fe_pow22523_2\n\t"
#else
"BNE.N L_fe_pow22523_2_%=\n\t"
"BNE.N L_fe_pow22523_2\n\t"
#endif
"MOV r2, sp\n\t"
"ADD r1, sp, #0x20\n\t"
@ -4317,7 +4317,7 @@ void fe_pow22523(fe r, const fe a)
"BL fe_sq_op\n\t"
"MOV r12, #0x13\n\t"
"\n"
"L_fe_pow22523_3_%=:\n\t"
"L_fe_pow22523_3:\n\t"
"ADD r1, sp, #0x40\n\t"
"ADD r0, sp, #0x40\n\t"
"PUSH {r12}\n\t"
@ -4325,9 +4325,9 @@ void fe_pow22523(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_pow22523_3_%=\n\t"
"BNE L_fe_pow22523_3\n\t"
#else
"BNE.N L_fe_pow22523_3_%=\n\t"
"BNE.N L_fe_pow22523_3\n\t"
#endif
"ADD r2, sp, #0x20\n\t"
"ADD r1, sp, #0x40\n\t"
@ -4335,7 +4335,7 @@ void fe_pow22523(fe r, const fe a)
"BL fe_mul_op\n\t"
"MOV r12, #0xa\n\t"
"\n"
"L_fe_pow22523_4_%=:\n\t"
"L_fe_pow22523_4:\n\t"
"ADD r1, sp, #0x20\n\t"
"ADD r0, sp, #0x20\n\t"
"PUSH {r12}\n\t"
@ -4343,9 +4343,9 @@ void fe_pow22523(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_pow22523_4_%=\n\t"
"BNE L_fe_pow22523_4\n\t"
#else
"BNE.N L_fe_pow22523_4_%=\n\t"
"BNE.N L_fe_pow22523_4\n\t"
#endif
"MOV r2, sp\n\t"
"ADD r1, sp, #0x20\n\t"
@ -4356,7 +4356,7 @@ void fe_pow22523(fe r, const fe a)
"BL fe_sq_op\n\t"
"MOV r12, #0x31\n\t"
"\n"
"L_fe_pow22523_5_%=:\n\t"
"L_fe_pow22523_5:\n\t"
"ADD r1, sp, #0x20\n\t"
"ADD r0, sp, #0x20\n\t"
"PUSH {r12}\n\t"
@ -4364,9 +4364,9 @@ void fe_pow22523(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_pow22523_5_%=\n\t"
"BNE L_fe_pow22523_5\n\t"
#else
"BNE.N L_fe_pow22523_5_%=\n\t"
"BNE.N L_fe_pow22523_5\n\t"
#endif
"MOV r2, sp\n\t"
"ADD r1, sp, #0x20\n\t"
@ -4377,7 +4377,7 @@ void fe_pow22523(fe r, const fe a)
"BL fe_sq_op\n\t"
"MOV r12, #0x63\n\t"
"\n"
"L_fe_pow22523_6_%=:\n\t"
"L_fe_pow22523_6:\n\t"
"ADD r1, sp, #0x40\n\t"
"ADD r0, sp, #0x40\n\t"
"PUSH {r12}\n\t"
@ -4385,9 +4385,9 @@ void fe_pow22523(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_pow22523_6_%=\n\t"
"BNE L_fe_pow22523_6\n\t"
#else
"BNE.N L_fe_pow22523_6_%=\n\t"
"BNE.N L_fe_pow22523_6\n\t"
#endif
"ADD r2, sp, #0x20\n\t"
"ADD r1, sp, #0x40\n\t"
@ -4395,7 +4395,7 @@ void fe_pow22523(fe r, const fe a)
"BL fe_mul_op\n\t"
"MOV r12, #0x32\n\t"
"\n"
"L_fe_pow22523_7_%=:\n\t"
"L_fe_pow22523_7:\n\t"
"ADD r1, sp, #0x20\n\t"
"ADD r0, sp, #0x20\n\t"
"PUSH {r12}\n\t"
@ -4403,9 +4403,9 @@ void fe_pow22523(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_pow22523_7_%=\n\t"
"BNE L_fe_pow22523_7\n\t"
#else
"BNE.N L_fe_pow22523_7_%=\n\t"
"BNE.N L_fe_pow22523_7\n\t"
#endif
"MOV r2, sp\n\t"
"ADD r1, sp, #0x20\n\t"
@ -4413,7 +4413,7 @@ void fe_pow22523(fe r, const fe a)
"BL fe_mul_op\n\t"
"MOV r12, #0x2\n\t"
"\n"
"L_fe_pow22523_8_%=:\n\t"
"L_fe_pow22523_8:\n\t"
"MOV r1, sp\n\t"
"MOV r0, sp\n\t"
"PUSH {r12}\n\t"
@ -4421,9 +4421,9 @@ void fe_pow22523(fe r, const fe a)
"POP {r12}\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_fe_pow22523_8_%=\n\t"
"BNE L_fe_pow22523_8\n\t"
#else
"BNE.N L_fe_pow22523_8_%=\n\t"
"BNE.N L_fe_pow22523_8\n\t"
#endif
"LDR r2, [sp, #100]\n\t"
"MOV r1, sp\n\t"
@ -6904,7 +6904,7 @@ void sc_muladd(byte* s, const byte* a, const byte* b, const byte* c)
#endif /* HAVE_CURVE25519 || HAVE_ED25519 */
#endif /* !__aarch64__ && __thumb__ */
#endif /* WOLFSSL_ARMASM */
#endif /* !defined(__aarch64__) && defined(__arm__) */
#endif /* !defined(__aarch64__) && defined(__thumb__) */
#endif /* WOLFSSL_ARMASM */
#endif /* WOLFSSL_ARMASM_INLINE */

View File

@ -113,7 +113,7 @@ Transform_Sha256_Len:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0xc0
ADR r3, L_SHA256_transform_len_k
# Copy digest to add in at end
/* Copy digest to add in at end */
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #8]
LDRD r8, r9, [r0, #16]
@ -122,9 +122,9 @@ Transform_Sha256_Len:
STRD r6, r7, [sp, #72]
STRD r8, r9, [sp, #80]
STRD r10, r11, [sp, #88]
# Start of loop processing a block
/* Start of loop processing a block */
L_SHA256_transform_len_begin:
# Load, Reverse and Store W - 64 bytes
/* Load, Reverse and Store W - 64 bytes */
LDR r4, [r1]
LDR r5, [r1, #4]
LDR r6, [r1, #8]
@ -169,9 +169,9 @@ L_SHA256_transform_len_begin:
LDR r4, [r0, #8]
EOR r11, r11, r4
MOV r12, #0x3
# Start of 16 rounds
/* Start of 16 rounds */
L_SHA256_transform_len_start:
# Round 0
/* Round 0 */
LDR r5, [r0, #16]
LDR r6, [r0, #20]
LDR r7, [r0, #24]
@ -203,7 +203,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #12]
STR r9, [r0, #28]
# Calc new W[0]
/* Calc new W[0] */
LDR r6, [sp, #56]
LDR r7, [sp, #36]
LDR r8, [sp, #4]
@ -218,7 +218,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp]
# Round 1
/* Round 1 */
LDR r5, [r0, #12]
LDR r6, [r0, #16]
LDR r7, [r0, #20]
@ -250,7 +250,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0, #8]
STR r9, [r0, #24]
# Calc new W[1]
/* Calc new W[1] */
LDR r6, [sp, #60]
LDR r7, [sp, #40]
LDR r8, [sp, #8]
@ -265,7 +265,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #4]
# Round 2
/* Round 2 */
LDR r5, [r0, #8]
LDR r6, [r0, #12]
LDR r7, [r0, #16]
@ -297,7 +297,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #4]
STR r9, [r0, #20]
# Calc new W[2]
/* Calc new W[2] */
LDR r6, [sp]
LDR r7, [sp, #44]
LDR r8, [sp, #12]
@ -312,7 +312,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #8]
# Round 3
/* Round 3 */
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
@ -344,7 +344,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0]
STR r9, [r0, #16]
# Calc new W[3]
/* Calc new W[3] */
LDR r6, [sp, #4]
LDR r7, [sp, #48]
LDR r8, [sp, #16]
@ -359,7 +359,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #12]
# Round 4
/* Round 4 */
LDR r5, [r0]
LDR r6, [r0, #4]
LDR r7, [r0, #8]
@ -391,7 +391,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #28]
STR r9, [r0, #12]
# Calc new W[4]
/* Calc new W[4] */
LDR r6, [sp, #8]
LDR r7, [sp, #52]
LDR r8, [sp, #20]
@ -406,7 +406,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #16]
# Round 5
/* Round 5 */
LDR r5, [r0, #28]
LDR r6, [r0]
LDR r7, [r0, #4]
@ -438,7 +438,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0, #24]
STR r9, [r0, #8]
# Calc new W[5]
/* Calc new W[5] */
LDR r6, [sp, #12]
LDR r7, [sp, #56]
LDR r8, [sp, #24]
@ -453,7 +453,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #20]
# Round 6
/* Round 6 */
LDR r5, [r0, #24]
LDR r6, [r0, #28]
LDR r7, [r0]
@ -485,7 +485,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #20]
STR r9, [r0, #4]
# Calc new W[6]
/* Calc new W[6] */
LDR r6, [sp, #16]
LDR r7, [sp, #60]
LDR r8, [sp, #28]
@ -500,7 +500,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #24]
# Round 7
/* Round 7 */
LDR r5, [r0, #20]
LDR r6, [r0, #24]
LDR r7, [r0, #28]
@ -532,7 +532,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0, #16]
STR r9, [r0]
# Calc new W[7]
/* Calc new W[7] */
LDR r6, [sp, #20]
LDR r7, [sp]
LDR r8, [sp, #32]
@ -547,7 +547,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #28]
# Round 8
/* Round 8 */
LDR r5, [r0, #16]
LDR r6, [r0, #20]
LDR r7, [r0, #24]
@ -579,7 +579,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #12]
STR r9, [r0, #28]
# Calc new W[8]
/* Calc new W[8] */
LDR r6, [sp, #24]
LDR r7, [sp, #4]
LDR r8, [sp, #36]
@ -594,7 +594,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #32]
# Round 9
/* Round 9 */
LDR r5, [r0, #12]
LDR r6, [r0, #16]
LDR r7, [r0, #20]
@ -626,7 +626,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0, #8]
STR r9, [r0, #24]
# Calc new W[9]
/* Calc new W[9] */
LDR r6, [sp, #28]
LDR r7, [sp, #8]
LDR r8, [sp, #40]
@ -641,7 +641,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #36]
# Round 10
/* Round 10 */
LDR r5, [r0, #8]
LDR r6, [r0, #12]
LDR r7, [r0, #16]
@ -673,7 +673,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #4]
STR r9, [r0, #20]
# Calc new W[10]
/* Calc new W[10] */
LDR r6, [sp, #32]
LDR r7, [sp, #12]
LDR r8, [sp, #44]
@ -688,7 +688,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #40]
# Round 11
/* Round 11 */
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
@ -720,7 +720,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0]
STR r9, [r0, #16]
# Calc new W[11]
/* Calc new W[11] */
LDR r6, [sp, #36]
LDR r7, [sp, #16]
LDR r8, [sp, #48]
@ -735,7 +735,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #44]
# Round 12
/* Round 12 */
LDR r5, [r0]
LDR r6, [r0, #4]
LDR r7, [r0, #8]
@ -767,7 +767,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #28]
STR r9, [r0, #12]
# Calc new W[12]
/* Calc new W[12] */
LDR r6, [sp, #40]
LDR r7, [sp, #20]
LDR r8, [sp, #52]
@ -782,7 +782,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #48]
# Round 13
/* Round 13 */
LDR r5, [r0, #28]
LDR r6, [r0]
LDR r7, [r0, #4]
@ -814,7 +814,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0, #24]
STR r9, [r0, #8]
# Calc new W[13]
/* Calc new W[13] */
LDR r6, [sp, #44]
LDR r7, [sp, #24]
LDR r8, [sp, #56]
@ -829,7 +829,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #52]
# Round 14
/* Round 14 */
LDR r5, [r0, #24]
LDR r6, [r0, #28]
LDR r7, [r0]
@ -861,7 +861,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #20]
STR r9, [r0, #4]
# Calc new W[14]
/* Calc new W[14] */
LDR r6, [sp, #48]
LDR r7, [sp, #28]
LDR r8, [sp, #60]
@ -876,7 +876,7 @@ L_SHA256_transform_len_start:
ADD r4, r4, r5
ADD r9, r9, r4
STR r9, [sp, #56]
# Round 15
/* Round 15 */
LDR r5, [r0, #20]
LDR r6, [r0, #24]
LDR r7, [r0, #28]
@ -908,7 +908,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0, #16]
STR r9, [r0]
# Calc new W[15]
/* Calc new W[15] */
LDR r6, [sp, #52]
LDR r7, [sp, #32]
LDR r8, [sp]
@ -925,12 +925,12 @@ L_SHA256_transform_len_start:
STR r9, [sp, #60]
ADD r3, r3, #0x40
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
#ifdef __GNUC__
BNE L_SHA256_transform_len_start
#else
BNE.N L_SHA256_transform_len_start
BNE.W L_SHA256_transform_len_start
#endif
# Round 0
/* Round 0 */
LDR r5, [r0, #16]
LDR r6, [r0, #20]
LDR r7, [r0, #24]
@ -962,7 +962,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #12]
STR r9, [r0, #28]
# Round 1
/* Round 1 */
LDR r5, [r0, #12]
LDR r6, [r0, #16]
LDR r7, [r0, #20]
@ -994,7 +994,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0, #8]
STR r9, [r0, #24]
# Round 2
/* Round 2 */
LDR r5, [r0, #8]
LDR r6, [r0, #12]
LDR r7, [r0, #16]
@ -1026,7 +1026,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #4]
STR r9, [r0, #20]
# Round 3
/* Round 3 */
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
@ -1058,7 +1058,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0]
STR r9, [r0, #16]
# Round 4
/* Round 4 */
LDR r5, [r0]
LDR r6, [r0, #4]
LDR r7, [r0, #8]
@ -1090,7 +1090,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #28]
STR r9, [r0, #12]
# Round 5
/* Round 5 */
LDR r5, [r0, #28]
LDR r6, [r0]
LDR r7, [r0, #4]
@ -1122,7 +1122,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0, #24]
STR r9, [r0, #8]
# Round 6
/* Round 6 */
LDR r5, [r0, #24]
LDR r6, [r0, #28]
LDR r7, [r0]
@ -1154,7 +1154,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #20]
STR r9, [r0, #4]
# Round 7
/* Round 7 */
LDR r5, [r0, #20]
LDR r6, [r0, #24]
LDR r7, [r0, #28]
@ -1186,7 +1186,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0, #16]
STR r9, [r0]
# Round 8
/* Round 8 */
LDR r5, [r0, #16]
LDR r6, [r0, #20]
LDR r7, [r0, #24]
@ -1218,7 +1218,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #12]
STR r9, [r0, #28]
# Round 9
/* Round 9 */
LDR r5, [r0, #12]
LDR r6, [r0, #16]
LDR r7, [r0, #20]
@ -1250,7 +1250,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0, #8]
STR r9, [r0, #24]
# Round 10
/* Round 10 */
LDR r5, [r0, #8]
LDR r6, [r0, #12]
LDR r7, [r0, #16]
@ -1282,7 +1282,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #4]
STR r9, [r0, #20]
# Round 11
/* Round 11 */
LDR r5, [r0, #4]
LDR r6, [r0, #8]
LDR r7, [r0, #12]
@ -1314,7 +1314,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0]
STR r9, [r0, #16]
# Round 12
/* Round 12 */
LDR r5, [r0]
LDR r6, [r0, #4]
LDR r7, [r0, #8]
@ -1346,7 +1346,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #28]
STR r9, [r0, #12]
# Round 13
/* Round 13 */
LDR r5, [r0, #28]
LDR r6, [r0]
LDR r7, [r0, #4]
@ -1378,7 +1378,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0, #24]
STR r9, [r0, #8]
# Round 14
/* Round 14 */
LDR r5, [r0, #24]
LDR r6, [r0, #28]
LDR r7, [r0]
@ -1410,7 +1410,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r11
STR r8, [r0, #20]
STR r9, [r0, #4]
# Round 15
/* Round 15 */
LDR r5, [r0, #20]
LDR r6, [r0, #24]
LDR r7, [r0, #28]
@ -1442,7 +1442,7 @@ L_SHA256_transform_len_start:
ADD r9, r9, r10
STR r8, [r0, #16]
STR r9, [r0]
# Add in digest from start
/* Add in digest from start */
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #8]
LDRD r8, r9, [sp, #64]
@ -1470,14 +1470,14 @@ L_SHA256_transform_len_start:
SUBS r2, r2, #0x40
SUB r3, r3, #0xc0
ADD r1, r1, #0x40
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
#ifdef __GNUC__
BNE L_SHA256_transform_len_begin
#else
BNE.N L_SHA256_transform_len_begin
BNE.W L_SHA256_transform_len_begin
#endif
ADD sp, sp, #0xc0
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
# Cycle Count = 1874
/* Cycle Count = 1874 */
.size Transform_Sha256_Len,.-Transform_Sha256_Len
#endif /* WOLFSSL_ARMASM_NO_NEON */
#endif /* !NO_SHA256 */

View File

@ -39,7 +39,7 @@
#ifdef WOLFSSL_ARMASM_INLINE
#ifdef WOLFSSL_ARMASM
#if !defined(__aarch64__) && defined(__arm__)
#if !defined(__aarch64__) && defined(__thumb__)
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
@ -84,8 +84,8 @@ void Transform_Sha256_Len(wc_Sha256* sha256, const byte* data, word32 len)
register wc_Sha256* sha256 __asm__ ("r0") = (wc_Sha256*)sha256_p;
register const byte* data __asm__ ("r1") = (const byte*)data_p;
register word32 len __asm__ ("r2") = (word32)len_p;
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
register uint32_t* L_SHA256_transform_len_k_c __asm__ ("r3") = (uint32_t*)&L_SHA256_transform_len_k;
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
__asm__ __volatile__ (
"SUB sp, sp, #0xc0\n\t"
@ -101,7 +101,7 @@ void Transform_Sha256_Len(wc_Sha256* sha256, const byte* data, word32 len)
"STRD r10, r11, [sp, #88]\n\t"
/* Start of loop processing a block */
"\n"
"L_SHA256_transform_len_begin_%=:\n\t"
"L_SHA256_transform_len_begin:\n\t"
/* Load, Reverse and Store W - 64 bytes */
"LDR r4, [%[data]]\n\t"
"LDR r5, [%[data], #4]\n\t"
@ -149,7 +149,7 @@ void Transform_Sha256_Len(wc_Sha256* sha256, const byte* data, word32 len)
"MOV r12, #0x3\n\t"
/* Start of 16 rounds */
"\n"
"L_SHA256_transform_len_start_%=:\n\t"
"L_SHA256_transform_len_start:\n\t"
/* Round 0 */
"LDR r5, [%[sha256], #16]\n\t"
"LDR r6, [%[sha256], #20]\n\t"
@ -904,10 +904,10 @@ void Transform_Sha256_Len(wc_Sha256* sha256, const byte* data, word32 len)
"STR r9, [sp, #60]\n\t"
"ADD r3, r3, #0x40\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_SHA256_transform_len_start_%=\n\t"
#ifdef __GNUC__
"BNE L_SHA256_transform_len_start\n\t"
#else
"BNE.N L_SHA256_transform_len_start_%=\n\t"
"BNE.W L_SHA256_transform_len_start\n\t"
#endif
/* Round 0 */
"LDR r5, [%[sha256], #16]\n\t"
@ -1449,14 +1449,20 @@ void Transform_Sha256_Len(wc_Sha256* sha256, const byte* data, word32 len)
"SUBS %[len], %[len], #0x40\n\t"
"SUB r3, r3, #0xc0\n\t"
"ADD %[data], %[data], #0x40\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_SHA256_transform_len_begin_%=\n\t"
#ifdef __GNUC__
"BNE L_SHA256_transform_len_begin\n\t"
#else
"BNE.N L_SHA256_transform_len_begin_%=\n\t"
"BNE.W L_SHA256_transform_len_begin\n\t"
#endif
"ADD sp, sp, #0xc0\n\t"
: [sha256] "+r" (sha256), [data] "+r" (data), [len] "+r" (len), [L_SHA256_transform_len_k] "+r" (L_SHA256_transform_len_k_c)
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
: [sha256] "+r" (sha256), [data] "+r" (data), [len] "+r" (len),
[L_SHA256_transform_len_k] "+r" (L_SHA256_transform_len_k_c)
:
#else
: [sha256] "+r" (sha256), [data] "+r" (data), [len] "+r" (len)
: [L_SHA256_transform_len_k] "r" (L_SHA256_transform_len_k)
#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
: "memory", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12"
);
}
@ -1465,7 +1471,7 @@ void Transform_Sha256_Len(wc_Sha256* sha256, const byte* data, word32 len)
#endif /* !NO_SHA256 */
#endif /* !__aarch64__ && __thumb__ */
#endif /* WOLFSSL_ARMASM */
#endif /* !defined(__aarch64__) && defined(__arm__) */
#endif /* !defined(__aarch64__) && defined(__thumb__) */
#endif /* WOLFSSL_ARMASM */
#endif /* WOLFSSL_ARMASM_INLINE */

View File

@ -209,7 +209,7 @@ Transform_Sha512_Len:
PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr}
SUB sp, sp, #0xc0
ADR r3, L_SHA512_transform_len_k
# Copy digest to add in at end
/* Copy digest to add in at end */
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #8]
LDRD r8, r9, [r0, #16]
@ -226,9 +226,9 @@ Transform_Sha512_Len:
STRD r6, r7, [sp, #168]
STRD r8, r9, [sp, #176]
STRD r10, r11, [sp, #184]
# Start of loop processing a block
/* Start of loop processing a block */
L_SHA512_transform_len_begin:
# Load, Reverse and Store W
/* Load, Reverse and Store W */
LDR r4, [r1]
LDR r5, [r1, #4]
LDR r6, [r1, #8]
@ -325,15 +325,15 @@ L_SHA512_transform_len_begin:
STR r8, [sp, #116]
STR r11, [sp, #120]
STR r10, [sp, #124]
# Pre-calc: b ^ c
/* Pre-calc: b ^ c */
LDRD r10, r11, [r0, #8]
LDRD r4, r5, [r0, #16]
EOR r10, r10, r4
EOR r11, r11, r5
MOV r12, #0x4
# Start of 16 rounds
/* Start of 16 rounds */
L_SHA512_transform_len_start:
# Round 0
/* Round 0 */
LDRD r4, r5, [r0, #32]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -413,7 +413,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #56]
MOV r10, r8
MOV r11, r9
# Calc new W[0]
/* Calc new W[0] */
LDRD r4, r5, [sp, #112]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -457,7 +457,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp]
# Round 1
/* Round 1 */
LDRD r4, r5, [r0, #24]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -537,7 +537,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #48]
MOV r10, r8
MOV r11, r9
# Calc new W[1]
/* Calc new W[1] */
LDRD r4, r5, [sp, #120]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -581,7 +581,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #8]
# Round 2
/* Round 2 */
LDRD r4, r5, [r0, #16]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -661,7 +661,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #40]
MOV r10, r8
MOV r11, r9
# Calc new W[2]
/* Calc new W[2] */
LDRD r4, r5, [sp]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -705,7 +705,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #16]
# Round 3
/* Round 3 */
LDRD r4, r5, [r0, #8]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -785,7 +785,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #32]
MOV r10, r8
MOV r11, r9
# Calc new W[3]
/* Calc new W[3] */
LDRD r4, r5, [sp, #8]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -829,7 +829,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #24]
# Round 4
/* Round 4 */
LDRD r4, r5, [r0]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -909,7 +909,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #24]
MOV r10, r8
MOV r11, r9
# Calc new W[4]
/* Calc new W[4] */
LDRD r4, r5, [sp, #16]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -953,7 +953,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #32]
# Round 5
/* Round 5 */
LDRD r4, r5, [r0, #56]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -1033,7 +1033,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #16]
MOV r10, r8
MOV r11, r9
# Calc new W[5]
/* Calc new W[5] */
LDRD r4, r5, [sp, #24]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -1077,7 +1077,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #40]
# Round 6
/* Round 6 */
LDRD r4, r5, [r0, #48]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -1157,7 +1157,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #8]
MOV r10, r8
MOV r11, r9
# Calc new W[6]
/* Calc new W[6] */
LDRD r4, r5, [sp, #32]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -1201,7 +1201,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #48]
# Round 7
/* Round 7 */
LDRD r4, r5, [r0, #40]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -1281,7 +1281,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0]
MOV r10, r8
MOV r11, r9
# Calc new W[7]
/* Calc new W[7] */
LDRD r4, r5, [sp, #40]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -1325,7 +1325,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #56]
# Round 8
/* Round 8 */
LDRD r4, r5, [r0, #32]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -1405,7 +1405,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #56]
MOV r10, r8
MOV r11, r9
# Calc new W[8]
/* Calc new W[8] */
LDRD r4, r5, [sp, #48]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -1449,7 +1449,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #64]
# Round 9
/* Round 9 */
LDRD r4, r5, [r0, #24]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -1529,7 +1529,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #48]
MOV r10, r8
MOV r11, r9
# Calc new W[9]
/* Calc new W[9] */
LDRD r4, r5, [sp, #56]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -1573,7 +1573,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #72]
# Round 10
/* Round 10 */
LDRD r4, r5, [r0, #16]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -1653,7 +1653,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #40]
MOV r10, r8
MOV r11, r9
# Calc new W[10]
/* Calc new W[10] */
LDRD r4, r5, [sp, #64]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -1697,7 +1697,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #80]
# Round 11
/* Round 11 */
LDRD r4, r5, [r0, #8]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -1777,7 +1777,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #32]
MOV r10, r8
MOV r11, r9
# Calc new W[11]
/* Calc new W[11] */
LDRD r4, r5, [sp, #72]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -1821,7 +1821,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #88]
# Round 12
/* Round 12 */
LDRD r4, r5, [r0]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -1901,7 +1901,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #24]
MOV r10, r8
MOV r11, r9
# Calc new W[12]
/* Calc new W[12] */
LDRD r4, r5, [sp, #80]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -1945,7 +1945,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #96]
# Round 13
/* Round 13 */
LDRD r4, r5, [r0, #56]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -2025,7 +2025,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #16]
MOV r10, r8
MOV r11, r9
# Calc new W[13]
/* Calc new W[13] */
LDRD r4, r5, [sp, #88]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -2069,7 +2069,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #104]
# Round 14
/* Round 14 */
LDRD r4, r5, [r0, #48]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -2149,7 +2149,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #8]
MOV r10, r8
MOV r11, r9
# Calc new W[14]
/* Calc new W[14] */
LDRD r4, r5, [sp, #96]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -2193,7 +2193,7 @@ L_SHA512_transform_len_start:
ADDS r4, r4, r6
ADC r5, r5, r7
STRD r4, r5, [sp, #112]
# Round 15
/* Round 15 */
LDRD r4, r5, [r0, #40]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -2273,7 +2273,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0]
MOV r10, r8
MOV r11, r9
# Calc new W[15]
/* Calc new W[15] */
LDRD r4, r5, [sp, #104]
LSRS r6, r4, #19
LSRS r7, r5, #19
@ -2319,12 +2319,12 @@ L_SHA512_transform_len_start:
STRD r4, r5, [sp, #120]
ADD r3, r3, #0x80
SUBS r12, r12, #0x1
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
#ifdef __GNUC__
BNE L_SHA512_transform_len_start
#else
BNE.N L_SHA512_transform_len_start
BNE.W L_SHA512_transform_len_start
#endif
# Round 0
/* Round 0 */
LDRD r4, r5, [r0, #32]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -2404,7 +2404,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #56]
MOV r10, r8
MOV r11, r9
# Round 1
/* Round 1 */
LDRD r4, r5, [r0, #24]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -2484,7 +2484,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #48]
MOV r10, r8
MOV r11, r9
# Round 2
/* Round 2 */
LDRD r4, r5, [r0, #16]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -2564,7 +2564,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #40]
MOV r10, r8
MOV r11, r9
# Round 3
/* Round 3 */
LDRD r4, r5, [r0, #8]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -2644,7 +2644,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #32]
MOV r10, r8
MOV r11, r9
# Round 4
/* Round 4 */
LDRD r4, r5, [r0]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -2724,7 +2724,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #24]
MOV r10, r8
MOV r11, r9
# Round 5
/* Round 5 */
LDRD r4, r5, [r0, #56]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -2804,7 +2804,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #16]
MOV r10, r8
MOV r11, r9
# Round 6
/* Round 6 */
LDRD r4, r5, [r0, #48]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -2884,7 +2884,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #8]
MOV r10, r8
MOV r11, r9
# Round 7
/* Round 7 */
LDRD r4, r5, [r0, #40]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -2964,7 +2964,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0]
MOV r10, r8
MOV r11, r9
# Round 8
/* Round 8 */
LDRD r4, r5, [r0, #32]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -3044,7 +3044,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #56]
MOV r10, r8
MOV r11, r9
# Round 9
/* Round 9 */
LDRD r4, r5, [r0, #24]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -3124,7 +3124,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #48]
MOV r10, r8
MOV r11, r9
# Round 10
/* Round 10 */
LDRD r4, r5, [r0, #16]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -3204,7 +3204,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #40]
MOV r10, r8
MOV r11, r9
# Round 11
/* Round 11 */
LDRD r4, r5, [r0, #8]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -3284,7 +3284,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #32]
MOV r10, r8
MOV r11, r9
# Round 12
/* Round 12 */
LDRD r4, r5, [r0]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -3364,7 +3364,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #24]
MOV r10, r8
MOV r11, r9
# Round 13
/* Round 13 */
LDRD r4, r5, [r0, #56]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -3444,7 +3444,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #16]
MOV r10, r8
MOV r11, r9
# Round 14
/* Round 14 */
LDRD r4, r5, [r0, #48]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -3524,7 +3524,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0, #8]
MOV r10, r8
MOV r11, r9
# Round 15
/* Round 15 */
LDRD r4, r5, [r0, #40]
LSRS r6, r4, #14
LSRS r7, r5, #14
@ -3604,7 +3604,7 @@ L_SHA512_transform_len_start:
STRD r6, r7, [r0]
MOV r10, r8
MOV r11, r9
# Add in digest from start
/* Add in digest from start */
LDRD r4, r5, [r0]
LDRD r6, r7, [r0, #8]
LDRD r8, r9, [sp, #128]
@ -3656,15 +3656,15 @@ L_SHA512_transform_len_start:
SUBS r2, r2, #0x80
SUB r3, r3, #0x200
ADD r1, r1, #0x80
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
#ifdef __GNUC__
BNE L_SHA512_transform_len_begin
#else
BNE.N L_SHA512_transform_len_begin
BNE.W L_SHA512_transform_len_begin
#endif
EOR r0, r0, r0
ADD sp, sp, #0xc0
POP {r4, r5, r6, r7, r8, r9, r10, r11, pc}
# Cycle Count = 5021
/* Cycle Count = 5021 */
.size Transform_Sha512_Len,.-Transform_Sha512_Len
#endif /* WOLFSSL_ARMASM_NO_NEON */
#endif /* WOLFSSL_SHA512 */

View File

@ -39,7 +39,7 @@
#ifdef WOLFSSL_ARMASM_INLINE
#ifdef WOLFSSL_ARMASM
#if !defined(__aarch64__) && defined(__arm__)
#if !defined(__aarch64__) && defined(__thumb__)
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
@ -108,8 +108,8 @@ void Transform_Sha512_Len(wc_Sha512* sha512, const byte* data, word32 len)
register wc_Sha512* sha512 __asm__ ("r0") = (wc_Sha512*)sha512_p;
register const byte* data __asm__ ("r1") = (const byte*)data_p;
register word32 len __asm__ ("r2") = (word32)len_p;
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
register uint64_t* L_SHA512_transform_len_k_c __asm__ ("r3") = (uint64_t*)&L_SHA512_transform_len_k;
#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */
__asm__ __volatile__ (
"SUB sp, sp, #0xc0\n\t"
@ -133,7 +133,7 @@ void Transform_Sha512_Len(wc_Sha512* sha512, const byte* data, word32 len)
"STRD r10, r11, [sp, #184]\n\t"
/* Start of loop processing a block */
"\n"
"L_SHA512_transform_len_begin_%=:\n\t"
"L_SHA512_transform_len_begin:\n\t"
/* Load, Reverse and Store W */
"LDR r4, [%[data]]\n\t"
"LDR r5, [%[data], #4]\n\t"
@ -239,7 +239,7 @@ void Transform_Sha512_Len(wc_Sha512* sha512, const byte* data, word32 len)
"MOV r12, #0x4\n\t"
/* Start of 16 rounds */
"\n"
"L_SHA512_transform_len_start_%=:\n\t"
"L_SHA512_transform_len_start:\n\t"
/* Round 0 */
"LDRD r4, r5, [%[sha512], #32]\n\t"
"LSRS r6, r4, #14\n\t"
@ -2226,10 +2226,10 @@ void Transform_Sha512_Len(wc_Sha512* sha512, const byte* data, word32 len)
"STRD r4, r5, [sp, #120]\n\t"
"ADD r3, r3, #0x80\n\t"
"SUBS r12, r12, #0x1\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_SHA512_transform_len_start_%=\n\t"
#ifdef __GNUC__
"BNE L_SHA512_transform_len_start\n\t"
#else
"BNE.N L_SHA512_transform_len_start_%=\n\t"
"BNE.W L_SHA512_transform_len_start\n\t"
#endif
/* Round 0 */
"LDRD r4, r5, [%[sha512], #32]\n\t"
@ -3563,15 +3563,21 @@ void Transform_Sha512_Len(wc_Sha512* sha512, const byte* data, word32 len)
"SUBS %[len], %[len], #0x80\n\t"
"SUB r3, r3, #0x200\n\t"
"ADD %[data], %[data], #0x80\n\t"
#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__)
"BNE L_SHA512_transform_len_begin_%=\n\t"
#ifdef __GNUC__
"BNE L_SHA512_transform_len_begin\n\t"
#else
"BNE.N L_SHA512_transform_len_begin_%=\n\t"
"BNE.W L_SHA512_transform_len_begin\n\t"
#endif
"EOR r0, r0, r0\n\t"
"ADD sp, sp, #0xc0\n\t"
: [sha512] "+r" (sha512), [data] "+r" (data), [len] "+r" (len), [L_SHA512_transform_len_k] "+r" (L_SHA512_transform_len_k_c)
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
: [sha512] "+r" (sha512), [data] "+r" (data), [len] "+r" (len),
[L_SHA512_transform_len_k] "+r" (L_SHA512_transform_len_k_c)
:
#else
: [sha512] "+r" (sha512), [data] "+r" (data), [len] "+r" (len)
: [L_SHA512_transform_len_k] "r" (L_SHA512_transform_len_k)
#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */
: "memory", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12"
);
}
@ -3580,7 +3586,7 @@ void Transform_Sha512_Len(wc_Sha512* sha512, const byte* data, word32 len)
#endif /* WOLFSSL_SHA512 */
#endif /* !__aarch64__ && __thumb__ */
#endif /* WOLFSSL_ARMASM */
#endif /* !defined(__aarch64__) && defined(__arm__) */
#endif /* !defined(__aarch64__) && defined(__thumb__) */
#endif /* WOLFSSL_ARMASM */
#endif /* WOLFSSL_ARMASM_INLINE */

File diff suppressed because it is too large

View File

@ -55,6 +55,7 @@
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
#define __volatile__ volatile
#define WOLFSSL_NO_VAR_ASSIGN_REG
#endif /* __IAR_SYSTEMS_ICC__ */
#ifdef __KEIL__
#define __asm__ __asm
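
The WOLFSSL_NO_VAR_ASSIGN_REG define added above is what drives the constraint-list split seen in the Transform_Sha256_Len/Transform_Sha512_Len hunks: with GCC the constant-table pointer stays a register-pinned local passed as a read/write operand, while under IAR it is handed in as a plain input operand. A simplified sketch, assuming a hypothetical k_table symbol in place of the real wolfSSL tables:

#include <stdint.h>

static const uint32_t k_table[4] = {   /* stand-in for the real round constants */
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
};

void use_table(void)
{
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
    /* GCC path: pin the pointer to r3 and mark it as consumed by listing it
     * as a read/write operand. */
    register const uint32_t* k_c __asm__ ("r3") = k_table;
#endif
    __asm__ __volatile__ (
        "LDR r12, [%[k]]\n\t"              /* load the first constant (illustrative) */
#ifndef WOLFSSL_NO_VAR_ASSIGN_REG
        : [k] "+r" (k_c)
        :
#else
        /* IAR path: no register-pinned local; pass the table as an input. */
        :
        : [k] "r" (k_table)
#endif
        : "memory", "r12"
    );
}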

View File

@ -55,6 +55,7 @@
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
#define __volatile__ volatile
#define WOLFSSL_NO_VAR_ASSIGN_REG
#endif /* __IAR_SYSTEMS_ICC__ */
#ifdef __KEIL__
#define __asm__ __asm

View File

@ -59,6 +59,7 @@
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
#define __volatile__ volatile
#define WOLFSSL_NO_VAR_ASSIGN_REG
#endif /* __IAR_SYSTEMS_ICC__ */
#ifdef __KEIL__
#define __asm__ __asm

View File

@ -59,6 +59,7 @@
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
#define __volatile__ volatile
#define WOLFSSL_NO_VAR_ASSIGN_REG
#endif /* __IAR_SYSTEMS_ICC__ */
#ifdef __KEIL__
#define __asm__ __asm

File diff suppressed because it is too large

View File

@ -55,6 +55,7 @@
#ifdef __IAR_SYSTEMS_ICC__
#define __asm__ asm
#define __volatile__ volatile
#define WOLFSSL_NO_VAR_ASSIGN_REG
#endif /* __IAR_SYSTEMS_ICC__ */
#ifdef __KEIL__
#define __asm__ __asm