From 786e21b107cc381b4548ebe9c789ee6e40108d8b Mon Sep 17 00:00:00 2001
From: Sean Parkinson
Date: Tue, 12 May 2020 23:28:39 +1000
Subject: [PATCH] Fix SP Cortex-M ASM comments

---
 wolfcrypt/src/sp_cortexm.c | 324 ++++++++++++++++++-------------------
 1 file changed, 162 insertions(+), 162 deletions(-)

diff --git a/wolfcrypt/src/sp_cortexm.c b/wolfcrypt/src/sp_cortexm.c
index ec2a5003b..a46954f69 100644
--- a/wolfcrypt/src/sp_cortexm.c
+++ b/wolfcrypt/src/sp_cortexm.c
@@ -13826,10 +13826,10 @@ static int sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a, const sp_digit*
         "ldr r8, [%[a], #20]\n\t"
         "ldr r9, [%[a], #24]\n\t"
         "ldr r10, [%[a], #28]\n\t"
-        "# Clear overflow and underflow\n\t"
+        /* Clear overflow and underflow */
         "mov r14, #0\n\t"
         "mov r12, #0\n\t"
-        "# t[0] = 1 1 0 -1 -1 -1 -1 0\n\t"
+        /* t[0] = 1 1 0 -1 -1 -1 -1 0 */
         "adds r11, r2, r3\n\t"
         "adc r14, r14, #0\n\t"
         "subs r11, r11, r5\n\t"
@@ -13840,11 +13840,11 @@ static int sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a, const sp_digit*
         "sbc r12, r12, #0\n\t"
         "subs r11, r11, r9\n\t"
         "sbc r12, r12, #0\n\t"
-        "# Store t[0]\n\t"
+        /* Store t[0] */
         "str r11, [sp, #0]\n\t"
         "neg r12, r12\n\t"
         "mov r11, #0\n\t"
-        "# t[1] = 0 1 1 0 -1 -1 -1 -1\n\t"
+        /* t[1] = 0 1 1 0 -1 -1 -1 -1 */
         "adds r14, r14, r3\n\t"
         "adc r11, r11, #0\n\t"
         "adds r14, r14, r4\n\t"
@@ -13860,11 +13860,11 @@ static int sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a, const sp_digit*
         "sbc r12, r12, #0\n\t"
         "subs r14, r14, r10\n\t"
         "sbc r12, r12, #0\n\t"
-        "# Store t[1]\n\t"
+        /* Store t[1] */
         "str r14, [sp, #4]\n\t"
         "neg r12, r12\n\t"
         "mov r14, #0\n\t"
-        "# t[2] = 0 0 1 1 0 -1 -1 -1\n\t"
+        /* t[2] = 0 0 1 1 0 -1 -1 -1 */
         "adds r11, r11, r4\n\t"
         "adc r14, r14, #0\n\t"
         "adds r11, r11, r5\n\t"
@@ -13878,11 +13878,11 @@ static int sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a, const sp_digit*
         "sbc r12, r12, #0\n\t"
         "subs r11, r11, r10\n\t"
         "sbc r12, r12, #0\n\t"
-        "# Store t[2]\n\t"
+        /* Store t[2] */
         "str r11, [sp, #8]\n\t"
         "neg r12, r12\n\t"
         "mov r11, #0\n\t"
-        "# t[3] = -1 -1 0 2 2 1 0 -1\n\t"
+        /* t[3] = -1 -1 0 2 2 1 0 -1 */
         "adds r14, r14, r5\n\t"
         "adc r11, r11, #0\n\t"
         "adds r14, r14, r5\n\t"
@@ -13902,11 +13902,11 @@ static int sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a, const sp_digit*
         "sbc r12, r12, #0\n\t"
         "subs r14, r14, r10\n\t"
         "sbc r12, r12, #0\n\t"
-        "# Store t[3]\n\t"
+        /* Store t[3] */
         "str r14, [sp, #12]\n\t"
         "neg r12, r12\n\t"
         "mov r14, #0\n\t"
-        "# t[4] = 0 -1 -1 0 2 2 1 0\n\t"
+        /* t[4] = 0 -1 -1 0 2 2 1 0 */
         "adds r11, r11, r6\n\t"
         "adc r14, r14, #0\n\t"
         "adds r11, r11, r6\n\t"
@@ -13924,11 +13924,11 @@ static int sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a, const sp_digit*
         "sbc r12, r12, #0\n\t"
         "subs r11, r11, r4\n\t"
         "sbc r12, r12, #0\n\t"
-        "# Store t[4]\n\t"
+        /* Store t[4] */
         "str r11, [sp, #16]\n\t"
         "neg r12, r12\n\t"
         "mov r11, #0\n\t"
-        "# t[5] = 0 0 -1 -1 0 2 2 1\n\t"
+        /* t[5] = 0 0 -1 -1 0 2 2 1 */
         "adds r14, r14, r8\n\t"
         "adc r11, r11, #0\n\t"
         "adds r14, r14, r8\n\t"
@@ -13946,11 +13946,11 @@ static int sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a, const sp_digit*
         "sbc r12, r12, #0\n\t"
         "subs r14, r14, r5\n\t"
         "sbc r12, r12, #0\n\t"
-        "# Store t[5]\n\t"
+        /* Store t[5] */
         "str r14, [sp, #20]\n\t"
         "neg r12, r12\n\t"
         "mov r14, #0\n\t"
-        "# t[6] = -1 -1 0 0 0 1 3 2\n\t"
+        /* t[6] = -1 -1 0 0 0 1 3 2 */
         "adds r11, r11, r8\n\t"
         "adc r14, r14, #0\n\t"
         "adds r11, r11, r9\n\t"
@@ -13970,11 +13970,11 @@ static int sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a, const sp_digit*
         "sbc r12, r12, #0\n\t"
#0\n\t" "subs r11, r11, r3\n\t" "sbc r12, r12, #0\n\t" - "# Store t[6]\n\t" + /* Store t[6] */ "mov r9, r11\n\t" "neg r12, r12\n\t" "mov r11, #0\n\t" - "# t[7] = 1 0 -1 -1 -1 -1 0 3\n\t" + /* t[7] = 1 0 -1 -1 -1 -1 0 3 */ "adds r14, r14, r2\n\t" "adc r11, r11, #0\n\t" "adds r14, r14, r10\n\t" @@ -13994,8 +13994,8 @@ static int sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a, const sp_digit* "sbc r12, r12, #0\n\t" "subs r14, r14, r8\n\t" "sbc r12, r12, #0\n\t" - "# Store t[7]\n\t" - "# Load intermediate\n\t" + /* Store t[7] */ + /* Load intermediate */ "ldr r2, [sp, #0]\n\t" "ldr r3, [sp, #4]\n\t" "ldr r4, [sp, #8]\n\t" @@ -14003,8 +14003,8 @@ static int sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a, const sp_digit* "ldr r6, [sp, #16]\n\t" "ldr r8, [sp, #20]\n\t" "neg r12, r12\n\t" - "# Add overflow\n\t" - "# Subtract underflow - add neg underflow\n\t" + /* Add overflow */ + /* Subtract underflow - add neg underflow */ "adds r2, r2, r11\n\t" "adcs r3, r3, #0\n\t" "adcs r4, r4, #0\n\t" @@ -14013,8 +14013,8 @@ static int sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a, const sp_digit* "adcs r8, r8, #0\n\t" "adcs r9, r9, r12\n\t" "adc r14, r14, r11\n\t" - "# Subtract overflow\n\t" - "# Add underflow - subtract neg underflow\n\t" + /* Subtract overflow */ + /* Add underflow - subtract neg underflow */ "subs r2, r2, r12\n\t" "sbcs r3, r3, #0\n\t" "sbcs r4, r4, #0\n\t" @@ -14023,7 +14023,7 @@ static int sp_256_mod_mul_norm_8(sp_digit* r, const sp_digit* a, const sp_digit* "sbcs r8, r8, #0\n\t" "sbcs r9, r9, r11\n\t" "sbc r14, r14, r12\n\t" - "# Store result\n\t" + /* Store result */ "str r2, [%[r], #0]\n\t" "str r3, [%[r], #4]\n\t" "str r4, [%[r], #8]\n\t" @@ -14243,18 +14243,18 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const __asm__ __volatile__ ( "sub sp, sp, #68\n\t" "mov r5, #0\n\t" - "# A[0] * B[0]\n\t" + /* A[0] * B[0] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[b], #0]\n\t" "umull r9, r10, r6, r8\n\t" "str r9, [sp, #0]\n\t" - "# A[0] * B[1]\n\t" + /* A[0] * B[1] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[b], #4]\n\t" "umull r3, r4, r6, r8\n\t" "adds r10, r3, r10\n\t" "adc r11, r4, #0\n\t" - "# A[1] * B[0]\n\t" + /* A[1] * B[0] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[b], #0]\n\t" "umull r3, r4, r6, r8\n\t" @@ -14262,20 +14262,20 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "adcs r11, r4, r11\n\t" "adc r14, r5, #0\n\t" "str r10, [sp, #4]\n\t" - "# A[0] * B[2]\n\t" + /* A[0] * B[2] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[b], #8]\n\t" "umull r3, r4, r6, r8\n\t" "adds r11, r3, r11\n\t" "adc r14, r4, r14\n\t" - "# A[1] * B[1]\n\t" + /* A[1] * B[1] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[b], #4]\n\t" "umull r3, r4, r6, r8\n\t" "adds r11, r3, r11\n\t" "adcs r14, r4, r14\n\t" "adc r9, r5, #0\n\t" - "# A[2] * B[0]\n\t" + /* A[2] * B[0] */ "ldr r6, [%[a], #8]\n\t" "ldr r8, [%[b], #0]\n\t" "umull r3, r4, r6, r8\n\t" @@ -14283,28 +14283,28 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "adcs r14, r4, r14\n\t" "adc r9, r5, r9\n\t" "str r11, [sp, #8]\n\t" - "# A[0] * B[3]\n\t" + /* A[0] * B[3] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[b], #12]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, #0\n\t" - "# A[1] * B[2]\n\t" + /* A[1] * B[2] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[b], #8]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" - "# A[2] * B[1]\n\t" + /* A[2] * B[1] */ "ldr r6, [%[a], 
#8]\n\t" "ldr r8, [%[b], #4]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" - "# A[3] * B[0]\n\t" + /* A[3] * B[0] */ "ldr r6, [%[a], #12]\n\t" "ldr r8, [%[b], #0]\n\t" "umull r3, r4, r6, r8\n\t" @@ -14312,35 +14312,35 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" "str r14, [sp, #12]\n\t" - "# A[0] * B[4]\n\t" + /* A[0] * B[4] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[b], #16]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, #0\n\t" - "# A[1] * B[3]\n\t" + /* A[1] * B[3] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[b], #12]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, r11\n\t" - "# A[2] * B[2]\n\t" + /* A[2] * B[2] */ "ldr r6, [%[a], #8]\n\t" "ldr r8, [%[b], #8]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, r11\n\t" - "# A[3] * B[1]\n\t" + /* A[3] * B[1] */ "ldr r6, [%[a], #12]\n\t" "ldr r8, [%[b], #4]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, r11\n\t" - "# A[4] * B[0]\n\t" + /* A[4] * B[0] */ "ldr r6, [%[a], #16]\n\t" "ldr r8, [%[b], #0]\n\t" "umull r3, r4, r6, r8\n\t" @@ -14348,42 +14348,42 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "adcs r10, r4, r10\n\t" "adc r11, r5, r11\n\t" "str r9, [sp, #16]\n\t" - "# A[0] * B[5]\n\t" + /* A[0] * B[5] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[b], #20]\n\t" "umull r3, r4, r6, r8\n\t" "adds r10, r3, r10\n\t" "adcs r11, r4, r11\n\t" "adc r14, r5, #0\n\t" - "# A[1] * B[4]\n\t" + /* A[1] * B[4] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[b], #16]\n\t" "umull r3, r4, r6, r8\n\t" "adds r10, r3, r10\n\t" "adcs r11, r4, r11\n\t" "adc r14, r5, r14\n\t" - "# A[2] * B[3]\n\t" + /* A[2] * B[3] */ "ldr r6, [%[a], #8]\n\t" "ldr r8, [%[b], #12]\n\t" "umull r3, r4, r6, r8\n\t" "adds r10, r3, r10\n\t" "adcs r11, r4, r11\n\t" "adc r14, r5, r14\n\t" - "# A[3] * B[2]\n\t" + /* A[3] * B[2] */ "ldr r6, [%[a], #12]\n\t" "ldr r8, [%[b], #8]\n\t" "umull r3, r4, r6, r8\n\t" "adds r10, r3, r10\n\t" "adcs r11, r4, r11\n\t" "adc r14, r5, r14\n\t" - "# A[4] * B[1]\n\t" + /* A[4] * B[1] */ "ldr r6, [%[a], #16]\n\t" "ldr r8, [%[b], #4]\n\t" "umull r3, r4, r6, r8\n\t" "adds r10, r3, r10\n\t" "adcs r11, r4, r11\n\t" "adc r14, r5, r14\n\t" - "# A[5] * B[0]\n\t" + /* A[5] * B[0] */ "ldr r6, [%[a], #20]\n\t" "ldr r8, [%[b], #0]\n\t" "umull r3, r4, r6, r8\n\t" @@ -14391,49 +14391,49 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "adcs r11, r4, r11\n\t" "adc r14, r5, r14\n\t" "str r10, [sp, #20]\n\t" - "# A[0] * B[6]\n\t" + /* A[0] * B[6] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[b], #24]\n\t" "umull r3, r4, r6, r8\n\t" "adds r11, r3, r11\n\t" "adcs r14, r4, r14\n\t" "adc r9, r5, #0\n\t" - "# A[1] * B[5]\n\t" + /* A[1] * B[5] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[b], #20]\n\t" "umull r3, r4, r6, r8\n\t" "adds r11, r3, r11\n\t" "adcs r14, r4, r14\n\t" "adc r9, r5, r9\n\t" - "# A[2] * B[4]\n\t" + /* A[2] * B[4] */ "ldr r6, [%[a], #8]\n\t" "ldr r8, [%[b], #16]\n\t" "umull r3, r4, r6, r8\n\t" "adds r11, r3, r11\n\t" "adcs r14, r4, r14\n\t" "adc r9, r5, r9\n\t" - "# A[3] * B[3]\n\t" + /* A[3] * B[3] */ "ldr r6, [%[a], #12]\n\t" "ldr r8, [%[b], #12]\n\t" "umull r3, r4, r6, r8\n\t" "adds r11, r3, r11\n\t" "adcs r14, r4, r14\n\t" "adc r9, r5, r9\n\t" - "# A[4] * B[2]\n\t" + /* A[4] * B[2] */ "ldr r6, [%[a], #16]\n\t" 
"ldr r8, [%[b], #8]\n\t" "umull r3, r4, r6, r8\n\t" "adds r11, r3, r11\n\t" "adcs r14, r4, r14\n\t" "adc r9, r5, r9\n\t" - "# A[5] * B[1]\n\t" + /* A[5] * B[1] */ "ldr r6, [%[a], #20]\n\t" "ldr r8, [%[b], #4]\n\t" "umull r3, r4, r6, r8\n\t" "adds r11, r3, r11\n\t" "adcs r14, r4, r14\n\t" "adc r9, r5, r9\n\t" - "# A[6] * B[0]\n\t" + /* A[6] * B[0] */ "ldr r6, [%[a], #24]\n\t" "ldr r8, [%[b], #0]\n\t" "umull r3, r4, r6, r8\n\t" @@ -14441,56 +14441,56 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "adcs r14, r4, r14\n\t" "adc r9, r5, r9\n\t" "str r11, [sp, #24]\n\t" - "# A[0] * B[7]\n\t" + /* A[0] * B[7] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[b], #28]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, #0\n\t" - "# A[1] * B[6]\n\t" + /* A[1] * B[6] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[b], #24]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" - "# A[2] * B[5]\n\t" + /* A[2] * B[5] */ "ldr r6, [%[a], #8]\n\t" "ldr r8, [%[b], #20]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" - "# A[3] * B[4]\n\t" + /* A[3] * B[4] */ "ldr r6, [%[a], #12]\n\t" "ldr r8, [%[b], #16]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" - "# A[4] * B[3]\n\t" + /* A[4] * B[3] */ "ldr r6, [%[a], #16]\n\t" "ldr r8, [%[b], #12]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" - "# A[5] * B[2]\n\t" + /* A[5] * B[2] */ "ldr r6, [%[a], #20]\n\t" "ldr r8, [%[b], #8]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" - "# A[6] * B[1]\n\t" + /* A[6] * B[1] */ "ldr r6, [%[a], #24]\n\t" "ldr r8, [%[b], #4]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" - "# A[7] * B[0]\n\t" + /* A[7] * B[0] */ "ldr r6, [%[a], #28]\n\t" "ldr r8, [%[b], #0]\n\t" "umull r3, r4, r6, r8\n\t" @@ -14498,49 +14498,49 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" "str r14, [sp, #28]\n\t" - "# A[1] * B[7]\n\t" + /* A[1] * B[7] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[b], #28]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, #0\n\t" - "# A[2] * B[6]\n\t" + /* A[2] * B[6] */ "ldr r6, [%[a], #8]\n\t" "ldr r8, [%[b], #24]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, r11\n\t" - "# A[3] * B[5]\n\t" + /* A[3] * B[5] */ "ldr r6, [%[a], #12]\n\t" "ldr r8, [%[b], #20]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, r11\n\t" - "# A[4] * B[4]\n\t" + /* A[4] * B[4] */ "ldr r6, [%[a], #16]\n\t" "ldr r8, [%[b], #16]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, r11\n\t" - "# A[5] * B[3]\n\t" + /* A[5] * B[3] */ "ldr r6, [%[a], #20]\n\t" "ldr r8, [%[b], #12]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, r11\n\t" - "# A[6] * B[2]\n\t" + /* A[6] * B[2] */ "ldr r6, [%[a], #24]\n\t" "ldr r8, [%[b], #8]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, r11\n\t" - "# A[7] * B[1]\n\t" + /* A[7] * B[1] */ "ldr r6, [%[a], #28]\n\t" "ldr r8, [%[b], #4]\n\t" "umull r3, r4, r6, r8\n\t" @@ -14548,42 +14548,42 @@ SP_NOINLINE static void 
         "adcs r10, r4, r10\n\t"
         "adc r11, r5, r11\n\t"
         "str r9, [sp, #32]\n\t"
-        "# A[2] * B[7]\n\t"
+        /* A[2] * B[7] */
         "ldr r6, [%[a], #8]\n\t"
         "ldr r8, [%[b], #28]\n\t"
         "umull r3, r4, r6, r8\n\t"
         "adds r10, r3, r10\n\t"
         "adcs r11, r4, r11\n\t"
         "adc r14, r5, #0\n\t"
-        "# A[3] * B[6]\n\t"
+        /* A[3] * B[6] */
         "ldr r6, [%[a], #12]\n\t"
         "ldr r8, [%[b], #24]\n\t"
         "umull r3, r4, r6, r8\n\t"
         "adds r10, r3, r10\n\t"
         "adcs r11, r4, r11\n\t"
         "adc r14, r5, r14\n\t"
-        "# A[4] * B[5]\n\t"
+        /* A[4] * B[5] */
         "ldr r6, [%[a], #16]\n\t"
         "ldr r8, [%[b], #20]\n\t"
         "umull r3, r4, r6, r8\n\t"
         "adds r10, r3, r10\n\t"
         "adcs r11, r4, r11\n\t"
         "adc r14, r5, r14\n\t"
-        "# A[5] * B[4]\n\t"
+        /* A[5] * B[4] */
         "ldr r6, [%[a], #20]\n\t"
         "ldr r8, [%[b], #16]\n\t"
         "umull r3, r4, r6, r8\n\t"
         "adds r10, r3, r10\n\t"
         "adcs r11, r4, r11\n\t"
         "adc r14, r5, r14\n\t"
-        "# A[6] * B[3]\n\t"
+        /* A[6] * B[3] */
         "ldr r6, [%[a], #24]\n\t"
         "ldr r8, [%[b], #12]\n\t"
         "umull r3, r4, r6, r8\n\t"
         "adds r10, r3, r10\n\t"
         "adcs r11, r4, r11\n\t"
         "adc r14, r5, r14\n\t"
-        "# A[7] * B[2]\n\t"
+        /* A[7] * B[2] */
         "ldr r6, [%[a], #28]\n\t"
         "ldr r8, [%[b], #8]\n\t"
         "umull r3, r4, r6, r8\n\t"
@@ -14591,35 +14591,35 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const
         "adcs r11, r4, r11\n\t"
         "adc r14, r5, r14\n\t"
         "str r10, [sp, #36]\n\t"
-        "# A[3] * B[7]\n\t"
+        /* A[3] * B[7] */
         "ldr r6, [%[a], #12]\n\t"
         "ldr r8, [%[b], #28]\n\t"
         "umull r3, r4, r6, r8\n\t"
         "adds r11, r3, r11\n\t"
         "adcs r14, r4, r14\n\t"
         "adc r9, r5, #0\n\t"
-        "# A[4] * B[6]\n\t"
+        /* A[4] * B[6] */
         "ldr r6, [%[a], #16]\n\t"
         "ldr r8, [%[b], #24]\n\t"
         "umull r3, r4, r6, r8\n\t"
         "adds r11, r3, r11\n\t"
         "adcs r14, r4, r14\n\t"
         "adc r9, r5, r9\n\t"
-        "# A[5] * B[5]\n\t"
+        /* A[5] * B[5] */
         "ldr r6, [%[a], #20]\n\t"
         "ldr r8, [%[b], #20]\n\t"
         "umull r3, r4, r6, r8\n\t"
         "adds r11, r3, r11\n\t"
         "adcs r14, r4, r14\n\t"
         "adc r9, r5, r9\n\t"
-        "# A[6] * B[4]\n\t"
+        /* A[6] * B[4] */
         "ldr r6, [%[a], #24]\n\t"
         "ldr r8, [%[b], #16]\n\t"
         "umull r3, r4, r6, r8\n\t"
         "adds r11, r3, r11\n\t"
         "adcs r14, r4, r14\n\t"
         "adc r9, r5, r9\n\t"
-        "# A[7] * B[3]\n\t"
+        /* A[7] * B[3] */
         "ldr r6, [%[a], #28]\n\t"
         "ldr r8, [%[b], #12]\n\t"
         "umull r3, r4, r6, r8\n\t"
@@ -14627,28 +14627,28 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const
         "adcs r14, r4, r14\n\t"
         "adc r9, r5, r9\n\t"
         "str r11, [sp, #40]\n\t"
-        "# A[4] * B[7]\n\t"
+        /* A[4] * B[7] */
         "ldr r6, [%[a], #16]\n\t"
         "ldr r8, [%[b], #28]\n\t"
         "umull r3, r4, r6, r8\n\t"
         "adds r14, r3, r14\n\t"
         "adcs r9, r4, r9\n\t"
         "adc r10, r5, #0\n\t"
-        "# A[5] * B[6]\n\t"
+        /* A[5] * B[6] */
         "ldr r6, [%[a], #20]\n\t"
         "ldr r8, [%[b], #24]\n\t"
         "umull r3, r4, r6, r8\n\t"
         "adds r14, r3, r14\n\t"
         "adcs r9, r4, r9\n\t"
         "adc r10, r5, r10\n\t"
-        "# A[6] * B[5]\n\t"
+        /* A[6] * B[5] */
         "ldr r6, [%[a], #24]\n\t"
         "ldr r8, [%[b], #20]\n\t"
         "umull r3, r4, r6, r8\n\t"
         "adds r14, r3, r14\n\t"
         "adcs r9, r4, r9\n\t"
         "adc r10, r5, r10\n\t"
-        "# A[7] * B[4]\n\t"
+        /* A[7] * B[4] */
         "ldr r6, [%[a], #28]\n\t"
         "ldr r8, [%[b], #16]\n\t"
         "umull r3, r4, r6, r8\n\t"
@@ -14656,42 +14656,42 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const
         "adcs r9, r4, r9\n\t"
         "adc r10, r5, r10\n\t"
         "str r14, [sp, #44]\n\t"
-        "# A[5] * B[7]\n\t"
+        /* A[5] * B[7] */
         "ldr r6, [%[a], #20]\n\t"
         "ldr r8, [%[b], #28]\n\t"
         "umull r3, r4, r6, r8\n\t"
         "adds r9, r3, r9\n\t"
         "adcs r10, r4, r10\n\t"
         "adc r11, r5, #0\n\t"
-        "# A[6] * B[6]\n\t"
+        /* A[6] * B[6] */
         "ldr r6, [%[a], #24]\n\t"
#24]\n\t" "ldr r8, [%[b], #24]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, r11\n\t" - "# A[7] * B[5]\n\t" + /* A[7] * B[5] */ "ldr r6, [%[a], #28]\n\t" "ldr r8, [%[b], #20]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, r11\n\t" - "# A[6] * B[7]\n\t" + /* A[6] * B[7] */ "ldr r6, [%[a], #24]\n\t" "ldr r8, [%[b], #28]\n\t" "umull r3, r4, r6, r8\n\t" "adds r10, r3, r10\n\t" "adcs r11, r4, r11\n\t" "adc r14, r5, #0\n\t" - "# A[7] * B[6]\n\t" + /* A[7] * B[6] */ "ldr r6, [%[a], #28]\n\t" "ldr r8, [%[b], #24]\n\t" "umull r3, r4, r6, r8\n\t" "adds r10, r3, r10\n\t" "adcs r11, r4, r11\n\t" "adc r14, r5, r14\n\t" - "# A[7] * B[7]\n\t" + /* A[7] * B[7] */ "ldr r6, [%[a], #28]\n\t" "ldr r8, [%[b], #28]\n\t" "umull r3, r4, r6, r8\n\t" @@ -14701,7 +14701,7 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "str r10, [sp, #52]\n\t" "str r11, [sp, #56]\n\t" "str r14, [sp, #60]\n\t" - "# Start Reduction\n\t" + /* Start Reduction */ "ldr r4, [sp, #0]\n\t" "ldr r5, [sp, #4]\n\t" "ldr r6, [sp, #8]\n\t" @@ -14710,16 +14710,16 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "ldr r10, [sp, #20]\n\t" "ldr r11, [sp, #24]\n\t" "ldr r14, [sp, #28]\n\t" - "# mu = a[0]-a[7] + a[0]-a[4] << 96 + (a[0]-a[1] * 2) << 192\n\t" - "# - a[0] << 224\n\t" - "# + (a[0]-a[1] * 2) << (6 * 32)\n\t" + /* mu = a[0]-a[7] + a[0]-a[4] << 96 + (a[0]-a[1] * 2) << 192 */ + /* - a[0] << 224 */ + /* + (a[0]-a[1] * 2) << (6 * 32) */ "adds r11, r11, r4\n\t" "adc r14, r14, r5\n\t" "adds r11, r11, r4\n\t" "adc r14, r14, r5\n\t" - "# - a[0] << (7 * 32)\n\t" + /* - a[0] << (7 * 32) */ "sub r14, r14, r4\n\t" - "# + a[0]-a[4] << (3 * 32)\n\t" + /* + a[0]-a[4] << (3 * 32) */ "mov %[a], r8\n\t" "mov %[b], r9\n\t" "adds r8, r8, r4\n\t" @@ -14733,17 +14733,17 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "str r8, [sp, #12]\n\t" "str r9, [sp, #16]\n\t" "str r10, [sp, #20]\n\t" - "# a += mu * m\n\t" - "# += mu * ((1 << 256) - (1 << 224) + (1 << 192) + (1 << 96) - 1)\n\t" + /* a += mu * m */ + /* += mu * ((1 << 256) - (1 << 224) + (1 << 192) + (1 << 96) - 1) */ "mov %[a], #0\n\t" - "# a[6] += t[0] + t[3]\n\t" + /* a[6] += t[0] + t[3] */ "ldr r3, [sp, #24]\n\t" "adds r3, r3, r4\n\t" "adc %[b], %[a], #0\n\t" "adds r3, r3, r8\n\t" "adc %[b], %[b], #0\n\t" "str r11, [sp, #24]\n\t" - "# a[7] += t[1] + t[4]\n\t" + /* a[7] += t[1] + t[4] */ "ldr r3, [sp, #28]\n\t" "adds r3, r3, %[b]\n\t" "adc %[b], %[a], #0\n\t" @@ -14753,7 +14753,7 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "adc %[b], %[b], #0\n\t" "str r14, [sp, #28]\n\t" "str r3, [sp, #64]\n\t" - "# a[8] += t[0] + t[2] + t[5]\n\t" + /* a[8] += t[0] + t[2] + t[5] */ "ldr r3, [sp, #32]\n\t" "adds r3, r3, %[b]\n\t" "adc %[b], %[a], #0\n\t" @@ -14764,8 +14764,8 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "adds r3, r3, r10\n\t" "adc %[b], %[b], #0\n\t" "str r3, [sp, #32]\n\t" - "# a[9] += t[1] + t[3] + t[6]\n\t" - "# a[10] += t[2] + t[4] + t[7]\n\t" + /* a[9] += t[1] + t[3] + t[6] */ + /* a[10] += t[2] + t[4] + t[7] */ "ldr r3, [sp, #36]\n\t" "ldr r4, [sp, #40]\n\t" "adds r3, r3, %[b]\n\t" @@ -14782,10 +14782,10 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "adc %[b], %[b], #0\n\t" "str r3, [sp, #36]\n\t" "str r4, [sp, #40]\n\t" - "# a[11] += t[3] + t[5]\n\t" - "# a[12] += t[4] + t[6]\n\t" - "# a[13] += t[5] 
+ t[7]\n\t" - "# a[14] += t[6]\n\t" + /* a[11] += t[3] + t[5] */ + /* a[12] += t[4] + t[6] */ + /* a[13] += t[5] + t[7] */ + /* a[14] += t[6] */ "ldr r3, [sp, #44]\n\t" "ldr r4, [sp, #48]\n\t" "ldr r5, [sp, #52]\n\t" @@ -14809,7 +14809,7 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "str r4, [sp, #48]\n\t" "str r5, [sp, #52]\n\t" "str r6, [sp, #56]\n\t" - "# a[15] += t[7]\n\t" + /* a[15] += t[7] */ "ldr r3, [sp, #60]\n\t" "adds r3, r3, %[b]\n\t" "adc %[b], %[a], #0\n\t" @@ -14850,7 +14850,7 @@ SP_NOINLINE static void sp_256_mont_mul_8(sp_digit* r, const sp_digit* a, const "str r5, [sp, #52]\n\t" "str r6, [sp, #56]\n\t" "str r8, [sp, #60]\n\t" - "# mask m and sub from result if overflow\n\t" + /* mask m and sub from result if overflow */ "sub %[b], %[a], %[b]\n\t" "and %[a], %[b], #1\n\t" "ldr r3, [sp, #32]\n\t" @@ -14900,25 +14900,25 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const __asm__ __volatile__ ( "sub sp, sp, #68\n\t" "mov r5, #0\n\t" - "# A[0] * A[1]\n\t" + /* A[0] * A[1] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[a], #4]\n\t" "umull r10, r11, r6, r8\n\t" "str r10, [sp, #4]\n\t" - "# A[0] * A[2]\n\t" + /* A[0] * A[2] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[a], #8]\n\t" "umull r3, r4, r6, r8\n\t" "adds r11, r3, r11\n\t" "adc r14, r4, #0\n\t" "str r11, [sp, #8]\n\t" - "# A[0] * A[3]\n\t" + /* A[0] * A[3] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[a], #12]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adc r9, r4, #0\n\t" - "# A[1] * A[2]\n\t" + /* A[1] * A[2] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[a], #8]\n\t" "umull r3, r4, r6, r8\n\t" @@ -14926,13 +14926,13 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adcs r9, r4, r9\n\t" "adc r10, r5, #0\n\t" "str r14, [sp, #12]\n\t" - "# A[0] * A[4]\n\t" + /* A[0] * A[4] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[a], #16]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adc r10, r4, r10\n\t" - "# A[1] * A[3]\n\t" + /* A[1] * A[3] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[a], #12]\n\t" "umull r3, r4, r6, r8\n\t" @@ -14940,20 +14940,20 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adcs r10, r4, r10\n\t" "adc r11, r5, #0\n\t" "str r9, [sp, #16]\n\t" - "# A[0] * A[5]\n\t" + /* A[0] * A[5] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[a], #20]\n\t" "umull r3, r4, r6, r8\n\t" "adds r10, r3, r10\n\t" "adc r11, r4, r11\n\t" - "# A[1] * A[4]\n\t" + /* A[1] * A[4] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[a], #16]\n\t" "umull r3, r4, r6, r8\n\t" "adds r10, r3, r10\n\t" "adcs r11, r4, r11\n\t" "adc r14, r5, #0\n\t" - "# A[2] * A[3]\n\t" + /* A[2] * A[3] */ "ldr r6, [%[a], #8]\n\t" "ldr r8, [%[a], #12]\n\t" "umull r3, r4, r6, r8\n\t" @@ -14961,21 +14961,21 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adcs r11, r4, r11\n\t" "adc r14, r5, r14\n\t" "str r10, [sp, #20]\n\t" - "# A[0] * A[6]\n\t" + /* A[0] * A[6] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[a], #24]\n\t" "umull r3, r4, r6, r8\n\t" "adds r11, r3, r11\n\t" "adcs r14, r4, r14\n\t" "adc r9, r5, #0\n\t" - "# A[1] * A[5]\n\t" + /* A[1] * A[5] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[a], #20]\n\t" "umull r3, r4, r6, r8\n\t" "adds r11, r3, r11\n\t" "adcs r14, r4, r14\n\t" "adc r9, r5, r9\n\t" - "# A[2] * A[4]\n\t" + /* A[2] * A[4] */ "ldr r6, [%[a], #8]\n\t" "ldr r8, [%[a], #16]\n\t" "umull r3, r4, r6, r8\n\t" @@ -14983,28 +14983,28 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adcs r14, 
r4, r14\n\t" "adc r9, r5, r9\n\t" "str r11, [sp, #24]\n\t" - "# A[0] * A[7]\n\t" + /* A[0] * A[7] */ "ldr r6, [%[a], #0]\n\t" "ldr r8, [%[a], #28]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, #0\n\t" - "# A[1] * A[6]\n\t" + /* A[1] * A[6] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[a], #24]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" - "# A[2] * A[5]\n\t" + /* A[2] * A[5] */ "ldr r6, [%[a], #8]\n\t" "ldr r8, [%[a], #20]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" - "# A[3] * A[4]\n\t" + /* A[3] * A[4] */ "ldr r6, [%[a], #12]\n\t" "ldr r8, [%[a], #16]\n\t" "umull r3, r4, r6, r8\n\t" @@ -15012,21 +15012,21 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" "str r14, [sp, #28]\n\t" - "# A[1] * A[7]\n\t" + /* A[1] * A[7] */ "ldr r6, [%[a], #4]\n\t" "ldr r8, [%[a], #28]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, #0\n\t" - "# A[2] * A[6]\n\t" + /* A[2] * A[6] */ "ldr r6, [%[a], #8]\n\t" "ldr r8, [%[a], #24]\n\t" "umull r3, r4, r6, r8\n\t" "adds r9, r3, r9\n\t" "adcs r10, r4, r10\n\t" "adc r11, r5, r11\n\t" - "# A[3] * A[5]\n\t" + /* A[3] * A[5] */ "ldr r6, [%[a], #12]\n\t" "ldr r8, [%[a], #20]\n\t" "umull r3, r4, r6, r8\n\t" @@ -15034,21 +15034,21 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adcs r10, r4, r10\n\t" "adc r11, r5, r11\n\t" "str r9, [sp, #32]\n\t" - "# A[2] * A[7]\n\t" + /* A[2] * A[7] */ "ldr r6, [%[a], #8]\n\t" "ldr r8, [%[a], #28]\n\t" "umull r3, r4, r6, r8\n\t" "adds r10, r3, r10\n\t" "adcs r11, r4, r11\n\t" "adc r14, r5, #0\n\t" - "# A[3] * A[6]\n\t" + /* A[3] * A[6] */ "ldr r6, [%[a], #12]\n\t" "ldr r8, [%[a], #24]\n\t" "umull r3, r4, r6, r8\n\t" "adds r10, r3, r10\n\t" "adcs r11, r4, r11\n\t" "adc r14, r5, r14\n\t" - "# A[4] * A[5]\n\t" + /* A[4] * A[5] */ "ldr r6, [%[a], #16]\n\t" "ldr r8, [%[a], #20]\n\t" "umull r3, r4, r6, r8\n\t" @@ -15056,14 +15056,14 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adcs r11, r4, r11\n\t" "adc r14, r5, r14\n\t" "str r10, [sp, #36]\n\t" - "# A[3] * A[7]\n\t" + /* A[3] * A[7] */ "ldr r6, [%[a], #12]\n\t" "ldr r8, [%[a], #28]\n\t" "umull r3, r4, r6, r8\n\t" "adds r11, r3, r11\n\t" "adcs r14, r4, r14\n\t" "adc r9, r5, #0\n\t" - "# A[4] * A[6]\n\t" + /* A[4] * A[6] */ "ldr r6, [%[a], #16]\n\t" "ldr r8, [%[a], #24]\n\t" "umull r3, r4, r6, r8\n\t" @@ -15071,14 +15071,14 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adcs r14, r4, r14\n\t" "adc r9, r5, r9\n\t" "str r11, [sp, #40]\n\t" - "# A[4] * A[7]\n\t" + /* A[4] * A[7] */ "ldr r6, [%[a], #16]\n\t" "ldr r8, [%[a], #28]\n\t" "umull r3, r4, r6, r8\n\t" "adds r14, r3, r14\n\t" "adcs r9, r4, r9\n\t" "adc r10, r5, #0\n\t" - "# A[5] * A[6]\n\t" + /* A[5] * A[6] */ "ldr r6, [%[a], #20]\n\t" "ldr r8, [%[a], #24]\n\t" "umull r3, r4, r6, r8\n\t" @@ -15086,7 +15086,7 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adcs r9, r4, r9\n\t" "adc r10, r5, r10\n\t" "str r14, [sp, #44]\n\t" - "# A[5] * A[7]\n\t" + /* A[5] * A[7] */ "ldr r6, [%[a], #20]\n\t" "ldr r8, [%[a], #28]\n\t" "umull r3, r4, r6, r8\n\t" @@ -15094,7 +15094,7 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adcs r10, r4, r10\n\t" "adc r11, r5, #0\n\t" "str r9, [sp, #48]\n\t" - "# A[6] 
* A[7]\n\t" + /* A[6] * A[7] */ "ldr r6, [%[a], #24]\n\t" "ldr r8, [%[a], #28]\n\t" "umull r3, r4, r6, r8\n\t" @@ -15102,7 +15102,7 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adc r11, r4, r11\n\t" "str r10, [sp, #52]\n\t" "str r11, [sp, #56]\n\t" - "# Double\n\t" + /* Double */ "ldr r4, [sp, #4]\n\t" "ldr r6, [sp, #8]\n\t" "ldr r8, [sp, #12]\n\t" @@ -15150,10 +15150,10 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "ldr r4, [sp, #4]\n\t" "ldr r5, [sp, #8]\n\t" "ldr r12, [sp, #12]\n\t" - "# A[0] * A[0]\n\t" + /* A[0] * A[0] */ "ldr r6, [%[a], #0]\n\t" "umull r9, r10, r6, r6\n\t" - "# A[1] * A[1]\n\t" + /* A[1] * A[1] */ "ldr r6, [%[a], #4]\n\t" "umull r11, r14, r6, r6\n\t" "adds r10, r10, r4\n\t" @@ -15167,10 +15167,10 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "ldr r4, [sp, #20]\n\t" "ldr r5, [sp, #24]\n\t" "ldr r12, [sp, #28]\n\t" - "# A[2] * A[2]\n\t" + /* A[2] * A[2] */ "ldr r6, [%[a], #8]\n\t" "umull r9, r10, r6, r6\n\t" - "# A[3] * A[3]\n\t" + /* A[3] * A[3] */ "ldr r6, [%[a], #12]\n\t" "umull r11, r14, r6, r6\n\t" "adcs r9, r9, r3\n\t" @@ -15185,10 +15185,10 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "ldr r4, [sp, #36]\n\t" "ldr r5, [sp, #40]\n\t" "ldr r12, [sp, #44]\n\t" - "# A[4] * A[4]\n\t" + /* A[4] * A[4] */ "ldr r6, [%[a], #16]\n\t" "umull r9, r10, r6, r6\n\t" - "# A[5] * A[5]\n\t" + /* A[5] * A[5] */ "ldr r6, [%[a], #20]\n\t" "umull r11, r14, r6, r6\n\t" "adcs r9, r9, r3\n\t" @@ -15203,10 +15203,10 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "ldr r4, [sp, #52]\n\t" "ldr r5, [sp, #56]\n\t" "ldr r12, [sp, #60]\n\t" - "# A[6] * A[6]\n\t" + /* A[6] * A[6] */ "ldr r6, [%[a], #24]\n\t" "umull r9, r10, r6, r6\n\t" - "# A[7] * A[7]\n\t" + /* A[7] * A[7] */ "ldr r6, [%[a], #28]\n\t" "umull r11, r14, r6, r6\n\t" "adcs r9, r9, r3\n\t" @@ -15217,7 +15217,7 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "str r10, [sp, #52]\n\t" "str r11, [sp, #56]\n\t" "str r14, [sp, #60]\n\t" - "# Start Reduction\n\t" + /* Start Reduction */ "ldr r4, [sp, #0]\n\t" "ldr r5, [sp, #4]\n\t" "ldr r6, [sp, #8]\n\t" @@ -15226,16 +15226,16 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "ldr r10, [sp, #20]\n\t" "ldr r11, [sp, #24]\n\t" "ldr r14, [sp, #28]\n\t" - "# mu = a[0]-a[7] + a[0]-a[4] << 96 + (a[0]-a[1] * 2) << 192\n\t" - "# - a[0] << 224\n\t" - "# + (a[0]-a[1] * 2) << (6 * 32)\n\t" + /* mu = a[0]-a[7] + a[0]-a[4] << 96 + (a[0]-a[1] * 2) << 192 */ + /* - a[0] << 224 */ + /* + (a[0]-a[1] * 2) << (6 * 32) */ "adds r11, r11, r4\n\t" "adc r14, r14, r5\n\t" "adds r11, r11, r4\n\t" "adc r14, r14, r5\n\t" - "# - a[0] << (7 * 32)\n\t" + /* - a[0] << (7 * 32) */ "sub r14, r14, r4\n\t" - "# + a[0]-a[4] << (3 * 32)\n\t" + /* + a[0]-a[4] << (3 * 32) */ "mov %[a], r8\n\t" "mov r12, r9\n\t" "adds r8, r8, r4\n\t" @@ -15249,17 +15249,17 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "str r8, [sp, #12]\n\t" "str r9, [sp, #16]\n\t" "str r10, [sp, #20]\n\t" - "# a += mu * m\n\t" - "# += mu * ((1 << 256) - (1 << 224) + (1 << 192) + (1 << 96) - 1)\n\t" + /* a += mu * m */ + /* += mu * ((1 << 256) - (1 << 224) + (1 << 192) + (1 << 96) - 1) */ "mov %[a], #0\n\t" - "# a[6] += t[0] + t[3]\n\t" + /* a[6] += t[0] + t[3] */ "ldr r3, [sp, #24]\n\t" "adds r3, r3, r4\n\t" "adc r12, %[a], #0\n\t" "adds r3, r3, r8\n\t" "adc r12, r12, #0\n\t" 
"str r11, [sp, #24]\n\t" - "# a[7] += t[1] + t[4]\n\t" + /* a[7] += t[1] + t[4] */ "ldr r3, [sp, #28]\n\t" "adds r3, r3, r12\n\t" "adc r12, %[a], #0\n\t" @@ -15269,7 +15269,7 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adc r12, r12, #0\n\t" "str r14, [sp, #28]\n\t" "str r3, [sp, #64]\n\t" - "# a[8] += t[0] + t[2] + t[5]\n\t" + /* a[8] += t[0] + t[2] + t[5] */ "ldr r3, [sp, #32]\n\t" "adds r3, r3, r12\n\t" "adc r12, %[a], #0\n\t" @@ -15280,8 +15280,8 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adds r3, r3, r10\n\t" "adc r12, r12, #0\n\t" "str r3, [sp, #32]\n\t" - "# a[9] += t[1] + t[3] + t[6]\n\t" - "# a[10] += t[2] + t[4] + t[7]\n\t" + /* a[9] += t[1] + t[3] + t[6] */ + /* a[10] += t[2] + t[4] + t[7] */ "ldr r3, [sp, #36]\n\t" "ldr r4, [sp, #40]\n\t" "adds r3, r3, r12\n\t" @@ -15298,10 +15298,10 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "adc r12, r12, #0\n\t" "str r3, [sp, #36]\n\t" "str r4, [sp, #40]\n\t" - "# a[11] += t[3] + t[5]\n\t" - "# a[12] += t[4] + t[6]\n\t" - "# a[13] += t[5] + t[7]\n\t" - "# a[14] += t[6]\n\t" + /* a[11] += t[3] + t[5] */ + /* a[12] += t[4] + t[6] */ + /* a[13] += t[5] + t[7] */ + /* a[14] += t[6] */ "ldr r3, [sp, #44]\n\t" "ldr r4, [sp, #48]\n\t" "ldr r5, [sp, #52]\n\t" @@ -15325,7 +15325,7 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "str r4, [sp, #48]\n\t" "str r5, [sp, #52]\n\t" "str r6, [sp, #56]\n\t" - "# a[15] += t[7]\n\t" + /* a[15] += t[7] */ "ldr r3, [sp, #60]\n\t" "adds r3, r3, r12\n\t" "adc r12, %[a], #0\n\t" @@ -15366,7 +15366,7 @@ SP_NOINLINE static void sp_256_mont_sqr_8(sp_digit* r, const sp_digit* a, const "str r5, [sp, #52]\n\t" "str r6, [sp, #56]\n\t" "str r8, [sp, #60]\n\t" - "# mask m and sub from result if overflow\n\t" + /* mask m and sub from result if overflow */ "sub r12, %[a], r12\n\t" "and %[a], r12, #1\n\t" "ldr r3, [sp, #32]\n\t"