Merge pull request #5748 from SparkiDev/arm32_asm_lr_fixup

ARM32 ASM: fix asm saving and restoring of registers
Authored by JacobBarthelmeh on 2022-10-28 07:37:02 -06:00, committed by GitHub
3 changed files with 14 additions and 14 deletions
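
The pattern in every hunk below is the same: each function prologue now pushes lr along with the callee-saved registers it uses, and the epilogue pops exactly the same list with pc in lr's place, so the pop both restores the registers and returns. A minimal sketch of the corrected prologue/epilogue pairing (the function name and body are illustrative, not taken from the patch):

.globl example_func
.type example_func, %function
example_func:
    push {r4, r5, r6, r7, r8, r9, lr}    @ save callee-saved regs plus the return address
    @ ... body may clobber r4-r9 freely ...
    pop {r4, r5, r6, r7, r8, r9, pc}     @ restore the same registers; popping pc returns to the caller

With the old mismatched lists (for example push {r4, r5} paired with pop {r4, pc} in fe_copy), the final pop loaded the word saved from r5 into pc instead of a return address.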

View File

@@ -42,7 +42,7 @@ fe_init:
 .globl fe_frombytes
 .type fe_frombytes, %function
 fe_frombytes:
-push {r4, r5, r6, r7, r8, r9}
+push {r4, r5, r6, r7, r8, r9, lr}
 #if defined(WOLFSSL_SP_ARM_ARCH) && (WOLFSSL_SP_ARM_ARCH < 7)
 ldr r2, [r1]
 ldr r3, [r1, #4]
@@ -92,14 +92,14 @@ fe_frombytes:
 #else
 strd r8, r9, [r0, #24]
 #endif
-pop {r4, r5, r6, r7, r8, pc}
+pop {r4, r5, r6, r7, r8, r9, pc}
 .size fe_frombytes,.-fe_frombytes
 .text
 .align 4
 .globl fe_tobytes
 .type fe_tobytes, %function
 fe_tobytes:
-push {r4, r5, r6, r7, r8, r9}
+push {r4, r5, r6, r7, r8, r9, lr}
 #if defined(WOLFSSL_SP_ARM_ARCH) && (WOLFSSL_SP_ARM_ARCH < 7)
 ldr r2, [r1]
 ldr r3, [r1, #4]
@@ -167,7 +167,7 @@ fe_tobytes:
 #else
 strd r8, r9, [r0, #24]
 #endif
-pop {r4, r5, r6, r7, r8, pc}
+pop {r4, r5, r6, r7, r8, r9, pc}
 .size fe_tobytes,.-fe_tobytes
 .text
 .align 4
@@ -243,7 +243,7 @@ fe_0:
 .globl fe_copy
 .type fe_copy, %function
 fe_copy:
-push {r4, r5}
+push {r4, r5, lr}
 # Copy
 #if defined(WOLFSSL_SP_ARM_ARCH) && (WOLFSSL_SP_ARM_ARCH < 7)
 ldr r2, [r1]
@@ -293,7 +293,7 @@ fe_copy:
 #else
 strd r4, r5, [r0, #24]
 #endif
-pop {r4, pc}
+pop {r4, r5, pc}
 .size fe_copy,.-fe_copy
 .text
 .align 4
@@ -627,7 +627,7 @@ fe_neg:
 .globl fe_isnonzero
 .type fe_isnonzero, %function
 fe_isnonzero:
-push {r4, r5, r6, r7, r8, r9}
+push {r4, r5, r6, r7, r8, r9, lr}
 #if defined(WOLFSSL_SP_ARM_ARCH) && (WOLFSSL_SP_ARM_ARCH < 7)
 ldr r2, [r0]
 ldr r3, [r0, #4]
@@ -678,14 +678,14 @@ fe_isnonzero:
 orr r4, r4, r6
 orr r2, r2, r8
 orr r0, r2, r4
-pop {r4, r5, r6, r7, r8, pc}
+pop {r4, r5, r6, r7, r8, r9, pc}
 .size fe_isnonzero,.-fe_isnonzero
 .text
 .align 4
 .globl fe_isnegative
 .type fe_isnegative, %function
 fe_isnegative:
-push {r4, r5}
+push {r4, r5, lr}
 #if defined(WOLFSSL_SP_ARM_ARCH) && (WOLFSSL_SP_ARM_ARCH < 7)
 ldr r2, [r0]
 ldr r3, [r0, #4]
@@ -722,7 +722,7 @@ fe_isnegative:
 and r0, r2, #1
 lsr r1, r1, #31
 eor r0, r0, r1
-pop {r4, pc}
+pop {r4, r5, pc}
 .size fe_isnegative,.-fe_isnegative
 .text
 .align 4

View File

@@ -104,7 +104,7 @@ L_SHA256_transform_len_k:
 .globl Transform_Sha256_Len
 .type Transform_Sha256_Len, %function
 Transform_Sha256_Len:
-push {r4, r5, r6, r7, r8, r9, r10, r11}
+push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
 sub sp, sp, #0xc0
 adr r3, L_SHA256_transform_len_k
 # Copy digest to add in at end
@@ -1654,7 +1654,7 @@ L_SHA256_transform_len_start:
 add r1, r1, #0x40
 bne L_SHA256_transform_len_begin
 add sp, sp, #0xc0
-pop {r4, r5, r6, r7, r8, r9, r10, pc}
+pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 .size Transform_Sha256_Len,.-Transform_Sha256_Len
 #endif /* WOLFSSL_ARMASM_NO_NEON */
 #ifndef WOLFSSL_ARMASM_NO_NEON

View File

@@ -200,7 +200,7 @@ L_SHA512_transform_len_k:
 .globl Transform_Sha512_Len
 .type Transform_Sha512_Len, %function
 Transform_Sha512_Len:
-push {r4, r5, r6, r7, r8, r9, r10, r11}
+push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
 sub sp, sp, #0xc0
 adr r3, L_SHA512_transform_len_k
 # Copy digest to add in at end
@@ -7563,7 +7563,7 @@ L_SHA512_transform_len_start:
 bne L_SHA512_transform_len_begin
 eor r0, r0, r0
 add sp, sp, #0xc0
-pop {r4, r5, r6, r7, r8, r9, r10, pc}
+pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
 .size Transform_Sha512_Len,.-Transform_Sha512_Len
 #endif /* WOLFSSL_ARMASM_NO_NEON */
 #ifndef WOLFSSL_ARMASM_NO_NEON