diff --git a/src/include.am b/src/include.am index 36607f5ce..61f89f86d 100644 --- a/src/include.am +++ b/src/include.am @@ -996,6 +996,14 @@ src_libwolfssl@LIBSUFFIX@_la_SOURCES += wolfcrypt/src/chacha.c if BUILD_ARMASM_NEON src_libwolfssl@LIBSUFFIX@_la_SOURCES += wolfcrypt/src/port/arm/armv8-chacha.c else +if BUILD_ARMASM +src_libwolfssl@LIBSUFFIX@_la_SOURCES += wolfcrypt/src/port/arm/thumb2-chacha.c +if BUILD_ARMASM_INLINE +src_libwolfssl@LIBSUFFIX@_la_SOURCES += wolfcrypt/src/port/arm/thumb2-chacha-asm_c.c +else +src_libwolfssl@LIBSUFFIX@_la_SOURCES += wolfcrypt/src/port/arm/thumb2-chacha-asm.S +endif !BUILD_ARMASM_INLINE +endif BUILD_ARMASM if BUILD_RISCV_ASM src_libwolfssl@LIBSUFFIX@_la_SOURCES += wolfcrypt/src/port/riscv/riscv-64-chacha.c endif BUILD_RISCV_ASM diff --git a/wolfcrypt/src/chacha.c b/wolfcrypt/src/chacha.c index 1bad41dfb..f7ee6bba3 100644 --- a/wolfcrypt/src/chacha.c +++ b/wolfcrypt/src/chacha.c @@ -72,7 +72,8 @@ Public domain. #endif /* HAVE_CHACHA */ -#if defined(WOLFSSL_ARMASM) && !defined(WOLFSSL_ARMASM_NO_NEON) +#if defined(WOLFSSL_ARMASM) && (!defined(WOLFSSL_ARMASM_NO_NEON) || \ + defined(__thumb__)) /* implementation is located in wolfcrypt/src/port/arm/armv8-chacha.c */ #elif defined(WOLFSSL_RISCV_ASM) diff --git a/wolfcrypt/src/port/arm/thumb2-chacha-asm.S b/wolfcrypt/src/port/arm/thumb2-chacha-asm.S new file mode 100644 index 000000000..4c3c2e7e7 --- /dev/null +++ b/wolfcrypt/src/port/arm/thumb2-chacha-asm.S @@ -0,0 +1,575 @@ +/* thumb2-chacha-asm + * + * Copyright (C) 2006-2024 wolfSSL Inc. + * + * This file is part of wolfSSL. + * + * wolfSSL is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * wolfSSL is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA + */ + +/* Generated using (from wolfssl): + * cd ../scripts + * ruby ./chacha/chacha.rb thumb2 ../wolfssl/wolfcrypt/src/port/arm/thumb2-chacha-asm.S + */ + +#ifdef HAVE_CONFIG_H + #include +#endif /* HAVE_CONFIG_H */ +#include + +#ifdef WOLFSSL_ARMASM +#if !defined(__aarch64__) && defined(__thumb__) +#ifndef WOLFSSL_ARMASM_INLINE + .thumb + .syntax unified +#ifdef HAVE_CHACHA + .text + .align 4 + .globl wc_chacha_setiv + .type wc_chacha_setiv, %function +wc_chacha_setiv: + PUSH {r4, r5, r6, lr} + ADD r3, r0, #0x34 + LDR r4, [r1] + LDR r5, [r1, #4] + LDR r6, [r1, #8] + STR r2, [r0, #48] +#ifdef BIG_ENDIAN_ORDER + REV r4, r4 + REV r5, r5 + REV r6, r6 +#endif /* BIG_ENDIAN_ORDER */ + STM r3, {r4, r5, r6} + POP {r4, r5, r6, pc} + /* Cycle Count = 26 */ + .size wc_chacha_setiv,.-wc_chacha_setiv + .text + .type L_chacha_thumb2_constants, %object + .size L_chacha_thumb2_constants, 32 + .align 4 +L_chacha_thumb2_constants: + .word 0x61707865 + .word 0x3120646e + .word 0x79622d36 + .word 0x6b206574 + .word 0x61707865 + .word 0x3320646e + .word 0x79622d32 + .word 0x6b206574 + .text + .align 4 + .globl wc_chacha_setkey + .type wc_chacha_setkey, %function +wc_chacha_setkey: + PUSH {r4, r5, r6, r7, lr} + ADR r7, L_chacha_thumb2_constants + SUBS r2, r2, #0x10 + ADD r7, r7, r2 + /* Start state with constants */ + LDM r7, {r3, r4, r5, r6} + STM r0!, {r3, r4, r5, r6} + /* Next is first 16 bytes of key. 
*/ + LDR r3, [r1] + LDR r4, [r1, #4] + LDR r5, [r1, #8] + LDR r6, [r1, #12] +#ifdef BIG_ENDIAN_ORDER + REV r3, r3 + REV r4, r4 + REV r5, r5 + REV r6, r6 +#endif /* BIG_ENDIAN_ORDER */ + STM r0!, {r3, r4, r5, r6} + /* Next 16 bytes of key. */ +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BEQ L_chacha_thumb2_setkey_same_keyb_ytes +#else + BEQ.N L_chacha_thumb2_setkey_same_keyb_ytes +#endif + /* Update key pointer for next 16 bytes. */ + ADD r1, r1, r2 + LDR r3, [r1] + LDR r4, [r1, #4] + LDR r5, [r1, #8] + LDR r6, [r1, #12] +L_chacha_thumb2_setkey_same_keyb_ytes: + STM r0, {r3, r4, r5, r6} + POP {r4, r5, r6, r7, pc} + /* Cycle Count = 60 */ + .size wc_chacha_setkey,.-wc_chacha_setkey + .text + .align 4 + .globl wc_chacha_crypt_bytes + .type wc_chacha_crypt_bytes, %function +wc_chacha_crypt_bytes: + PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} + SUB sp, sp, #0x34 + MOV lr, r0 + STRD r0, r1, [sp, #32] + STRD r2, r3, [sp, #40] +L_chacha_thumb2_crypt_block: + /* Put x[12]..x[15] onto stack. */ + LDRD r4, r5, [lr, #48] + LDRD r6, r7, [lr, #56] + STRD r4, r5, [sp, #16] + STRD r6, r7, [sp, #24] + /* Load x[0]..x[12] into registers. */ + LDM lr, {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12} + /* 10x 2 full rounds to perform. 
*/ + MOV lr, #0xa + STR lr, [sp, #48] +L_chacha_thumb2_crypt_loop: + /* 0, 4, 8, 12 */ + /* 1, 5, 9, 13 */ + LDR lr, [sp, #20] + ADD r0, r0, r4 + ADD r1, r1, r5 + EOR r12, r12, r0 + EOR lr, lr, r1 + ROR r12, r12, #16 + ROR lr, lr, #16 + ADD r8, r8, r12 + ADD r9, r9, lr + EOR r4, r4, r8 + EOR r5, r5, r9 + ROR r4, r4, #20 + ROR r5, r5, #20 + ADD r0, r0, r4 + ADD r1, r1, r5 + EOR r12, r12, r0 + EOR lr, lr, r1 + ROR r12, r12, #24 + ROR lr, lr, #24 + ADD r8, r8, r12 + ADD r9, r9, lr + EOR r4, r4, r8 + EOR r5, r5, r9 + ROR r4, r4, #25 + ROR r5, r5, #25 + STR r12, [sp, #16] + STR lr, [sp, #20] + /* 2, 6, 10, 14 */ + /* 3, 7, 11, 15 */ + LDR r12, [sp, #24] + LDR lr, [sp, #28] + ADD r2, r2, r6 + ADD r3, r3, r7 + EOR r12, r12, r2 + EOR lr, lr, r3 + ROR r12, r12, #16 + ROR lr, lr, #16 + ADD r10, r10, r12 + ADD r11, r11, lr + EOR r6, r6, r10 + EOR r7, r7, r11 + ROR r6, r6, #20 + ROR r7, r7, #20 + ADD r2, r2, r6 + ADD r3, r3, r7 + EOR r12, r12, r2 + EOR lr, lr, r3 + ROR r12, r12, #24 + ROR lr, lr, #24 + ADD r10, r10, r12 + ADD r11, r11, lr + EOR r6, r6, r10 + EOR r7, r7, r11 + ROR r6, r6, #25 + ROR r7, r7, #25 + /* 3, 4, 9, 14 */ + /* 0, 5, 10, 15 */ + ADD r3, r3, r4 + ADD r0, r0, r5 + EOR r12, r12, r3 + EOR lr, lr, r0 + ROR r12, r12, #16 + ROR lr, lr, #16 + ADD r9, r9, r12 + ADD r10, r10, lr + EOR r4, r4, r9 + EOR r5, r5, r10 + ROR r4, r4, #20 + ROR r5, r5, #20 + ADD r3, r3, r4 + ADD r0, r0, r5 + EOR r12, r12, r3 + EOR lr, lr, r0 + ROR r12, r12, #24 + ROR lr, lr, #24 + ADD r9, r9, r12 + ADD r10, r10, lr + EOR r4, r4, r9 + EOR r5, r5, r10 + ROR r4, r4, #25 + ROR r5, r5, #25 + STR r12, [sp, #24] + STR lr, [sp, #28] + LDR r12, [sp, #16] + LDR lr, [sp, #20] + /* 1, 6, 11, 12 */ + /* 2, 7, 8, 13 */ + ADD r1, r1, r6 + ADD r2, r2, r7 + EOR r12, r12, r1 + EOR lr, lr, r2 + ROR r12, r12, #16 + ROR lr, lr, #16 + ADD r11, r11, r12 + ADD r8, r8, lr + EOR r6, r6, r11 + EOR r7, r7, r8 + ROR r6, r6, #20 + ROR r7, r7, #20 + ADD r1, r1, r6 + ADD r2, r2, r7 + EOR r12, r12, r1 + EOR lr, lr, r2 + 
ROR r12, r12, #24 + ROR lr, lr, #24 + ADD r11, r11, r12 + ADD r8, r8, lr + EOR r6, r6, r11 + EOR r7, r7, r8 + ROR r6, r6, #25 + ROR r7, r7, #25 + STR lr, [sp, #20] + /* Check if we have done enough rounds. */ + LDR lr, [sp, #48] + SUBS lr, lr, #0x1 + STR lr, [sp, #48] +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BGT L_chacha_thumb2_crypt_loop +#else + BGT.N L_chacha_thumb2_crypt_loop +#endif + STM sp, {r8, r9, r10, r11, r12} + LDR lr, [sp, #32] + MOV r12, sp + /* Add in original state */ + LDM lr!, {r8, r9, r10, r11} + ADD r0, r0, r8 + ADD r1, r1, r9 + ADD r2, r2, r10 + ADD r3, r3, r11 + LDM lr!, {r8, r9, r10, r11} + ADD r4, r4, r8 + ADD r5, r5, r9 + ADD r6, r6, r10 + ADD r7, r7, r11 + LDM r12, {r8, r9} + LDM lr!, {r10, r11} + ADD r8, r8, r10 + ADD r9, r9, r11 + STM r12!, {r8, r9} + LDM r12, {r8, r9} + LDM lr!, {r10, r11} + ADD r8, r8, r10 + ADD r9, r9, r11 + STM r12!, {r8, r9} + LDM r12, {r8, r9} + LDM lr!, {r10, r11} + ADD r8, r8, r10 + ADD r9, r9, r11 + ADD r10, r10, #0x1 + STM r12!, {r8, r9} + STR r10, [lr, #-8] + LDM r12, {r8, r9} + LDM lr, {r10, r11} + ADD r8, r8, r10 + ADD r9, r9, r11 + STM r12, {r8, r9} + LDR r12, [sp, #44] + CMP r12, #0x40 +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BLT L_chacha_thumb2_crypt_lt_block +#else + BLT.N L_chacha_thumb2_crypt_lt_block +#endif + LDR r12, [sp, #40] + LDR lr, [sp, #36] + /* XOR state into 64 bytes. 
*/ + LDR r8, [r12] + LDR r9, [r12, #4] + LDR r10, [r12, #8] + LDR r11, [r12, #12] + EOR r0, r0, r8 + EOR r1, r1, r9 + EOR r2, r2, r10 + EOR r3, r3, r11 + STR r0, [lr] + STR r1, [lr, #4] + STR r2, [lr, #8] + STR r3, [lr, #12] + LDR r8, [r12, #16] + LDR r9, [r12, #20] + LDR r10, [r12, #24] + LDR r11, [r12, #28] + EOR r4, r4, r8 + EOR r5, r5, r9 + EOR r6, r6, r10 + EOR r7, r7, r11 + STR r4, [lr, #16] + STR r5, [lr, #20] + STR r6, [lr, #24] + STR r7, [lr, #28] + LDR r4, [sp] + LDR r5, [sp, #4] + LDR r6, [sp, #8] + LDR r7, [sp, #12] + LDR r8, [r12, #32] + LDR r9, [r12, #36] + LDR r10, [r12, #40] + LDR r11, [r12, #44] + EOR r4, r4, r8 + EOR r5, r5, r9 + EOR r6, r6, r10 + EOR r7, r7, r11 + STR r4, [lr, #32] + STR r5, [lr, #36] + STR r6, [lr, #40] + STR r7, [lr, #44] + LDR r4, [sp, #16] + LDR r5, [sp, #20] + LDR r6, [sp, #24] + LDR r7, [sp, #28] + LDR r8, [r12, #48] + LDR r9, [r12, #52] + LDR r10, [r12, #56] + LDR r11, [r12, #60] + EOR r4, r4, r8 + EOR r5, r5, r9 + EOR r6, r6, r10 + EOR r7, r7, r11 + STR r4, [lr, #48] + STR r5, [lr, #52] + STR r6, [lr, #56] + STR r7, [lr, #60] + LDR r3, [sp, #44] + ADD r12, r12, #0x40 + ADD lr, lr, #0x40 + STR r12, [sp, #40] + STR lr, [sp, #36] + SUBS r3, r3, #0x40 + LDR lr, [sp, #32] + STR r3, [sp, #44] +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BNE L_chacha_thumb2_crypt_block +#else + BNE.N L_chacha_thumb2_crypt_block +#endif +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + B L_chacha_thumb2_crypt_done +#else + B.N L_chacha_thumb2_crypt_done +#endif +L_chacha_thumb2_crypt_lt_block: + /* Store in over field of ChaCha. 
*/ + LDR lr, [sp, #32] + ADD r12, lr, #0x44 + STM r12!, {r0, r1, r2, r3, r4, r5, r6, r7} + LDM sp, {r0, r1, r2, r3, r4, r5, r6, r7} + STM r12, {r0, r1, r2, r3, r4, r5, r6, r7} + LDRD r2, r3, [sp, #40] + LDR r1, [sp, #36] + RSB r12, r3, #0x40 + STR r12, [lr, #64] + ADD lr, lr, #0x44 +L_chacha_thumb2_crypt_16byte_loop: + CMP r3, #0x10 +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BLT L_chacha_thumb2_crypt_word_loop +#else + BLT.N L_chacha_thumb2_crypt_word_loop +#endif + /* 16 bytes of state XORed into message. */ + LDM lr!, {r4, r5, r6, r7} + LDR r8, [r2] + LDR r9, [r2, #4] + LDR r10, [r2, #8] + LDR r11, [r2, #12] + EOR r8, r8, r4 + EOR r9, r9, r5 + EOR r10, r10, r6 + EOR r11, r11, r7 + SUBS r3, r3, #0x10 + STR r8, [r1] + STR r9, [r1, #4] + STR r10, [r1, #8] + STR r11, [r1, #12] +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BEQ L_chacha_thumb2_crypt_done +#else + BEQ.N L_chacha_thumb2_crypt_done +#endif + ADD r2, r2, #0x10 + ADD r1, r1, #0x10 +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + B L_chacha_thumb2_crypt_16byte_loop +#else + B.N L_chacha_thumb2_crypt_16byte_loop +#endif +L_chacha_thumb2_crypt_word_loop: + CMP r3, #0x4 +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BLT L_chacha_thumb2_crypt_byte_start +#else + BLT.N L_chacha_thumb2_crypt_byte_start +#endif + /* 4 bytes of state XORed into message. 
*/ + LDR r4, [lr] + LDR r8, [r2] + EOR r8, r8, r4 + SUBS r3, r3, #0x4 + STR r8, [r1] +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BEQ L_chacha_thumb2_crypt_done +#else + BEQ.N L_chacha_thumb2_crypt_done +#endif + ADD lr, lr, #0x4 + ADD r2, r2, #0x4 + ADD r1, r1, #0x4 +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + B L_chacha_thumb2_crypt_word_loop +#else + B.N L_chacha_thumb2_crypt_word_loop +#endif +L_chacha_thumb2_crypt_byte_start: + LDR r4, [lr] +L_chacha_thumb2_crypt_byte_loop: + LDRB r8, [r2] + EOR r8, r8, r4 + SUBS r3, r3, #0x1 + STRB r8, [r1] +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BEQ L_chacha_thumb2_crypt_done +#else + BEQ.N L_chacha_thumb2_crypt_done +#endif + LSR r4, r4, #8 + ADD r2, r2, #0x1 + ADD r1, r1, #0x1 +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + B L_chacha_thumb2_crypt_byte_loop +#else + B.N L_chacha_thumb2_crypt_byte_loop +#endif +L_chacha_thumb2_crypt_done: + ADD sp, sp, #0x34 + POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} + /* Cycle Count = 508 */ + .size wc_chacha_crypt_bytes,.-wc_chacha_crypt_bytes + .text + .align 4 + .globl wc_chacha_use_over + .type wc_chacha_use_over, %function +wc_chacha_use_over: + PUSH {r4, r5, r6, r7, r8, r9, r10, r11, lr} +L_chacha_thumb2_over_16byte_loop: + CMP r3, #0x10 +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BLT L_chacha_thumb2_over_word_loop +#else + BLT.N L_chacha_thumb2_over_word_loop +#endif + /* 16 bytes of state XORed into message. 
*/ + LDR r4, [r0] + LDR r5, [r0, #4] + LDR r6, [r0, #8] + LDR r7, [r0, #12] + LDR r8, [r2] + LDR r9, [r2, #4] + LDR r10, [r2, #8] + LDR r11, [r2, #12] + EOR r4, r4, r8 + EOR r5, r5, r9 + EOR r6, r6, r10 + EOR r7, r7, r11 + SUBS r3, r3, #0x10 + STR r4, [r1] + STR r5, [r1, #4] + STR r6, [r1, #8] + STR r7, [r1, #12] +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BEQ L_chacha_thumb2_over_done +#else + BEQ.N L_chacha_thumb2_over_done +#endif + ADD r0, r0, #0x10 + ADD r2, r2, #0x10 + ADD r1, r1, #0x10 +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + B L_chacha_thumb2_over_16byte_loop +#else + B.N L_chacha_thumb2_over_16byte_loop +#endif +L_chacha_thumb2_over_word_loop: + CMP r3, #0x4 +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BLT L_chacha_thumb2_over_byte_loop +#else + BLT.N L_chacha_thumb2_over_byte_loop +#endif + /* 4 bytes of state XORed into message. */ + LDR r4, [r0] + LDR r8, [r2] + EOR r4, r4, r8 + SUBS r3, r3, #0x4 + STR r4, [r1] +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BEQ L_chacha_thumb2_over_done +#else + BEQ.N L_chacha_thumb2_over_done +#endif + ADD r0, r0, #0x4 + ADD r2, r2, #0x4 + ADD r1, r1, #0x4 +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + B L_chacha_thumb2_over_word_loop +#else + B.N L_chacha_thumb2_over_word_loop +#endif +L_chacha_thumb2_over_byte_loop: + /* 4 bytes of state XORed into message. 
*/ + LDRB r4, [r0] + LDRB r8, [r2] + EOR r4, r4, r8 + SUBS r3, r3, #0x1 + STRB r4, [r1] +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + BEQ L_chacha_thumb2_over_done +#else + BEQ.N L_chacha_thumb2_over_done +#endif + ADD r0, r0, #0x1 + ADD r2, r2, #0x1 + ADD r1, r1, #0x1 +#if defined(__GNUC__) || defined(__ICCARM__) || defined(__IAR_SYSTEMS_ICC__) + B L_chacha_thumb2_over_byte_loop +#else + B.N L_chacha_thumb2_over_byte_loop +#endif +L_chacha_thumb2_over_done: + POP {r4, r5, r6, r7, r8, r9, r10, r11, pc} + /* Cycle Count = 108 */ + .size wc_chacha_use_over,.-wc_chacha_use_over +#endif /* HAVE_CHACHA */ +#endif /* !__aarch64__ && __thumb__ */ +#endif /* WOLFSSL_ARMASM */ + +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",%progbits +#endif +#endif /* !WOLFSSL_ARMASM_INLINE */ diff --git a/wolfcrypt/src/port/arm/thumb2-chacha-asm_c.c b/wolfcrypt/src/port/arm/thumb2-chacha-asm_c.c new file mode 100644 index 000000000..0dcdc4e3e --- /dev/null +++ b/wolfcrypt/src/port/arm/thumb2-chacha-asm_c.c @@ -0,0 +1,731 @@ +/* thumb2-chacha-asm + * + * Copyright (C) 2006-2024 wolfSSL Inc. + * + * This file is part of wolfSSL. + * + * wolfSSL is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * wolfSSL is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA + */ + +/* Generated using (from wolfssl): + * cd ../scripts + * ruby ./chacha/chacha.rb thumb2 ../wolfssl/wolfcrypt/src/port/arm/thumb2-chacha-asm.c + */ + +#ifdef HAVE_CONFIG_H + #include +#endif /* HAVE_CONFIG_H */ +#include +#include + +#ifdef WOLFSSL_ARMASM +#if !defined(__aarch64__) && defined(__thumb__) +#ifdef WOLFSSL_ARMASM_INLINE + +#ifdef __IAR_SYSTEMS_ICC__ +#define __asm__ asm +#define __volatile__ volatile +#define WOLFSSL_NO_VAR_ASSIGN_REG +#endif /* __IAR_SYSTEMS_ICC__ */ +#ifdef __KEIL__ +#define __asm__ __asm +#define __volatile__ volatile +#endif /* __KEIL__ */ +#ifdef HAVE_CHACHA +#include + +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG +void wc_chacha_setiv(word32* x_p, const byte* iv_p, word32 counter_p) +#else +void wc_chacha_setiv(word32* x, const byte* iv, word32 counter) +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ +{ +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG + register word32* x __asm__ ("r0") = (word32*)x_p; + register const byte* iv __asm__ ("r1") = (const byte*)iv_p; + register word32 counter __asm__ ("r2") = (word32)counter_p; +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ + + __asm__ __volatile__ ( + "ADD r3, %[x], #0x34\n\t" + "LDR r4, [%[iv]]\n\t" + "LDR r5, [%[iv], #4]\n\t" + "LDR r6, [%[iv], #8]\n\t" + "STR %[counter], [%[x], #48]\n\t" +#ifdef BIG_ENDIAN_ORDER + "REV r4, r4\n\t" + "REV r5, r5\n\t" + "REV r6, r6\n\t" +#endif /* BIG_ENDIAN_ORDER */ + "STM r3, {r4, r5, r6}\n\t" + : [x] "+r" (x), [iv] "+r" (iv), [counter] "+r" (counter) + : + : "memory", "r3", "r4", "r5", "r6", "cc" + ); +} + +XALIGNED(16) static const uint32_t L_chacha_thumb2_constants[] = { + 0x61707865, 0x3120646e, 0x79622d36, 0x6b206574, + 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574, +}; + +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG +void wc_chacha_setkey(word32* x_p, const byte* 
key_p, word32 keySz_p) +#else +void wc_chacha_setkey(word32* x, const byte* key, word32 keySz) +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ +{ +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG + register word32* x __asm__ ("r0") = (word32*)x_p; + register const byte* key __asm__ ("r1") = (const byte*)key_p; + register word32 keySz __asm__ ("r2") = (word32)keySz_p; + register uint32_t* L_chacha_thumb2_constants_c __asm__ ("r3") = (uint32_t*)&L_chacha_thumb2_constants; +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ + + __asm__ __volatile__ ( + "MOV r7, %[L_chacha_thumb2_constants]\n\t" + "SUBS %[keySz], %[keySz], #0x10\n\t" + "ADD r7, r7, %[keySz]\n\t" + /* Start state with constants */ + "LDM r7, {r3, r4, r5, r6}\n\t" + "STM %[x]!, {r3, r4, r5, r6}\n\t" + /* Next is first 16 bytes of key. */ + "LDR r3, [%[key]]\n\t" + "LDR r4, [%[key], #4]\n\t" + "LDR r5, [%[key], #8]\n\t" + "LDR r6, [%[key], #12]\n\t" +#ifdef BIG_ENDIAN_ORDER + "REV r3, r3\n\t" + "REV r4, r4\n\t" + "REV r5, r5\n\t" + "REV r6, r6\n\t" +#endif /* BIG_ENDIAN_ORDER */ + "STM %[x]!, {r3, r4, r5, r6}\n\t" + /* Next 16 bytes of key. */ +#if defined(__GNUC__) + "BEQ L_chacha_thumb2_setkey_same_keyb_ytes_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BEQ.N L_chacha_thumb2_setkey_same_keyb_ytes\n\t" +#else + "BEQ.N L_chacha_thumb2_setkey_same_keyb_ytes_%=\n\t" +#endif + /* Update key pointer for next 16 bytes. 
*/ + "ADD %[key], %[key], %[keySz]\n\t" + "LDR r3, [%[key]]\n\t" + "LDR r4, [%[key], #4]\n\t" + "LDR r5, [%[key], #8]\n\t" + "LDR r6, [%[key], #12]\n\t" + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_chacha_thumb2_setkey_same_keyb_ytes:\n\t" +#else + "L_chacha_thumb2_setkey_same_keyb_ytes_%=:\n\t" +#endif + "STM %[x], {r3, r4, r5, r6}\n\t" +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG + : [x] "+r" (x), [key] "+r" (key), [keySz] "+r" (keySz), + [L_chacha_thumb2_constants] "+r" (L_chacha_thumb2_constants_c) + : + : "memory", "r4", "r5", "r6", "r7", "cc" +#else + : [x] "+r" (x), [key] "+r" (key), [keySz] "+r" (keySz) + : [L_chacha_thumb2_constants] "r" (L_chacha_thumb2_constants) + : "memory", "r4", "r5", "r6", "r7", "cc" +#endif /* WOLFSSL_NO_VAR_ASSIGN_REG */ + ); +} + +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG +void wc_chacha_crypt_bytes(ChaCha* ctx_p, byte* c_p, const byte* m_p, word32 len_p) +#else +void wc_chacha_crypt_bytes(ChaCha* ctx, byte* c, const byte* m, word32 len) +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ +{ +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG + register ChaCha* ctx __asm__ ("r0") = (ChaCha*)ctx_p; + register byte* c __asm__ ("r1") = (byte*)c_p; + register const byte* m __asm__ ("r2") = (const byte*)m_p; + register word32 len __asm__ ("r3") = (word32)len_p; +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ + + __asm__ __volatile__ ( + "SUB sp, sp, #0x34\n\t" + "MOV lr, %[ctx]\n\t" + "STRD %[ctx], %[c], [sp, #32]\n\t" + "STRD %[m], %[len], [sp, #40]\n\t" + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_chacha_thumb2_crypt_block:\n\t" +#else + "L_chacha_thumb2_crypt_block_%=:\n\t" +#endif + /* Put x[12]..x[15] onto stack. */ + "LDRD r4, r5, [lr, #48]\n\t" + "LDRD r6, r7, [lr, #56]\n\t" + "STRD r4, r5, [sp, #16]\n\t" + "STRD r6, r7, [sp, #24]\n\t" + /* Load x[0]..x[12] into registers. */ + "LDM lr, {%[ctx], %[c], %[m], %[len], r4, r5, r6, r7, r8, r9, r10, r11, r12}\n\t" + /* 10x 2 full rounds to perform. 
*/ + "MOV lr, #0xa\n\t" + "STR lr, [sp, #48]\n\t" + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_chacha_thumb2_crypt_loop:\n\t" +#else + "L_chacha_thumb2_crypt_loop_%=:\n\t" +#endif + /* 0, 4, 8, 12 */ + /* 1, 5, 9, 13 */ + "LDR lr, [sp, #20]\n\t" + "ADD %[ctx], %[ctx], r4\n\t" + "ADD %[c], %[c], r5\n\t" + "EOR r12, r12, %[ctx]\n\t" + "EOR lr, lr, %[c]\n\t" + "ROR r12, r12, #16\n\t" + "ROR lr, lr, #16\n\t" + "ADD r8, r8, r12\n\t" + "ADD r9, r9, lr\n\t" + "EOR r4, r4, r8\n\t" + "EOR r5, r5, r9\n\t" + "ROR r4, r4, #20\n\t" + "ROR r5, r5, #20\n\t" + "ADD %[ctx], %[ctx], r4\n\t" + "ADD %[c], %[c], r5\n\t" + "EOR r12, r12, %[ctx]\n\t" + "EOR lr, lr, %[c]\n\t" + "ROR r12, r12, #24\n\t" + "ROR lr, lr, #24\n\t" + "ADD r8, r8, r12\n\t" + "ADD r9, r9, lr\n\t" + "EOR r4, r4, r8\n\t" + "EOR r5, r5, r9\n\t" + "ROR r4, r4, #25\n\t" + "ROR r5, r5, #25\n\t" + "STR r12, [sp, #16]\n\t" + "STR lr, [sp, #20]\n\t" + /* 2, 6, 10, 14 */ + /* 3, 7, 11, 15 */ + "LDR r12, [sp, #24]\n\t" + "LDR lr, [sp, #28]\n\t" + "ADD %[m], %[m], r6\n\t" + "ADD %[len], %[len], r7\n\t" + "EOR r12, r12, %[m]\n\t" + "EOR lr, lr, %[len]\n\t" + "ROR r12, r12, #16\n\t" + "ROR lr, lr, #16\n\t" + "ADD r10, r10, r12\n\t" + "ADD r11, r11, lr\n\t" + "EOR r6, r6, r10\n\t" + "EOR r7, r7, r11\n\t" + "ROR r6, r6, #20\n\t" + "ROR r7, r7, #20\n\t" + "ADD %[m], %[m], r6\n\t" + "ADD %[len], %[len], r7\n\t" + "EOR r12, r12, %[m]\n\t" + "EOR lr, lr, %[len]\n\t" + "ROR r12, r12, #24\n\t" + "ROR lr, lr, #24\n\t" + "ADD r10, r10, r12\n\t" + "ADD r11, r11, lr\n\t" + "EOR r6, r6, r10\n\t" + "EOR r7, r7, r11\n\t" + "ROR r6, r6, #25\n\t" + "ROR r7, r7, #25\n\t" + /* 3, 4, 9, 14 */ + /* 0, 5, 10, 15 */ + "ADD %[len], %[len], r4\n\t" + "ADD %[ctx], %[ctx], r5\n\t" + "EOR r12, r12, %[len]\n\t" + "EOR lr, lr, %[ctx]\n\t" + "ROR r12, r12, #16\n\t" + "ROR lr, lr, #16\n\t" + "ADD r9, r9, r12\n\t" + "ADD r10, r10, lr\n\t" + "EOR r4, r4, r9\n\t" + "EOR r5, r5, r10\n\t" + "ROR r4, r4, #20\n\t" + "ROR r5, r5, #20\n\t" + 
"ADD %[len], %[len], r4\n\t" + "ADD %[ctx], %[ctx], r5\n\t" + "EOR r12, r12, %[len]\n\t" + "EOR lr, lr, %[ctx]\n\t" + "ROR r12, r12, #24\n\t" + "ROR lr, lr, #24\n\t" + "ADD r9, r9, r12\n\t" + "ADD r10, r10, lr\n\t" + "EOR r4, r4, r9\n\t" + "EOR r5, r5, r10\n\t" + "ROR r4, r4, #25\n\t" + "ROR r5, r5, #25\n\t" + "STR r12, [sp, #24]\n\t" + "STR lr, [sp, #28]\n\t" + "LDR r12, [sp, #16]\n\t" + "LDR lr, [sp, #20]\n\t" + /* 1, 6, 11, 12 */ + /* 2, 7, 8, 13 */ + "ADD %[c], %[c], r6\n\t" + "ADD %[m], %[m], r7\n\t" + "EOR r12, r12, %[c]\n\t" + "EOR lr, lr, %[m]\n\t" + "ROR r12, r12, #16\n\t" + "ROR lr, lr, #16\n\t" + "ADD r11, r11, r12\n\t" + "ADD r8, r8, lr\n\t" + "EOR r6, r6, r11\n\t" + "EOR r7, r7, r8\n\t" + "ROR r6, r6, #20\n\t" + "ROR r7, r7, #20\n\t" + "ADD %[c], %[c], r6\n\t" + "ADD %[m], %[m], r7\n\t" + "EOR r12, r12, %[c]\n\t" + "EOR lr, lr, %[m]\n\t" + "ROR r12, r12, #24\n\t" + "ROR lr, lr, #24\n\t" + "ADD r11, r11, r12\n\t" + "ADD r8, r8, lr\n\t" + "EOR r6, r6, r11\n\t" + "EOR r7, r7, r8\n\t" + "ROR r6, r6, #25\n\t" + "ROR r7, r7, #25\n\t" + "STR lr, [sp, #20]\n\t" + /* Check if we have done enough rounds. 
*/ + "LDR lr, [sp, #48]\n\t" + "SUBS lr, lr, #0x1\n\t" + "STR lr, [sp, #48]\n\t" +#if defined(__GNUC__) + "BGT L_chacha_thumb2_crypt_loop_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BGT.N L_chacha_thumb2_crypt_loop\n\t" +#else + "BGT.N L_chacha_thumb2_crypt_loop_%=\n\t" +#endif + "STM sp, {r8, r9, r10, r11, r12}\n\t" + "LDR lr, [sp, #32]\n\t" + "MOV r12, sp\n\t" + /* Add in original state */ + "LDM lr!, {r8, r9, r10, r11}\n\t" + "ADD %[ctx], %[ctx], r8\n\t" + "ADD %[c], %[c], r9\n\t" + "ADD %[m], %[m], r10\n\t" + "ADD %[len], %[len], r11\n\t" + "LDM lr!, {r8, r9, r10, r11}\n\t" + "ADD r4, r4, r8\n\t" + "ADD r5, r5, r9\n\t" + "ADD r6, r6, r10\n\t" + "ADD r7, r7, r11\n\t" + "LDM r12, {r8, r9}\n\t" + "LDM lr!, {r10, r11}\n\t" + "ADD r8, r8, r10\n\t" + "ADD r9, r9, r11\n\t" + "STM r12!, {r8, r9}\n\t" + "LDM r12, {r8, r9}\n\t" + "LDM lr!, {r10, r11}\n\t" + "ADD r8, r8, r10\n\t" + "ADD r9, r9, r11\n\t" + "STM r12!, {r8, r9}\n\t" + "LDM r12, {r8, r9}\n\t" + "LDM lr!, {r10, r11}\n\t" + "ADD r8, r8, r10\n\t" + "ADD r9, r9, r11\n\t" + "ADD r10, r10, #0x1\n\t" + "STM r12!, {r8, r9}\n\t" + "STR r10, [lr, #-8]\n\t" + "LDM r12, {r8, r9}\n\t" + "LDM lr, {r10, r11}\n\t" + "ADD r8, r8, r10\n\t" + "ADD r9, r9, r11\n\t" + "STM r12, {r8, r9}\n\t" + "LDR r12, [sp, #44]\n\t" + "CMP r12, #0x40\n\t" +#if defined(__GNUC__) + "BLT L_chacha_thumb2_crypt_lt_block_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BLT.N L_chacha_thumb2_crypt_lt_block\n\t" +#else + "BLT.N L_chacha_thumb2_crypt_lt_block_%=\n\t" +#endif + "LDR r12, [sp, #40]\n\t" + "LDR lr, [sp, #36]\n\t" + /* XOR state into 64 bytes. 
*/ + "LDR r8, [r12]\n\t" + "LDR r9, [r12, #4]\n\t" + "LDR r10, [r12, #8]\n\t" + "LDR r11, [r12, #12]\n\t" + "EOR %[ctx], %[ctx], r8\n\t" + "EOR %[c], %[c], r9\n\t" + "EOR %[m], %[m], r10\n\t" + "EOR %[len], %[len], r11\n\t" + "STR %[ctx], [lr]\n\t" + "STR %[c], [lr, #4]\n\t" + "STR %[m], [lr, #8]\n\t" + "STR %[len], [lr, #12]\n\t" + "LDR r8, [r12, #16]\n\t" + "LDR r9, [r12, #20]\n\t" + "LDR r10, [r12, #24]\n\t" + "LDR r11, [r12, #28]\n\t" + "EOR r4, r4, r8\n\t" + "EOR r5, r5, r9\n\t" + "EOR r6, r6, r10\n\t" + "EOR r7, r7, r11\n\t" + "STR r4, [lr, #16]\n\t" + "STR r5, [lr, #20]\n\t" + "STR r6, [lr, #24]\n\t" + "STR r7, [lr, #28]\n\t" + "LDR r4, [sp]\n\t" + "LDR r5, [sp, #4]\n\t" + "LDR r6, [sp, #8]\n\t" + "LDR r7, [sp, #12]\n\t" + "LDR r8, [r12, #32]\n\t" + "LDR r9, [r12, #36]\n\t" + "LDR r10, [r12, #40]\n\t" + "LDR r11, [r12, #44]\n\t" + "EOR r4, r4, r8\n\t" + "EOR r5, r5, r9\n\t" + "EOR r6, r6, r10\n\t" + "EOR r7, r7, r11\n\t" + "STR r4, [lr, #32]\n\t" + "STR r5, [lr, #36]\n\t" + "STR r6, [lr, #40]\n\t" + "STR r7, [lr, #44]\n\t" + "LDR r4, [sp, #16]\n\t" + "LDR r5, [sp, #20]\n\t" + "LDR r6, [sp, #24]\n\t" + "LDR r7, [sp, #28]\n\t" + "LDR r8, [r12, #48]\n\t" + "LDR r9, [r12, #52]\n\t" + "LDR r10, [r12, #56]\n\t" + "LDR r11, [r12, #60]\n\t" + "EOR r4, r4, r8\n\t" + "EOR r5, r5, r9\n\t" + "EOR r6, r6, r10\n\t" + "EOR r7, r7, r11\n\t" + "STR r4, [lr, #48]\n\t" + "STR r5, [lr, #52]\n\t" + "STR r6, [lr, #56]\n\t" + "STR r7, [lr, #60]\n\t" + "LDR %[len], [sp, #44]\n\t" + "ADD r12, r12, #0x40\n\t" + "ADD lr, lr, #0x40\n\t" + "STR r12, [sp, #40]\n\t" + "STR lr, [sp, #36]\n\t" + "SUBS %[len], %[len], #0x40\n\t" + "LDR lr, [sp, #32]\n\t" + "STR %[len], [sp, #44]\n\t" +#if defined(__GNUC__) + "BNE L_chacha_thumb2_crypt_block_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BNE.N L_chacha_thumb2_crypt_block\n\t" +#else + "BNE.N L_chacha_thumb2_crypt_block_%=\n\t" +#endif +#if defined(__GNUC__) + "B L_chacha_thumb2_crypt_done_%=\n\t" +#elif 
defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "B.N L_chacha_thumb2_crypt_done\n\t" +#else + "B.N L_chacha_thumb2_crypt_done_%=\n\t" +#endif + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_chacha_thumb2_crypt_lt_block:\n\t" +#else + "L_chacha_thumb2_crypt_lt_block_%=:\n\t" +#endif + /* Store in over field of ChaCha. */ + "LDR lr, [sp, #32]\n\t" + "ADD r12, lr, #0x44\n\t" + "STM r12!, {%[ctx], %[c], %[m], %[len], r4, r5, r6, r7}\n\t" + "LDM sp, {%[ctx], %[c], %[m], %[len], r4, r5, r6, r7}\n\t" + "STM r12, {%[ctx], %[c], %[m], %[len], r4, r5, r6, r7}\n\t" + "LDRD %[m], %[len], [sp, #40]\n\t" + "LDR %[c], [sp, #36]\n\t" + "RSB r12, %[len], #0x40\n\t" + "STR r12, [lr, #64]\n\t" + "ADD lr, lr, #0x44\n\t" + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_chacha_thumb2_crypt_16byte_loop:\n\t" +#else + "L_chacha_thumb2_crypt_16byte_loop_%=:\n\t" +#endif + "CMP %[len], #0x10\n\t" +#if defined(__GNUC__) + "BLT L_chacha_thumb2_crypt_word_loop_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BLT.N L_chacha_thumb2_crypt_word_loop\n\t" +#else + "BLT.N L_chacha_thumb2_crypt_word_loop_%=\n\t" +#endif + /* 16 bytes of state XORed into message. 
*/ + "LDM lr!, {r4, r5, r6, r7}\n\t" + "LDR r8, [%[m]]\n\t" + "LDR r9, [%[m], #4]\n\t" + "LDR r10, [%[m], #8]\n\t" + "LDR r11, [%[m], #12]\n\t" + "EOR r8, r8, r4\n\t" + "EOR r9, r9, r5\n\t" + "EOR r10, r10, r6\n\t" + "EOR r11, r11, r7\n\t" + "SUBS %[len], %[len], #0x10\n\t" + "STR r8, [%[c]]\n\t" + "STR r9, [%[c], #4]\n\t" + "STR r10, [%[c], #8]\n\t" + "STR r11, [%[c], #12]\n\t" +#if defined(__GNUC__) + "BEQ L_chacha_thumb2_crypt_done_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BEQ.N L_chacha_thumb2_crypt_done\n\t" +#else + "BEQ.N L_chacha_thumb2_crypt_done_%=\n\t" +#endif + "ADD %[m], %[m], #0x10\n\t" + "ADD %[c], %[c], #0x10\n\t" +#if defined(__GNUC__) + "B L_chacha_thumb2_crypt_16byte_loop_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "B.N L_chacha_thumb2_crypt_16byte_loop\n\t" +#else + "B.N L_chacha_thumb2_crypt_16byte_loop_%=\n\t" +#endif + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_chacha_thumb2_crypt_word_loop:\n\t" +#else + "L_chacha_thumb2_crypt_word_loop_%=:\n\t" +#endif + "CMP %[len], #0x4\n\t" +#if defined(__GNUC__) + "BLT L_chacha_thumb2_crypt_byte_start_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BLT.N L_chacha_thumb2_crypt_byte_start\n\t" +#else + "BLT.N L_chacha_thumb2_crypt_byte_start_%=\n\t" +#endif + /* 4 bytes of state XORed into message. 
*/ + "LDR r4, [lr]\n\t" + "LDR r8, [%[m]]\n\t" + "EOR r8, r8, r4\n\t" + "SUBS %[len], %[len], #0x4\n\t" + "STR r8, [%[c]]\n\t" +#if defined(__GNUC__) + "BEQ L_chacha_thumb2_crypt_done_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BEQ.N L_chacha_thumb2_crypt_done\n\t" +#else + "BEQ.N L_chacha_thumb2_crypt_done_%=\n\t" +#endif + "ADD lr, lr, #0x4\n\t" + "ADD %[m], %[m], #0x4\n\t" + "ADD %[c], %[c], #0x4\n\t" +#if defined(__GNUC__) + "B L_chacha_thumb2_crypt_word_loop_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "B.N L_chacha_thumb2_crypt_word_loop\n\t" +#else + "B.N L_chacha_thumb2_crypt_word_loop_%=\n\t" +#endif + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_chacha_thumb2_crypt_byte_start:\n\t" +#else + "L_chacha_thumb2_crypt_byte_start_%=:\n\t" +#endif + "LDR r4, [lr]\n\t" + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_chacha_thumb2_crypt_byte_loop:\n\t" +#else + "L_chacha_thumb2_crypt_byte_loop_%=:\n\t" +#endif + "LDRB r8, [%[m]]\n\t" + "EOR r8, r8, r4\n\t" + "SUBS %[len], %[len], #0x1\n\t" + "STRB r8, [%[c]]\n\t" +#if defined(__GNUC__) + "BEQ L_chacha_thumb2_crypt_done_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BEQ.N L_chacha_thumb2_crypt_done\n\t" +#else + "BEQ.N L_chacha_thumb2_crypt_done_%=\n\t" +#endif + "LSR r4, r4, #8\n\t" + "ADD %[m], %[m], #0x1\n\t" + "ADD %[c], %[c], #0x1\n\t" +#if defined(__GNUC__) + "B L_chacha_thumb2_crypt_byte_loop_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "B.N L_chacha_thumb2_crypt_byte_loop\n\t" +#else + "B.N L_chacha_thumb2_crypt_byte_loop_%=\n\t" +#endif + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_chacha_thumb2_crypt_done:\n\t" +#else + "L_chacha_thumb2_crypt_done_%=:\n\t" +#endif + "ADD sp, sp, #0x34\n\t" + : [ctx] "+r" (ctx), [c] "+r" (c), [m] "+r" (m), [len] "+r" (len) + : + : "memory", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "lr", "cc" + ); 
+} + +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG +void wc_chacha_use_over(byte* over_p, byte* output_p, const byte* input_p, word32 len_p) +#else +void wc_chacha_use_over(byte* over, byte* output, const byte* input, word32 len) +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ +{ +#ifndef WOLFSSL_NO_VAR_ASSIGN_REG + register byte* over __asm__ ("r0") = (byte*)over_p; + register byte* output __asm__ ("r1") = (byte*)output_p; + register const byte* input __asm__ ("r2") = (const byte*)input_p; + register word32 len __asm__ ("r3") = (word32)len_p; +#endif /* !WOLFSSL_NO_VAR_ASSIGN_REG */ + + __asm__ __volatile__ ( + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_chacha_thumb2_over_16byte_loop:\n\t" +#else + "L_chacha_thumb2_over_16byte_loop_%=:\n\t" +#endif + "CMP %[len], #0x10\n\t" +#if defined(__GNUC__) + "BLT L_chacha_thumb2_over_word_loop_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BLT.N L_chacha_thumb2_over_word_loop\n\t" +#else + "BLT.N L_chacha_thumb2_over_word_loop_%=\n\t" +#endif + /* 16 bytes of state XORed into message. 
*/ + "LDR r4, [%[over]]\n\t" + "LDR r5, [%[over], #4]\n\t" + "LDR r6, [%[over], #8]\n\t" + "LDR r7, [%[over], #12]\n\t" + "LDR r8, [%[input]]\n\t" + "LDR r9, [%[input], #4]\n\t" + "LDR r10, [%[input], #8]\n\t" + "LDR r11, [%[input], #12]\n\t" + "EOR r4, r4, r8\n\t" + "EOR r5, r5, r9\n\t" + "EOR r6, r6, r10\n\t" + "EOR r7, r7, r11\n\t" + "SUBS %[len], %[len], #0x10\n\t" + "STR r4, [%[output]]\n\t" + "STR r5, [%[output], #4]\n\t" + "STR r6, [%[output], #8]\n\t" + "STR r7, [%[output], #12]\n\t" +#if defined(__GNUC__) + "BEQ L_chacha_thumb2_over_done_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BEQ.N L_chacha_thumb2_over_done\n\t" +#else + "BEQ.N L_chacha_thumb2_over_done_%=\n\t" +#endif + "ADD %[over], %[over], #0x10\n\t" + "ADD %[input], %[input], #0x10\n\t" + "ADD %[output], %[output], #0x10\n\t" +#if defined(__GNUC__) + "B L_chacha_thumb2_over_16byte_loop_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "B.N L_chacha_thumb2_over_16byte_loop\n\t" +#else + "B.N L_chacha_thumb2_over_16byte_loop_%=\n\t" +#endif + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_chacha_thumb2_over_word_loop:\n\t" +#else + "L_chacha_thumb2_over_word_loop_%=:\n\t" +#endif + "CMP %[len], #0x4\n\t" +#if defined(__GNUC__) + "BLT L_chacha_thumb2_over_byte_loop_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BLT.N L_chacha_thumb2_over_byte_loop\n\t" +#else + "BLT.N L_chacha_thumb2_over_byte_loop_%=\n\t" +#endif + /* 4 bytes of state XORed into message. 
*/ + "LDR r4, [%[over]]\n\t" + "LDR r8, [%[input]]\n\t" + "EOR r4, r4, r8\n\t" + "SUBS %[len], %[len], #0x4\n\t" + "STR r4, [%[output]]\n\t" +#if defined(__GNUC__) + "BEQ L_chacha_thumb2_over_done_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BEQ.N L_chacha_thumb2_over_done\n\t" +#else + "BEQ.N L_chacha_thumb2_over_done_%=\n\t" +#endif + "ADD %[over], %[over], #0x4\n\t" + "ADD %[input], %[input], #0x4\n\t" + "ADD %[output], %[output], #0x4\n\t" +#if defined(__GNUC__) + "B L_chacha_thumb2_over_word_loop_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "B.N L_chacha_thumb2_over_word_loop\n\t" +#else + "B.N L_chacha_thumb2_over_word_loop_%=\n\t" +#endif + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_chacha_thumb2_over_byte_loop:\n\t" +#else + "L_chacha_thumb2_over_byte_loop_%=:\n\t" +#endif + /* 4 bytes of state XORed into message. */ + "LDRB r4, [%[over]]\n\t" + "LDRB r8, [%[input]]\n\t" + "EOR r4, r4, r8\n\t" + "SUBS %[len], %[len], #0x1\n\t" + "STRB r4, [%[output]]\n\t" +#if defined(__GNUC__) + "BEQ L_chacha_thumb2_over_done_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "BEQ.N L_chacha_thumb2_over_done\n\t" +#else + "BEQ.N L_chacha_thumb2_over_done_%=\n\t" +#endif + "ADD %[over], %[over], #0x1\n\t" + "ADD %[input], %[input], #0x1\n\t" + "ADD %[output], %[output], #0x1\n\t" +#if defined(__GNUC__) + "B L_chacha_thumb2_over_byte_loop_%=\n\t" +#elif defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "B.N L_chacha_thumb2_over_byte_loop\n\t" +#else + "B.N L_chacha_thumb2_over_byte_loop_%=\n\t" +#endif + "\n" +#if defined(__IAR_SYSTEMS_ICC__) && (__VER__ < 9000000) + "L_chacha_thumb2_over_done:\n\t" +#else + "L_chacha_thumb2_over_done_%=:\n\t" +#endif + : [over] "+r" (over), [output] "+r" (output), [input] "+r" (input), [len] "+r" (len) + : + : "memory", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "cc" + ); +} + +#endif /* HAVE_CHACHA */ +#endif /* !__aarch64__ && __thumb__ 
 */ +#endif /* WOLFSSL_ARMASM */ +#endif /* WOLFSSL_ARMASM_INLINE */ diff --git a/wolfcrypt/src/port/arm/thumb2-chacha.c b/wolfcrypt/src/port/arm/thumb2-chacha.c new file mode 100644 index 000000000..5e8e323ae --- /dev/null +++ b/wolfcrypt/src/port/arm/thumb2-chacha.c @@ -0,0 +1,187 @@ +/* thumb2-chacha.c + * + * Copyright (C) 2006-2024 wolfSSL Inc. + * + * This file is part of wolfSSL. + * + * wolfSSL is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * wolfSSL is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA + */ + + +#ifdef HAVE_CONFIG_H + #include <config.h> +#endif + +#include <wolfssl/wolfcrypt/settings.h> + +#if defined(WOLFSSL_ARMASM) && defined(__thumb__) +#ifdef HAVE_CHACHA + +#include <wolfssl/wolfcrypt/chacha.h> +#include <wolfssl/wolfcrypt/error-crypt.h> +#include <wolfssl/wolfcrypt/logging.h> +#include <wolfssl/wolfcrypt/cpuid.h> +#ifdef NO_INLINE + #include <wolfssl/wolfcrypt/misc.h> +#else + #define WOLFSSL_MISC_INCLUDED + #include <wolfcrypt/src/misc.c> +#endif + +#ifdef CHACHA_AEAD_TEST + #include <stdio.h> +#endif + +#ifdef CHACHA_TEST + #include <stdio.h> +#endif + + +extern void wc_chacha_setiv(word32* x, const byte* iv, word32 counter); + +/* Set the Initialization Vector (IV) and counter into ChaCha context. + * + * Set up iv(nonce). Earlier versions used 64 bits instead of 96, this version + * uses the typical AEAD 96 bit nonce and can do record sizes of 256 GB. + * + * @param [in] ctx ChaCha context. + * @param [in] iv IV to set. + * @param [in] counter Starting value of counter. + * @return 0 on success. + * @return BAD_FUNC_ARG when ctx or IV is NULL. 
+ */ +int wc_Chacha_SetIV(ChaCha* ctx, const byte* iv, word32 counter) +{ + int ret = 0; +#ifdef CHACHA_AEAD_TEST + word32 i; + + printf("NONCE : "); + if (iv != NULL) { + for (i = 0; i < CHACHA_IV_BYTES; i++) { + printf("%02x", iv[i]); + } + } + printf("\n\n"); +#endif + + /* Validate parameters. */ + if ((ctx == NULL) || (iv == NULL)) { + ret = BAD_FUNC_ARG; + } + if (ret == 0) { + /* No unused bytes to XOR into input. */ + ctx->left = 0; + + /* Set counter and IV into state. */ + wc_chacha_setiv(ctx->X, iv, counter); + } + + return ret; +} + +extern void wc_chacha_setkey(word32* x, const byte* key, word32 keySz); + +/* Set the key into the ChaCha context. + * + * Key setup. 8 word iv (nonce) + * + * @param [in] ctx ChaCha context. + * @param [in] key Key to set. + * @param [in] keySz Length of key in bytes. Valid values: + * CHACHA_MAX_KEY_SZ and (CHACHA_MAX_KEY_SZ / 2) + * @return 0 on success. + * @return BAD_FUNC_ARG when ctx or key is NULL. + * @return BAD_FUNC_ARG when keySz is invalid. + */ +int wc_Chacha_SetKey(ChaCha* ctx, const byte* key, word32 keySz) +{ + int ret = 0; + +#ifdef CHACHA_AEAD_TEST + printf("ChaCha key used :\n"); + if (key != NULL) { + word32 i; + for (i = 0; i < keySz; i++) { + printf("%02x", key[i]); + if ((i % 8) == 7) + printf("\n"); + } + } + printf("\n\n"); +#endif + + /* Validate parameters. */ + if ((ctx == NULL) || (key == NULL)) { + ret = BAD_FUNC_ARG; + } + else if ((keySz != (CHACHA_MAX_KEY_SZ / 2)) && + (keySz != CHACHA_MAX_KEY_SZ )) { + ret = BAD_FUNC_ARG; + } + + if (ret == 0) { + ctx->left = 0; + + wc_chacha_setkey(ctx->X, key, keySz); + } + + return ret; +} + +extern void wc_chacha_use_over(byte* over, byte* output, const byte* input, + word32 len); +extern void wc_chacha_crypt_bytes(ChaCha* ctx, byte* c, const byte* m, + word32 len); + +/* API to encrypt/decrypt a message of any size. + * + * @param [in] ctx ChaCha context. + * @param [out] output Enciphered output. + * @param [in] input Input to encipher. 
+ * @param [in] len Length of input in bytes. + * @return 0 on success. + * @return BAD_FUNC_ARG when ctx, output or input is NULL. + */ +int wc_Chacha_Process(ChaCha* ctx, byte* output, const byte* input, word32 len) +{ + int ret = 0; + + if ((ctx == NULL) || (output == NULL) || (input == NULL)) { + ret = BAD_FUNC_ARG; + } + + /* Handle left over bytes from last block. */ + if ((ret == 0) && (len > 0) && (ctx->left > 0)) { + byte* over = ((byte*)ctx->over) + CHACHA_CHUNK_BYTES - ctx->left; + word32 l = min(len, ctx->left); + + wc_chacha_use_over(over, output, input, l); + + ctx->left -= l; + input += l; + output += l; + len -= l; + } + + if ((ret == 0) && (len != 0)) { + wc_chacha_crypt_bytes(ctx, output, input, len); + } + + return ret; +} + +#endif /* HAVE_CHACHA */ +#endif /* WOLFSSL_ARMASM && __thumb__ */