diff --git a/configure.ac b/configure.ac index 1da02cc48..dbb7c5977 100644 --- a/configure.ac +++ b/configure.ac @@ -745,7 +745,7 @@ then test "$enable_psk" = "" && enable_psk=yes test "$enable_cmac" = "" && enable_cmac=yes test "$enable_siphash" = "" && enable_siphash=yes - test "$enable_xts" = "" && enable_xts=yes + test "$enable_aesxts" = "" && enable_aesxts=yes test "$enable_ocsp" = "" && enable_ocsp=yes test "$enable_ocspstapling" = "" && test "$enable_ocsp" != "no" && enable_ocspstapling=yes test "$enable_ocspstapling2" = "" && test "$enable_ocsp" != "no" && enable_ocspstapling2=yes @@ -933,7 +933,7 @@ then test "$enable_psk" = "" && enable_psk=yes test "$enable_cmac" = "" && enable_cmac=yes test "$enable_siphash" = "" && enable_siphash=yes - test "$enable_xts" = "" && enable_xts=yes + test "$enable_aesxts" = "" && enable_aesxts=yes test "$enable_ocsp" = "" && enable_ocsp=yes test "$enable_ocspstapling" = "" && test "$enable_ocsp" != "no" && enable_ocspstapling=yes test "$enable_ocspstapling2" = "" && test "$enable_ocsp" != "no" && enable_ocspstapling2=yes @@ -4836,17 +4836,23 @@ AS_IF([test "x$ENABLED_CMAC" = "xyes"], # AES-XTS -AC_ARG_ENABLE([xts], - [AS_HELP_STRING([--enable-xts],[Enable XTS (default: disabled)])], - [ ENABLED_XTS=$enableval ], - [ ENABLED_XTS=no ] +AC_ARG_ENABLE([aesxts], + [AS_HELP_STRING([--enable-aesxts],[Enable AES XTS (default: disabled)])], + [ ENABLED_AESXTS=$enableval ], + [ ENABLED_AESXTS=no ] ) -AS_IF([test "x$ENABLED_XTS" = "xyes"], +# legacy old option name, for compatibility: +AC_ARG_ENABLE([xts], + [AS_HELP_STRING([--enable-xts],[Please use --enable-aesxts])], + [ ENABLED_AESXTS=$enableval ] + ) + +AS_IF([test "x$ENABLED_AESXTS" = "xyes"], [AM_CFLAGS="$AM_CFLAGS -DWOLFSSL_AES_XTS -DWOLFSSL_AES_DIRECT"]) -AS_IF([test "x$ENABLED_XTS" = "xyes" && test "x$ENABLED_INTELASM" = "xyes"], +AS_IF([test "x$ENABLED_AESXTS" = "xyes" && test "x$ENABLED_INTELASM" = "xyes"], [AM_CCASFLAGS="$AM_CCASFLAGS -DWOLFSSL_AES_XTS"]) -AS_IF([test 
"x$ENABLED_XTS" = "xyes" && test "x$ENABLED_AESNI" = "xyes"], +AS_IF([test "x$ENABLED_AESXTS" = "xyes" && test "x$ENABLED_AESNI" = "xyes"], [AM_CCASFLAGS="$AM_CCASFLAGS -DWOLFSSL_AES_XTS"]) # Web Server Build @@ -8028,6 +8034,37 @@ if test -n "$MPI_MAX_KEY_BITS" -o -n "$WITH_MAX_ECC_BITS"; then fi fi +AC_ARG_ENABLE([linuxkm-lkcapi-register], + [AS_HELP_STRING([--enable-linuxkm-lkcapi-register],[Register wolfCrypt implementations with the Linux Kernel Crypto API backplane. Possible values are "none", "all", "cbc(aes)", "cfb(aes)", "gcm(aes)", and "xts(aes)", or a comma-separate combination. (default: none)])], + [ENABLED_LINUXKM_LKCAPI_REGISTER=$enableval], + [ENABLED_LINUXKM_LKCAPI_REGISTER=none] + ) +if test "$ENABLED_LINUXKM_LKCAPI_REGISTER" != "none" +then + AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER" + + if test "$ENABLED_AESGCM" != "no" && test "$ENABLED_AESGCM_STREAM" = "no" && test "$ENABLED_AESNI" = "no" && test "$ENABLED_ARMASM" = "no" && test "$ENABLED_FIPS" = "no"; then + ENABLED_AESGCM_STREAM=yes + fi + + for lkcapi_alg in $(echo "$ENABLED_LINUXKM_LKCAPI_REGISTER" | tr ',' ' ') + do + case "$lkcapi_alg" in + all) AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_ALL" ;; + 'cbc(aes)') test "$ENABLED_AESCBC" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-CBC implementation not enabled.]) + AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESCBC" ;; + 'cfb(aes)') test "$ENABLED_AESCFB" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-CFB implementation not enabled.]) + AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESCFB" ;; + 'gcm(aes)') test "$ENABLED_AESGCM" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-GCM implementation not enabled.]) + test "$ENABLED_AESGCM_STREAM" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: --enable-aesgcm-stream is required for LKCAPI.]) + AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESGCM" ;; + 'xts(aes)') test "$ENABLED_AESXTS" != "no" || 
AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-XTS implementation not enabled.]) + AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESXTS" ;; + *) AC_MSG_ERROR([Unsupported LKCAPI algorithm "$lkcapi_alg".]) ;; + esac + done +fi + # Library Suffix LIBSUFFIX="" AC_ARG_WITH([libsuffix], @@ -8958,7 +8995,7 @@ AM_CONDITIONAL([BUILD_SNIFFER], [ test "x$ENABLED_SNIFFER" = "xyes" || test " AM_CONDITIONAL([BUILD_SNIFFTEST],[ test "x$ENABLED_SNIFFTEST" = "xyes"]) AM_CONDITIONAL([BUILD_AESGCM],[test "x$ENABLED_AESGCM" = "xyes" || test "x$ENABLED_USERSETTINGS" = "xyes"]) AM_CONDITIONAL([BUILD_AESCCM],[test "x$ENABLED_AESCCM" = "xyes" || test "x$ENABLED_USERSETTINGS" = "xyes"]) -AM_CONDITIONAL([BUILD_XTS],[test "x$ENABLED_XTS" = "xyes" || test "x$ENABLED_USERSETTINGS" = "xyes"]) +AM_CONDITIONAL([BUILD_AESXTS],[test "x$ENABLED_AESXTS" = "xyes" || test "x$ENABLED_USERSETTINGS" = "xyes"]) AM_CONDITIONAL([BUILD_ARMASM],[test "x$ENABLED_ARMASM" = "xyes"]) AM_CONDITIONAL([BUILD_ARMASM_INLINE],[test "x$ENABLED_ARMASM_INLINE" = "xyes"]) AM_CONDITIONAL([BUILD_ARMASM_CRYPTO],[test "x$ENABLED_ARMASM_CRYPTO" = "xyes"]) @@ -9397,6 +9434,7 @@ echo " * AES-CCM: $ENABLED_AESCCM" echo " * AES-CTR: $ENABLED_AESCTR" echo " * AES-CFB: $ENABLED_AESCFB" echo " * AES-OFB: $ENABLED_AESOFB" +echo " * AES-XTS: $ENABLED_AESXTS" echo " * AES-SIV: $ENABLED_AESSIV" echo " * AES-EAX: $ENABLED_AESEAX" echo " * AES Bitspliced: $ENABLED_AESBS" diff --git a/linuxkm/Kbuild b/linuxkm/Kbuild index 093a7a112..aa1e1c661 100644 --- a/linuxkm/Kbuild +++ b/linuxkm/Kbuild @@ -32,6 +32,10 @@ WOLFSSL_CFLAGS += -ffreestanding -Wframe-larger-than=$(MAX_STACK_FRAME_SIZE) -is ifeq "$(KERNEL_ARCH)" "x86" WOLFSSL_CFLAGS += -mpreferred-stack-boundary=4 +else ifeq "$(KERNEL_ARCH)" "aarch64" + WOLFSSL_CFLAGS += -mno-outline-atomics +else ifeq "$(KERNEL_ARCH)" "arm64" + WOLFSSL_CFLAGS += -mno-outline-atomics endif obj-m := libwolfssl.o @@ -47,9 +51,14 @@ $(obj)/linuxkm/module_exports.o: $(WOLFSSL_OBJ_TARGETS) # this 
mechanism only works in kernel 5.x+ (fallback to hardcoded value) hostprogs := linuxkm/get_thread_size always-y := $(hostprogs) + +HOST_EXTRACFLAGS += $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(KBUILD_CFLAGS) -static -fno-omit-frame-pointer + # "-mindirect-branch=keep -mfunction-return=keep" to avoid "undefined reference # to `__x86_return_thunk'" on CONFIG_RETHUNK kernels (5.19.0-rc7) -HOST_EXTRACFLAGS += $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(KBUILD_CFLAGS) -static -fno-omit-frame-pointer -mindirect-branch=keep -mfunction-return=keep +ifeq "$(KERNEL_ARCH)" "x86" + HOST_EXTRACFLAGS += -mindirect-branch=keep -mfunction-return=keep +endif # this rule is needed to get build to succeed in 4.x (get_thread_size still doesn't get built) $(obj)/linuxkm/get_thread_size: $(src)/linuxkm/get_thread_size.c @@ -149,10 +158,10 @@ ifneq "$(quiet)" "silent_" endif @cd "$(obj)" || exit $$?; \ for file in $(WOLFCRYPT_PIE_FILES); do \ - $(OBJCOPY) --rename-section .text=.text.wolfcrypt --rename-section .data=.data.wolfcrypt "$$file" || exit $$?; \ + $(OBJCOPY) --rename-section .text=.text.wolfcrypt --rename-section .data=.data.wolfcrypt --rename-section .rodata=.rodata.wolfcrypt "$$file" || exit $$?; \ done ifneq "$(quiet)" "silent_" - @echo ' wolfCrypt .{text,data} sections containerized to .{text,data}.wolfcrypt' + @echo ' wolfCrypt .{text,data,rodata} sections containerized to .{text,data,rodata}.wolfcrypt' endif $(src)/linuxkm/module_exports.c: rename-pie-text-and-data-sections diff --git a/linuxkm/include.am b/linuxkm/include.am index cec11ad2d..b89aab40a 100644 --- a/linuxkm/include.am +++ b/linuxkm/include.am @@ -12,4 +12,5 @@ EXTRA_DIST += m4/ax_linuxkm.m4 \ linuxkm/pie_redirect_table.c \ linuxkm/pie_last.c \ linuxkm/linuxkm_memory.c \ - linuxkm/linuxkm_wc_port.h + linuxkm/linuxkm_wc_port.h \ + linuxkm/lkcapi_glue.c diff --git a/linuxkm/linuxkm_memory.c b/linuxkm/linuxkm_memory.c index 58131606e..81a7dfab3 100644 --- a/linuxkm/linuxkm_memory.c +++ b/linuxkm/linuxkm_memory.c @@ -275,7 
+275,7 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(void) { struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(1); if (pstate == NULL) - return ENOMEM; + return MEMORY_E; /* allow for nested calls */ if (pstate->fpu_state != 0U) { @@ -314,7 +314,7 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(void) if (! warned_fpu_forbidden) pr_err("save_vector_registers_x86 called from IRQ handler.\n"); wc_linuxkm_fpu_state_release(pstate); - return EPERM; + return BAD_STATE_E; } else { #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \ (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \ @@ -380,3 +380,11 @@ void my__show_free_areas( return; } #endif + +#if defined(__PIE__) && defined(CONFIG_FORTIFY_SOURCE) +/* needed because FORTIFY_SOURCE inline implementations call fortify_panic(). */ +void __my_fortify_panic(const char *name) { + pr_emerg("__my_fortify_panic in %s\n", name); + BUG(); +} +#endif diff --git a/linuxkm/linuxkm_wc_port.h b/linuxkm/linuxkm_wc_port.h index f815ec354..6048589cb 100644 --- a/linuxkm/linuxkm_wc_port.h +++ b/linuxkm/linuxkm_wc_port.h @@ -65,8 +65,8 @@ (int)_xatoi_res; \ }) - /* Kbuild+gcc on x86 doesn't consistently honor the default ALIGN16 on stack objects, - * but gives adequate alignment with "32". + /* Kbuild+gcc on x86 doesn't consistently honor the default ALIGN16 on stack + * objects, but gives adequate alignment with "32". */ #if defined(CONFIG_X86) && !defined(ALIGN16) #define ALIGN16 __attribute__ ( (aligned (32))) @@ -119,8 +119,133 @@ #include #include #include + + #ifdef CONFIG_FORTIFY_SOURCE + #ifdef __PIE__ + /* the inline definitions in fortify-string.h use non-inline + * fortify_panic(). + */ + extern void __my_fortify_panic(const char *name) __noreturn __cold; + #define fortify_panic __my_fortify_panic + #endif + + /* the _FORTIFY_SOURCE macros and implementations for several string + * functions are incompatible with libwolfssl, so just reimplement with + * inlines and remap with macros. 
+ */ + + #define __ARCH_STRLEN_NO_REDIRECT + #define __ARCH_MEMCPY_NO_REDIRECT + #define __ARCH_MEMSET_NO_REDIRECT + #define __ARCH_MEMMOVE_NO_REDIRECT + + /* the inline definitions in fortify-string.h use non-inline + * strlen(). + */ + static inline size_t strlen(const char *s) { + const char *s_start = s; + while (*s) + ++s; + return (size_t)((uintptr_t)s - (uintptr_t)s_start); + } + + #include + + #undef strlen + #define strlen(s) \ + ((__builtin_constant_p(s) && __builtin_constant_p(*(s))) ? \ + (sizeof(s) - 1) : strlen(s)) + + static inline void *my_memcpy(void *dest, const void *src, size_t n) { + if (! (((uintptr_t)dest | (uintptr_t)src | (uintptr_t)n) + & (uintptr_t)(sizeof(uintptr_t) - 1))) + { + uintptr_t *src_longs = (uintptr_t *)src, + *dest_longs = (uintptr_t *)dest, + *endp = (uintptr_t *)((u8 *)src + n); + while (src_longs < endp) + *dest_longs++ = *src_longs++; + } else { + u8 *src_bytes = (u8 *)src, + *dest_bytes = (u8 *)dest, + *endp = src_bytes + n; + while (src_bytes < endp) + *dest_bytes++ = *src_bytes++; + } + return dest; + } + #undef memcpy + #define memcpy my_memcpy + + static inline void *my_memset(void *dest, int c, size_t n) { + if (! (((uintptr_t)dest | (uintptr_t)n) + & (uintptr_t)(sizeof(uintptr_t) - 1))) + { + uintptr_t c_long = __builtin_choose_expr( + sizeof(uintptr_t) == 8, + (uintptr_t)(u8)c * 0x0101010101010101UL, + (uintptr_t)(u8)c * 0x01010101U + ); + uintptr_t *dest_longs = (uintptr_t *)dest, + *endp = (uintptr_t *)((u8 *)dest_longs + n); + while (dest_longs < endp) + *dest_longs++ = c_long; + } else { + u8 *dest_bytes = (u8 *)dest, *endp = dest_bytes + n; + while (dest_bytes < endp) + *dest_bytes++ = (u8)c; + } + return dest; + } + #undef memset + #define memset my_memset + + static inline void *my_memmove(void *dest, const void *src, size_t n) { + if (! 
(((uintptr_t)dest | (uintptr_t)src | (uintptr_t)n) + & (uintptr_t)(sizeof(uintptr_t) - 1))) + { + uintptr_t *src_longs = (uintptr_t *)src, + *dest_longs = (uintptr_t *)dest; + n >>= __builtin_choose_expr( + sizeof(uintptr_t) == 8, + 3U, + 2U); + if (src_longs < dest_longs) { + uintptr_t *startp = src_longs; + src_longs += n - 1; + dest_longs += n - 1; + while (src_longs >= startp) + *dest_longs-- = *src_longs--; + } else if (src_longs > dest_longs) { + uintptr_t *endp = src_longs + n; + while (src_longs < endp) + *dest_longs++ = *src_longs++; + } + } else { + u8 *src_bytes = (u8 *)src, *dest_bytes = (u8 *)dest; + if (src_bytes < dest_bytes) { + u8 *startp = src_bytes; + src_bytes += n - 1; + dest_bytes += n - 1; + while (src_bytes >= startp) + *dest_bytes-- = *src_bytes--; + } else if (src_bytes > dest_bytes) { + u8 *endp = src_bytes + n; + while (src_bytes < endp) + *dest_bytes++ = *src_bytes++; + } + } + return dest; + } + #undef memmove + #define memmove my_memmove + + #endif /* CONFIG_FORTIFY_SOURCE */ + #include #include + #include + #ifdef __PIE__ /* without this, mm.h brings in static, but not inline, pmd_to_page(), * with direct references to global vmem variables. @@ -146,7 +271,33 @@ #include #include - #if defined(WOLFSSL_AESNI) || defined(USE_INTEL_SPEEDUP) || defined(WOLFSSL_SP_X86_64_ASM) + #ifdef LINUXKM_LKCAPI_REGISTER + #include + #include + #include + #include + #include + + /* the LKCAPI assumes that expanded encrypt and decrypt keys will stay + * loaded simultaneously, and the Linux in-tree implementations have two + * AES key structs in each context, one for each direction. in + * linuxkm/lkcapi_glue.c (used for CBC, CFB, and GCM), we do the same + * thing with "struct km_AesCtx". however, wolfCrypt struct AesXts + * already has two AES expanded keys, the main and tweak, and the tweak + * is always used in the encrypt direction regardless of the main + * direction. 
to avoid allocating and computing a duplicate second + * tweak encrypt key, we set + * WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS, which adds a second + * Aes slot to wolfCrypt's struct AesXts, and activates support for + * AES_ENCRYPTION_AND_DECRYPTION on AES-XTS. + */ + #ifndef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + #define WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + #endif + #endif + + #if defined(WOLFSSL_AESNI) || defined(USE_INTEL_SPEEDUP) || \ + defined(WOLFSSL_SP_X86_64_ASM) #ifndef CONFIG_X86 #error X86 SIMD extensions requested, but CONFIG_X86 is not set. #endif @@ -172,20 +323,38 @@ #endif #endif - /* benchmarks.c uses floating point math, so needs a working SAVE_VECTOR_REGISTERS(). */ - #if defined(WOLFSSL_LINUXKM_BENCHMARKS) && !defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) + /* benchmarks.c uses floating point math, so needs a working + * SAVE_VECTOR_REGISTERS(). + */ + #if defined(WOLFSSL_LINUXKM_BENCHMARKS) && \ + !defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) #define WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS #endif - #if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && defined(CONFIG_X86) + #if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && \ + defined(CONFIG_X86) #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) #include #else #include #endif #ifndef SAVE_VECTOR_REGISTERS - #define SAVE_VECTOR_REGISTERS(fail_clause) { int _svr_ret = save_vector_registers_x86(); if (_svr_ret != 0) { fail_clause } } - #define SAVE_VECTOR_REGISTERS2() save_vector_registers_x86() + #define SAVE_VECTOR_REGISTERS(fail_clause) { \ + int _svr_ret = save_vector_registers_x86(); \ + if (_svr_ret != 0) { \ + fail_clause \ + } \ + } + #ifdef DEBUG_VECTOR_REGISTER_ACCESS_FUZZING + #define SAVE_VECTOR_REGISTERS2() ({ \ + int _fuzzer_ret = SAVE_VECTOR_REGISTERS2_fuzzer(); \ + (_fuzzer_ret == 0) ? 
\ + save_vector_registers_x86() : \ + _fuzzer_ret; \ + }) + #else + #define SAVE_VECTOR_REGISTERS2() save_vector_registers_x86() + #endif #endif #ifndef RESTORE_VECTOR_REGISTERS #define RESTORE_VECTOR_REGISTERS() restore_vector_registers_x86() @@ -291,6 +460,11 @@ #else typeof(printk) *printk; #endif + +#ifdef CONFIG_FORTIFY_SOURCE + typeof(__warn_printk) *__warn_printk; +#endif + typeof(snprintf) *snprintf; const unsigned char *_ctype; @@ -434,6 +608,11 @@ #else #define printk (wolfssl_linuxkm_get_pie_redirect_table()->printk) #endif + + #ifdef CONFIG_FORTIFY_SOURCE + #define __warn_printk (wolfssl_linuxkm_get_pie_redirect_table()->__warn_printk) + #endif + #define snprintf (wolfssl_linuxkm_get_pie_redirect_table()->snprintf) #define _ctype (wolfssl_linuxkm_get_pie_redirect_table()->_ctype) @@ -643,8 +822,9 @@ #define realloc(ptr, newsize) krealloc(ptr, WC_LINUXKM_ROUND_UP_P_OF_2(newsize), GFP_KERNEL) #endif -#ifdef WOLFSSL_TRACK_MEMORY #include + +#ifdef WOLFSSL_TRACK_MEMORY #define XMALLOC(s, h, t) ({(void)(h); (void)(t); wolfSSL_Malloc(s);}) #ifdef WOLFSSL_XFREE_NO_NULLNESS_CHECK #define XFREE(p, h, t) ({(void)(h); (void)(t); wolfSSL_Free(p);}) diff --git a/linuxkm/lkcapi_glue.c b/linuxkm/lkcapi_glue.c new file mode 100644 index 000000000..bfed01eb9 --- /dev/null +++ b/linuxkm/lkcapi_glue.c @@ -0,0 +1,2697 @@ +/* lkcapi_glue.c -- glue logic to register wolfCrypt implementations with + * the Linux Kernel Cryptosystem + * + * Copyright (C) 2006-2024 wolfSSL Inc. + * + * This file is part of wolfSSL. + * + * wolfSSL is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * wolfSSL is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA + */ + +#ifndef LINUXKM_LKCAPI_REGISTER + #error lkcapi_glue.c included in non-LINUXKM_LKCAPI_REGISTER project. +#endif + +#if defined(LINUXKM_LKCAPI_REGISTER_AESGCM) && defined(WOLFSSL_AESNI) && \ + defined(WC_AES_C_DYNAMIC_FALLBACK) + /* xxx temporary */ + #error LINUXKM_LKCAPI_REGISTER_AESGCM is incompatible with WOLFSSL_AESNI && WC_AES_C_DYNAMIC_FALLBACK +#endif + +#ifndef WOLFSSL_LINUXKM_LKCAPI_PRIORITY +/* Larger number means higher priority. The highest in-tree priority is 4001, + * in the Cavium driver. + */ +#define WOLFSSL_LINUXKM_LKCAPI_PRIORITY 10000 +#endif + +#ifndef NO_AES + +/* note the FIPS code will be returned on failure even in non-FIPS builds. */ +#define LINUXKM_LKCAPI_AES_KAT_MISMATCH_E AES_KAT_FIPS_E +#define LINUXKM_LKCAPI_AESGCM_KAT_MISMATCH_E AESGCM_KAT_FIPS_E + +#define WOLFKM_AESCBC_NAME "cbc(aes)" +#define WOLFKM_AESCFB_NAME "cfb(aes)" +#define WOLFKM_AESGCM_NAME "gcm(aes)" +#define WOLFKM_AESXTS_NAME "xts(aes)" + +#ifdef WOLFSSL_AESNI + #define WOLFKM_DRIVER_ISA_EXT "-aesni" +#else + #define WOLFKM_DRIVER_ISA_EXT "" +#endif + +#ifdef HAVE_FIPS + #ifndef HAVE_FIPS_VERSION + #define WOLFKM_DRIVER_FIPS "-fips-140" + #elif HAVE_FIPS_VERSION >= 5 + #define WOLFKM_DRIVER_FIPS "-fips-140-3" + #elif HAVE_FIPS_VERSION == 2 + #define WOLFKM_DRIVER_FIPS "-fips-140-2" + #else + #define WOLFKM_DRIVER_FIPS "-fips-140" + #endif +#else + #define WOLFKM_DRIVER_FIPS "" +#endif + +#define WOLFKM_DRIVER_SUFFIX \ + WOLFKM_DRIVER_ISA_EXT WOLFKM_DRIVER_FIPS "-wolfcrypt" + +#define WOLFKM_AESCBC_DRIVER ("cbc-aes" WOLFKM_DRIVER_SUFFIX) +#define WOLFKM_AESCFB_DRIVER ("cfb-aes" WOLFKM_DRIVER_SUFFIX) +#define WOLFKM_AESGCM_DRIVER ("gcm-aes" WOLFKM_DRIVER_SUFFIX) +#define 
WOLFKM_AESXTS_DRIVER ("xts-aes" WOLFKM_DRIVER_SUFFIX) + +#if defined(HAVE_AES_CBC) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCBC)) +static int linuxkm_test_aescbc(void); +#endif +#if defined(WOLFSSL_AES_CFB) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCFB)) +static int linuxkm_test_aescfb(void); +#endif +#if defined(HAVE_AESGCM) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESGCM)) && \ + (! (defined(WOLFSSL_AESNI) && defined(WC_AES_C_DYNAMIC_FALLBACK))) +static int linuxkm_test_aesgcm(void); +#endif +#if defined(WOLFSSL_AES_XTS) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESXTS)) +static int linuxkm_test_aesxts(void); +#endif + +/* km_AesX(): wrappers to wolfcrypt wc_AesX functions and + * structures. */ + +#include + +struct km_AesCtx { + Aes *aes_encrypt; /* allocated in km_AesInitCommon() to assure + * alignment, needed for AESNI. + */ + Aes *aes_decrypt; /* same. */ +}; + +#if defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCBC) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCFB) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESGCM) + +static void km_AesExitCommon(struct km_AesCtx * ctx); + +static int km_AesInitCommon( + struct km_AesCtx * ctx, + const char * name, + int need_decryption) +{ + int err; + + ctx->aes_encrypt = (Aes *)malloc(sizeof(*ctx->aes_encrypt)); + + if (! ctx->aes_encrypt) { + pr_err("%s: allocation of %zu bytes for encryption key failed.\n", + name, sizeof(*ctx->aes_encrypt)); + return MEMORY_E; + } + + err = wc_AesInit(ctx->aes_encrypt, NULL, INVALID_DEVID); + + if (unlikely(err)) { + pr_err("%s: wc_AesInit failed: %d\n", name, err); + free(ctx->aes_encrypt); + ctx->aes_encrypt = NULL; + return -EINVAL; + } + + if (! need_decryption) { + ctx->aes_decrypt = NULL; + return 0; + } + + ctx->aes_decrypt = (Aes *)malloc(sizeof(*ctx->aes_decrypt)); + + if (! 
ctx->aes_decrypt) { + pr_err("%s: allocation of %zu bytes for decryption key failed.\n", + name, sizeof(*ctx->aes_decrypt)); + km_AesExitCommon(ctx); + return MEMORY_E; + } + + err = wc_AesInit(ctx->aes_decrypt, NULL, INVALID_DEVID); + + if (unlikely(err)) { + pr_err("%s: wc_AesInit failed: %d\n", name, err); + free(ctx->aes_decrypt); + ctx->aes_decrypt = NULL; + km_AesExitCommon(ctx); + return -EINVAL; + } + + return 0; +} + +static void km_AesExitCommon(struct km_AesCtx * ctx) +{ + if (ctx->aes_encrypt) { + wc_AesFree(ctx->aes_encrypt); + free(ctx->aes_encrypt); + ctx->aes_encrypt = NULL; + } + if (ctx->aes_decrypt) { + wc_AesFree(ctx->aes_decrypt); + free(ctx->aes_decrypt); + ctx->aes_decrypt = NULL; + } +} + +static int km_AesSetKeyCommon(struct km_AesCtx * ctx, const u8 *in_key, + unsigned int key_len, const char * name) +{ + int err; + + err = wc_AesSetKey(ctx->aes_encrypt, in_key, key_len, NULL, AES_ENCRYPTION); + + if (unlikely(err)) { + pr_err("%s: wc_AesSetKey for encryption key failed: %d\n", name, err); + return -ENOKEY; + } + + if (ctx->aes_decrypt) { + err = wc_AesSetKey(ctx->aes_decrypt, in_key, key_len, NULL, + AES_DECRYPTION); + + if (unlikely(err)) { + pr_err("%s: wc_AesSetKey for decryption key failed: %d\n", + name, err); + return -ENOKEY; + } + } + + return 0; +} + +#if defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCBC) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCFB) + +static void km_AesExit(struct crypto_skcipher *tfm) +{ + struct km_AesCtx * ctx = crypto_skcipher_ctx(tfm); + km_AesExitCommon(ctx); +} + +#endif /* LINUXKM_LKCAPI_REGISTER_ALL || + * LINUXKM_LKCAPI_REGISTER_AESCBC || + * LINUXKM_LKCAPI_REGISTER_AESCFB + */ + +#endif /* LINUXKM_LKCAPI_REGISTER_ALL || LINUXKM_LKCAPI_REGISTER_AESCBC || + * LINUXKM_LKCAPI_REGISTER_AESCFB || LINUXKM_LKCAPI_REGISTER_AESGCM + */ + +#if defined(HAVE_AES_CBC) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCBC)) + +static int 
km_AesCbcInit(struct crypto_skcipher *tfm) +{ + struct km_AesCtx * ctx = crypto_skcipher_ctx(tfm); + return km_AesInitCommon(ctx, WOLFKM_AESCBC_DRIVER, 1); +} + +static int km_AesCbcSetKey(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct km_AesCtx * ctx = crypto_skcipher_ctx(tfm); + return km_AesSetKeyCommon(ctx, in_key, key_len, WOLFKM_AESCBC_DRIVER); +} + +static int km_AesCbcEncrypt(struct skcipher_request *req) +{ + struct crypto_skcipher * tfm = NULL; + struct km_AesCtx * ctx = NULL; + struct skcipher_walk walk; + unsigned int nbytes = 0; + int err = 0; + + tfm = crypto_skcipher_reqtfm(req); + ctx = crypto_skcipher_ctx(tfm); + + err = skcipher_walk_virt(&walk, req, false); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_virt failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return err; + } + + while ((nbytes = walk.nbytes) != 0) { + err = wc_AesSetIV(ctx->aes_encrypt, walk.iv); + + if (unlikely(err)) { + pr_err("%s: wc_AesSetIV failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return -EINVAL; + } + + err = wc_AesCbcEncrypt(ctx->aes_encrypt, walk.dst.virt.addr, + walk.src.virt.addr, nbytes); + + if (unlikely(err)) { + pr_err("%s: wc_AesCbcEncrypt failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return -EINVAL; + } + + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_done failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return err; + } + } + + return err; +} + +static int km_AesCbcDecrypt(struct skcipher_request *req) +{ + struct crypto_skcipher * tfm = NULL; + struct km_AesCtx * ctx = NULL; + struct skcipher_walk walk; + unsigned int nbytes = 0; + int err = 0; + + tfm = crypto_skcipher_reqtfm(req); + ctx = crypto_skcipher_ctx(tfm); + + err = skcipher_walk_virt(&walk, req, false); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_virt failed: 
%d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return err; + } + + while ((nbytes = walk.nbytes) != 0) { + err = wc_AesSetIV(ctx->aes_decrypt, walk.iv); + + if (unlikely(err)) { + pr_err("%s: wc_AesSetKey failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return -EINVAL; + } + + err = wc_AesCbcDecrypt(ctx->aes_decrypt, walk.dst.virt.addr, + walk.src.virt.addr, nbytes); + + if (unlikely(err)) { + pr_err("%s: wc_AesCbcDecrypt failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return -EINVAL; + } + + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_done failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return err; + } + } + + return err; +} + +static struct skcipher_alg cbcAesAlg = { + .base.cra_name = WOLFKM_AESCBC_NAME, + .base.cra_driver_name = WOLFKM_AESCBC_DRIVER, + .base.cra_priority = WOLFSSL_LINUXKM_LKCAPI_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct km_AesCtx), + .base.cra_module = THIS_MODULE, + .init = km_AesCbcInit, + .exit = km_AesExit, + .min_keysize = AES_128_KEY_SIZE, + .max_keysize = AES_256_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = km_AesCbcSetKey, + .encrypt = km_AesCbcEncrypt, + .decrypt = km_AesCbcDecrypt, +}; +static int cbcAesAlg_loaded = 0; + +#endif /* HAVE_AES_CBC && + * (LINUXKM_LKCAPI_REGISTER_ALL || LINUXKM_LKCAPI_REGISTER_AESCBC) + */ + +#if defined(WOLFSSL_AES_CFB) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCFB)) + +static int km_AesCfbInit(struct crypto_skcipher *tfm) +{ + struct km_AesCtx * ctx = crypto_skcipher_ctx(tfm); + return km_AesInitCommon(ctx, WOLFKM_AESCFB_DRIVER, 0); +} + +static int km_AesCfbSetKey(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct km_AesCtx * ctx = crypto_skcipher_ctx(tfm); + return km_AesSetKeyCommon(ctx, in_key, 
key_len, WOLFKM_AESCFB_DRIVER); +} + +static int km_AesCfbEncrypt(struct skcipher_request *req) +{ + struct crypto_skcipher * tfm = NULL; + struct km_AesCtx * ctx = NULL; + struct skcipher_walk walk; + unsigned int nbytes = 0; + int err = 0; + + tfm = crypto_skcipher_reqtfm(req); + ctx = crypto_skcipher_ctx(tfm); + + err = skcipher_walk_virt(&walk, req, false); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_virt failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return err; + } + + while ((nbytes = walk.nbytes) != 0) { + err = wc_AesSetIV(ctx->aes_encrypt, walk.iv); + + if (unlikely(err)) { + pr_err("%s: wc_AesSetKey failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return -EINVAL; + } + + err = wc_AesCfbEncrypt(ctx->aes_encrypt, walk.dst.virt.addr, + walk.src.virt.addr, nbytes); + + if (unlikely(err)) { + pr_err("%s: wc_AesCfbEncrypt failed %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return -EINVAL; + } + + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_done failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return err; + } + } + + return err; +} + +static int km_AesCfbDecrypt(struct skcipher_request *req) +{ + struct crypto_skcipher * tfm = NULL; + struct km_AesCtx * ctx = NULL; + struct skcipher_walk walk; + unsigned int nbytes = 0; + int err = 0; + + tfm = crypto_skcipher_reqtfm(req); + ctx = crypto_skcipher_ctx(tfm); + + err = skcipher_walk_virt(&walk, req, false); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_virt failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return err; + } + + while ((nbytes = walk.nbytes) != 0) { + err = wc_AesSetIV(ctx->aes_encrypt, walk.iv); + + if (unlikely(err)) { + pr_err("%s: wc_AesSetKey failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return -EINVAL; + } + + err = 
wc_AesCfbDecrypt(ctx->aes_encrypt, walk.dst.virt.addr, + walk.src.virt.addr, nbytes); + + if (unlikely(err)) { + pr_err("%s: wc_AesCfbDecrypt failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return -EINVAL; + } + + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_done failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return err; + } + } + + return err; +} + +static struct skcipher_alg cfbAesAlg = { + .base.cra_name = WOLFKM_AESCFB_NAME, + .base.cra_driver_name = WOLFKM_AESCFB_DRIVER, + .base.cra_priority = WOLFSSL_LINUXKM_LKCAPI_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct km_AesCtx), + .base.cra_module = THIS_MODULE, + .init = km_AesCfbInit, + .exit = km_AesExit, + .min_keysize = AES_128_KEY_SIZE, + .max_keysize = AES_256_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = km_AesCfbSetKey, + .encrypt = km_AesCfbEncrypt, + .decrypt = km_AesCfbDecrypt, +}; +static int cfbAesAlg_loaded = 0; + +#endif /* WOLFSSL_AES_CFB && + * (LINUXKM_LKCAPI_REGISTER_ALL || LINUXKM_LKCAPI_REGISTER_AESCBC) + */ + +#if defined(HAVE_AESGCM) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESGCM)) && \ + (! (defined(WOLFSSL_AESNI) && defined(WC_AES_C_DYNAMIC_FALLBACK))) + +#ifndef WOLFSSL_AESGCM_STREAM + #error LKCAPI registration of AES-GCM requires WOLFSSL_AESGCM_STREAM (--enable-aesgcm-stream). 
+#endif + +static int km_AesGcmInit(struct crypto_aead * tfm) +{ + struct km_AesCtx * ctx = crypto_aead_ctx(tfm); + return km_AesInitCommon(ctx, WOLFKM_AESGCM_DRIVER, 0); +} + +static void km_AesGcmExit(struct crypto_aead * tfm) +{ + struct km_AesCtx * ctx = crypto_aead_ctx(tfm); + km_AesExitCommon(ctx); +} + +static int km_AesGcmSetKey(struct crypto_aead *tfm, const u8 *in_key, + unsigned int key_len) +{ + int err; + struct km_AesCtx * ctx = crypto_aead_ctx(tfm); + + err = wc_AesGcmSetKey(ctx->aes_encrypt, in_key, key_len); + + if (unlikely(err)) { + pr_err("%s: wc_AesGcmSetKey failed: %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); + return -ENOKEY; + } + + return 0; +} + +static int km_AesGcmSetAuthsize(struct crypto_aead *tfm, unsigned int authsize) +{ + (void)tfm; + if (authsize > AES_BLOCK_SIZE || + authsize < WOLFSSL_MIN_AUTH_TAG_SZ) { + pr_err("%s: invalid authsize: %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), authsize); + return -EINVAL; + } + return 0; +} + +/* + * aead ciphers receive data in scatterlists in following order: + * encrypt + * req->src: aad||plaintext + * req->dst: aad||ciphertext||tag + * decrypt + * req->src: aad||ciphertext||tag + * req->dst: aad||plaintext, return 0 or -EBADMSG + */ + +static int km_AesGcmEncrypt(struct aead_request *req) +{ + struct crypto_aead * tfm = NULL; + struct km_AesCtx * ctx = NULL; + struct skcipher_walk walk; + struct scatter_walk assocSgWalk; + unsigned int nbytes = 0; + u8 authTag[AES_BLOCK_SIZE]; + int err = 0; + unsigned int assocLeft = 0; + unsigned int cryptLeft = 0; + u8 * assoc = NULL; + + tfm = crypto_aead_reqtfm(req); + ctx = crypto_aead_ctx(tfm); + assocLeft = req->assoclen; + cryptLeft = req->cryptlen; + + scatterwalk_start(&assocSgWalk, req->src); + + err = skcipher_walk_aead_encrypt(&walk, req, false); + if (unlikely(err)) { + pr_err("%s: skcipher_walk_aead_encrypt failed: %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); + return -1; + } + + err = 
wc_AesGcmInit(ctx->aes_encrypt, NULL /*key*/, 0 /*keylen*/, walk.iv, + AES_BLOCK_SIZE); + if (unlikely(err)) { + pr_err("%s: wc_AesGcmInit failed: %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); + return -EINVAL; + } + + assoc = scatterwalk_map(&assocSgWalk); + if (unlikely(IS_ERR(assoc))) { + pr_err("%s: scatterwalk_map failed: %ld\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), + PTR_ERR(assoc)); + return err; + } + + err = wc_AesGcmEncryptUpdate(ctx->aes_encrypt, NULL, NULL, 0, + assoc, assocLeft); + assocLeft -= assocLeft; + scatterwalk_unmap(assoc); + assoc = NULL; + + if (unlikely(err)) { + pr_err("%s: wc_AesGcmEncryptUpdate failed: %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); + return -EINVAL; + } + + while ((nbytes = walk.nbytes) != 0) { + int n = nbytes; + + if (likely(cryptLeft && nbytes)) { + n = cryptLeft < nbytes ? cryptLeft : nbytes; + + err = wc_AesGcmEncryptUpdate( + ctx->aes_encrypt, + walk.dst.virt.addr, + walk.src.virt.addr, + cryptLeft, + NULL, 0); + nbytes -= n; + cryptLeft -= n; + } + + if (unlikely(err)) { + pr_err("%s: wc_AesGcmEncryptUpdate failed: %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); + return -EINVAL; + } + + err = skcipher_walk_done(&walk, nbytes); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_done failed: %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); + return err; + } + } + + err = wc_AesGcmEncryptFinal(ctx->aes_encrypt, authTag, tfm->authsize); + if (unlikely(err)) { + pr_err("%s: wc_AesGcmEncryptFinal failed with return code %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); + return -EINVAL; + } + + /* Now copy the auth tag into request scatterlist. 
*/ + scatterwalk_map_and_copy(authTag, req->dst, + req->assoclen + req->cryptlen, + tfm->authsize, 1); + + return err; +} + +static int km_AesGcmDecrypt(struct aead_request *req) +{ + struct crypto_aead * tfm = NULL; + struct km_AesCtx * ctx = NULL; + struct skcipher_walk walk; + struct scatter_walk assocSgWalk; + unsigned int nbytes = 0; + u8 origAuthTag[AES_BLOCK_SIZE]; + int err = 0; + unsigned int assocLeft = 0; + unsigned int cryptLeft = 0; + u8 * assoc = NULL; + + tfm = crypto_aead_reqtfm(req); + ctx = crypto_aead_ctx(tfm); + assocLeft = req->assoclen; + cryptLeft = req->cryptlen - tfm->authsize; + + /* Copy out original auth tag from req->src. */ + scatterwalk_map_and_copy(origAuthTag, req->src, + req->assoclen + req->cryptlen - tfm->authsize, + tfm->authsize, 0); + + scatterwalk_start(&assocSgWalk, req->src); + + err = skcipher_walk_aead_decrypt(&walk, req, false); + if (unlikely(err)) { + pr_err("%s: skcipher_walk_aead_decrypt failed: %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); + return err; + } + + err = wc_AesGcmInit(ctx->aes_encrypt, NULL /*key*/, 0 /*keylen*/, walk.iv, + AES_BLOCK_SIZE); + if (unlikely(err)) { + pr_err("%s: wc_AesGcmInit failed: %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); + return -EINVAL; + } + + assoc = scatterwalk_map(&assocSgWalk); + if (unlikely(IS_ERR(assoc))) { + pr_err("%s: scatterwalk_map failed: %ld\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), + PTR_ERR(assoc)); + return err; + } + + err = wc_AesGcmDecryptUpdate(ctx->aes_encrypt, NULL, NULL, 0, + assoc, assocLeft); + assocLeft -= assocLeft; + scatterwalk_unmap(assoc); + assoc = NULL; + + if (unlikely(err)) { + pr_err("%s: wc_AesGcmDecryptUpdate failed: %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); + return -EINVAL; + } + + while ((nbytes = walk.nbytes) != 0) { + int n = nbytes; + + if (likely(cryptLeft && nbytes)) { + n = cryptLeft < nbytes ? 
cryptLeft : nbytes; + + err = wc_AesGcmDecryptUpdate( + ctx->aes_encrypt, + walk.dst.virt.addr, + walk.src.virt.addr, + cryptLeft, + NULL, 0); + nbytes -= n; + cryptLeft -= n; + } + + if (unlikely(err)) { + pr_err("%s: wc_AesGcmDecryptUpdate failed: %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); + return -EINVAL; + } + + err = skcipher_walk_done(&walk, nbytes); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_done failed: %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); + return err; + } + } + + err = wc_AesGcmDecryptFinal(ctx->aes_encrypt, origAuthTag, tfm->authsize); + if (unlikely(err)) { + pr_err("%s: wc_AesGcmDecryptFinal failed with return code %d\n", + crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); + + if (err == AES_GCM_AUTH_E) { + return -EBADMSG; + } + else { + return -EINVAL; + } + } + + return err; +} + +static struct aead_alg gcmAesAead = { + .base.cra_name = WOLFKM_AESGCM_NAME, + .base.cra_driver_name = WOLFKM_AESGCM_DRIVER, + .base.cra_priority = WOLFSSL_LINUXKM_LKCAPI_PRIORITY, + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct km_AesCtx), + .base.cra_module = THIS_MODULE, + .init = km_AesGcmInit, + .exit = km_AesGcmExit, + .setkey = km_AesGcmSetKey, + .setauthsize = km_AesGcmSetAuthsize, + .encrypt = km_AesGcmEncrypt, + .decrypt = km_AesGcmDecrypt, + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = AES_BLOCK_SIZE, + .chunksize = AES_BLOCK_SIZE, +}; +static int gcmAesAead_loaded = 0; + +#endif /* HAVE_AESGCM && + * (LINUXKM_LKCAPI_REGISTER_ALL || LINUXKM_LKCAPI_REGISTER_AESGCM) && + * (! (WOLFSSL_AESNI && WC_AES_C_DYNAMIC_FALLBACK)) + */ + +#if defined(WOLFSSL_AES_XTS) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESXTS)) + +struct km_AesXtsCtx { + XtsAes *aesXts; /* allocated in km_AesXtsInitCommon() to assure alignment + * for AESNI. 
+ */ +}; + +static int km_AesXtsInitCommon(struct km_AesXtsCtx * ctx, const char * name) +{ + int err; + + ctx->aesXts = (XtsAes *)malloc(sizeof(*ctx->aesXts)); + + if (! ctx->aesXts) + return -MEMORY_E; + + err = wc_AesXtsInit(ctx->aesXts, NULL, INVALID_DEVID); + + if (unlikely(err)) { + pr_err("%s: km_AesXtsInitCommon failed: %d\n", name, err); + return -EINVAL; + } + + return 0; +} + +static int km_AesXtsInit(struct crypto_skcipher *tfm) +{ + struct km_AesXtsCtx * ctx = crypto_skcipher_ctx(tfm); + return km_AesXtsInitCommon(ctx, WOLFKM_AESXTS_DRIVER); +} + +static void km_AesXtsExit(struct crypto_skcipher *tfm) +{ + struct km_AesXtsCtx * ctx = crypto_skcipher_ctx(tfm); + wc_AesXtsFree(ctx->aesXts); + free(ctx->aesXts); + ctx->aesXts = NULL; +} + +static int km_AesXtsSetKey(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + int err; + struct km_AesXtsCtx * ctx = crypto_skcipher_ctx(tfm); + + err = wc_AesXtsSetKeyNoInit(ctx->aesXts, in_key, key_len, + AES_ENCRYPTION_AND_DECRYPTION); + + if (unlikely(err)) { + pr_err("%s: wc_AesXtsSetKeyNoInit failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return -EINVAL; + } + + return 0; +} + +/* see /usr/src/linux/drivers/md/dm-crypt.c */ + +static int km_AesXtsEncrypt(struct skcipher_request *req) +{ + int err = 0; + + struct crypto_skcipher * tfm = NULL; + struct km_AesXtsCtx * ctx = NULL; + struct skcipher_walk walk; + unsigned int nbytes = 0; + + tfm = crypto_skcipher_reqtfm(req); + ctx = crypto_skcipher_ctx(tfm); + + err = skcipher_walk_virt(&walk, req, false); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_virt failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return err; + } + + while ((nbytes = walk.nbytes) != 0) { + err = wc_AesXtsEncrypt(ctx->aesXts, walk.dst.virt.addr, + walk.src.virt.addr, nbytes, + walk.iv, walk.ivsize); + + if (unlikely(err)) { + pr_err("%s: wc_AesXtsEncrypt failed: %d\n", + 
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return -EINVAL; + } + + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_done failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return err; + } + } + + return err; +} + +static int km_AesXtsDecrypt(struct skcipher_request *req) +{ + int err = 0; + struct crypto_skcipher * tfm = NULL; + struct km_AesXtsCtx * ctx = NULL; + struct skcipher_walk walk; + unsigned int nbytes = 0; + + tfm = crypto_skcipher_reqtfm(req); + ctx = crypto_skcipher_ctx(tfm); + + err = skcipher_walk_virt(&walk, req, false); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_virt failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return err; + } + + while ((nbytes = walk.nbytes) != 0) { + err = wc_AesXtsDecrypt(ctx->aesXts, walk.dst.virt.addr, + walk.src.virt.addr, nbytes, + walk.iv, walk.ivsize); + + if (unlikely(err)) { + pr_err("%s: wc_AesXtsDecrypt failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return -EINVAL; + } + + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + + if (unlikely(err)) { + pr_err("%s: skcipher_walk_done failed: %d\n", + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); + return err; + } + } + + return err; +} + +static struct skcipher_alg xtsAesAlg = { + .base.cra_name = WOLFKM_AESXTS_NAME, + .base.cra_driver_name = WOLFKM_AESXTS_DRIVER, + .base.cra_priority = WOLFSSL_LINUXKM_LKCAPI_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct km_AesXtsCtx), + .base.cra_module = THIS_MODULE, + + .min_keysize = 2 * AES_128_KEY_SIZE, + .max_keysize = 2 * AES_256_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .walksize = 2 * AES_BLOCK_SIZE, + .init = km_AesXtsInit, + .exit = km_AesXtsExit, + .setkey = km_AesXtsSetKey, + .encrypt = km_AesXtsEncrypt, + .decrypt = km_AesXtsDecrypt +}; +static int xtsAesAlg_loaded = 0; + +#endif /* 
WOLFSSL_AES_XTS && + * (LINUXKM_LKCAPI_REGISTER_ALL || LINUXKM_LKCAPI_REGISTER_AESXTS) + */ + +/* cipher tests, cribbed from test.c, with supplementary LKCAPI tests: */ + +#if defined(HAVE_AES_CBC) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCBC)) + +static int linuxkm_test_aescbc(void) +{ + int ret = 0; + struct crypto_skcipher * tfm = NULL; + struct skcipher_request * req = NULL; + struct scatterlist src, dst; + Aes aes; + static const byte key32[] = + { + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66 + }; + static const byte p_vector[] = + /* Now is the time for all good men w/o trailing 0 */ + { + 0x4e,0x6f,0x77,0x20,0x69,0x73,0x20,0x74, + 0x68,0x65,0x20,0x74,0x69,0x6d,0x65,0x20, + 0x66,0x6f,0x72,0x20,0x61,0x6c,0x6c,0x20, + 0x67,0x6f,0x6f,0x64,0x20,0x6d,0x65,0x6e + }; + static const byte iv[] = "1234567890abcdef"; + + static const byte c_vector[] = + { + 0xd7,0xd6,0x04,0x5b,0x4d,0xc4,0x90,0xdf, + 0x4a,0x82,0xed,0x61,0x26,0x4e,0x23,0xb3, + 0xe4,0xb5,0x85,0x30,0x29,0x4c,0x9d,0xcf, + 0x73,0xc9,0x46,0xd1,0xaa,0xc8,0xcb,0x62 + }; + + byte iv_copy[sizeof(iv)]; + byte enc[sizeof(p_vector)]; + byte dec[sizeof(p_vector)]; + u8 * enc2 = NULL; + u8 * dec2 = NULL; + + const char *driver_name; + + XMEMSET(enc, 0, sizeof(enc)); + XMEMSET(dec, 0, sizeof(enc)); + + ret = wc_AesInit(&aes, NULL, INVALID_DEVID); + if (ret) { + pr_err("wolfcrypt wc_AesInit failed with return code %d.\n", ret); + return ret; + } + + ret = wc_AesSetKey(&aes, key32, AES_BLOCK_SIZE * 2, iv, AES_ENCRYPTION); + if (ret) { + pr_err("wolfcrypt wc_AesSetKey failed with return code %d\n", ret); + return ret; + } + + ret = wc_AesCbcEncrypt(&aes, enc, p_vector, sizeof(p_vector)); + if (ret) { + pr_err("wolfcrypt wc_AesCbcEncrypt failed with return code %d\n", ret); + return ret; + } + + if (XMEMCMP(enc, c_vector, 
sizeof(c_vector)) != 0) { + pr_err("wolfcrypt wc_AesCbcEncrypt KAT mismatch\n"); + return LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + } + + /* Re init for decrypt and set flag. */ + wc_AesFree(&aes); + + ret = wc_AesInit(&aes, NULL, INVALID_DEVID); + if (ret) { + pr_err("wolfcrypt wc_AesInit failed with return code %d.\n", ret); + return ret; + } + + ret = wc_AesSetKey(&aes, key32, AES_BLOCK_SIZE * 2, iv, AES_DECRYPTION); + if (ret) { + pr_err("wolfcrypt wc_AesSetKey failed with return code %d.\n", ret); + return ret; + } + + ret = wc_AesCbcDecrypt(&aes, dec, enc, sizeof(p_vector)); + if (ret) { + pr_err("wolfcrypt wc_AesCbcDecrypt failed with return code %d\n", ret); + return ret; + } + + ret = XMEMCMP(p_vector, dec, sizeof(p_vector)); + if (ret) { + pr_err("error: p_vector and dec do not match: %d\n", ret); + return ret; + } + + /* now the kernel crypto part */ + enc2 = kmalloc(sizeof(p_vector), GFP_KERNEL); + if (!enc2) { + pr_err("error: kmalloc failed\n"); + goto test_cbc_end; + } + + dec2 = kmalloc(sizeof(p_vector), GFP_KERNEL); + if (!dec2) { + pr_err("error: kmalloc failed\n"); + goto test_cbc_end; + } + + memcpy(dec2, p_vector, sizeof(p_vector)); + + tfm = crypto_alloc_skcipher(WOLFKM_AESCBC_NAME, 0, 0); + if (IS_ERR(tfm)) { + pr_err("error: allocating AES skcipher algorithm %s failed: %ld\n", + WOLFKM_AESCBC_DRIVER, PTR_ERR(tfm)); + goto test_cbc_end; + } + + driver_name = crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); + if (strcmp(driver_name, WOLFKM_AESCBC_DRIVER)) { + pr_err("error: unexpected implementation for %s: %s (expected %s)\n", + WOLFKM_AESCBC_NAME, driver_name, WOLFKM_AESCBC_DRIVER); + ret = -ENOENT; + goto test_cbc_end; + } + + ret = crypto_skcipher_setkey(tfm, key32, AES_BLOCK_SIZE * 2); + if (ret) { + pr_err("error: crypto_skcipher_setkey returned: %d\n", ret); + goto test_cbc_end; + } + + req = skcipher_request_alloc(tfm, GFP_KERNEL); + if (IS_ERR(req)) { + pr_err("error: allocating AES skcipher request %s failed\n", + 
WOLFKM_AESCBC_DRIVER); + goto test_cbc_end; + } + + sg_init_one(&src, dec2, sizeof(p_vector)); + sg_init_one(&dst, enc2, sizeof(p_vector)); + + XMEMCPY(iv_copy, iv, sizeof(iv)); + skcipher_request_set_crypt(req, &src, &dst, sizeof(p_vector), iv_copy); + + ret = crypto_skcipher_encrypt(req); + + if (ret) { + pr_err("error: crypto_skcipher_encrypt returned: %d\n", ret); + goto test_cbc_end; + } + + ret = XMEMCMP(enc, enc2, sizeof(p_vector)); + if (ret) { + pr_err("error: enc and enc2 do not match: %d\n", ret); + goto test_cbc_end; + } + + memset(dec2, 0, sizeof(p_vector)); + sg_init_one(&src, enc2, sizeof(p_vector)); + sg_init_one(&dst, dec2, sizeof(p_vector)); + + XMEMCPY(iv_copy, iv, sizeof(iv)); + skcipher_request_set_crypt(req, &src, &dst, sizeof(p_vector), iv_copy); + + ret = crypto_skcipher_decrypt(req); + + if (ret) { + pr_err("ERROR: crypto_skcipher_decrypt returned %d\n", ret); + goto test_cbc_end; + } + + ret = XMEMCMP(dec, dec2, sizeof(p_vector)); + if (ret) { + pr_err("error: dec and dec2 do not match: %d\n", ret); + goto test_cbc_end; + } + +test_cbc_end: + + if (enc2) { kfree(enc2); enc2 = NULL; } + if (dec2) { kfree(dec2); dec2 = NULL; } + if (req) { skcipher_request_free(req); req = NULL; } + if (tfm) { crypto_free_skcipher(tfm); tfm = NULL; } + + return ret; +} + +#endif /* HAVE_AES_CBC && + * (LINUXKM_LKCAPI_REGISTER_ALL || LINUXKM_LKCAPI_REGISTER_AESCBC) + */ + +#if defined(WOLFSSL_AES_CFB) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCFB)) + +static int linuxkm_test_aescfb(void) +{ + int ret = 0; + struct crypto_skcipher * tfm = NULL; + struct skcipher_request * req = NULL; + struct scatterlist src, dst; + Aes aes; + static const byte key32[] = + { + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66 + }; + static const byte p_vector[] = + /* Now is the time for 
all good men w/o trailing 0 */ + { + 0x4e,0x6f,0x77,0x20,0x69,0x73,0x20,0x74, + 0x68,0x65,0x20,0x74,0x69,0x6d,0x65,0x20, + 0x66,0x6f,0x72,0x20,0x61,0x6c,0x6c,0x20, + 0x67,0x6f,0x6f,0x64,0x20,0x6d,0x65,0x6e + }; + static const byte iv[] = "1234567890abcdef"; + static const byte c_vector[] = + { + 0x56,0x35,0x3f,0xdd,0xde,0xa6,0x15,0x87, + 0x57,0xdc,0x34,0x62,0x9a,0x68,0x96,0x51, + 0xc7,0x09,0xb9,0x4e,0x47,0x6b,0x24,0x72, + 0x19,0x5a,0xdf,0x7e,0xba,0xa8,0x01,0xb6 + }; + byte iv_copy[sizeof(iv)]; + byte enc[sizeof(p_vector)]; + byte dec[sizeof(p_vector)]; + u8 * enc2 = NULL; + u8 * dec2 = NULL; + const char *driver_name; + + XMEMSET(enc, 0, sizeof(enc)); + XMEMSET(dec, 0, sizeof(enc)); + + ret = wc_AesInit(&aes, NULL, INVALID_DEVID); + if (ret) { + pr_err("wolfcrypt wc_AesInit failed with return code %d.\n", ret); + return ret; + } + + ret = wc_AesSetKey(&aes, key32, AES_BLOCK_SIZE * 2, iv, AES_ENCRYPTION); + if (ret) { + pr_err("wolfcrypt wc_AesSetKey failed with return code %d\n", ret); + return ret; + } + + ret = wc_AesCfbEncrypt(&aes, enc, p_vector, sizeof(p_vector)); + if (ret) { + pr_err("wolfcrypt wc_AesCfbEncrypt failed with return code %d\n", ret); + return ret; + } + + if (XMEMCMP(enc, c_vector, sizeof(c_vector)) != 0) { + pr_err("wolfcrypt wc_AesCfbEncrypt KAT mismatch\n"); + return LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + } + + /* Re init for decrypt and set flag. 
*/ + wc_AesFree(&aes); + + ret = wc_AesInit(&aes, NULL, INVALID_DEVID); + if (ret) { + pr_err("wolfcrypt wc_AesInit failed with return code %d.\n", ret); + return ret; + } + + ret = wc_AesSetKey(&aes, key32, AES_BLOCK_SIZE * 2, iv, AES_ENCRYPTION); + if (ret) { + pr_err("wolfcrypt wc_AesSetKey failed with return code %d.\n", ret); + return ret; + } + + ret = wc_AesCfbDecrypt(&aes, dec, enc, sizeof(p_vector)); + if (ret) { + pr_err("wolfcrypt wc_AesCfbDecrypt failed with return code %d\n", ret); + return ret; + } + + ret = XMEMCMP(p_vector, dec, sizeof(p_vector)); + if (ret) { + pr_err("error: p_vector and dec do not match: %d\n", ret); + return ret; + } + + /* now the kernel crypto part */ + enc2 = kmalloc(sizeof(p_vector), GFP_KERNEL); + if (!enc2) { + pr_err("error: kmalloc failed\n"); + goto test_cfb_end; + } + + dec2 = kmalloc(sizeof(p_vector), GFP_KERNEL); + if (!dec2) { + pr_err("error: kmalloc failed\n"); + goto test_cfb_end; + } + + memcpy(dec2, p_vector, sizeof(p_vector)); + + tfm = crypto_alloc_skcipher(WOLFKM_AESCFB_NAME, 0, 0); + if (IS_ERR(tfm)) { + pr_err("error: allocating AES skcipher algorithm %s failed: %ld\n", + WOLFKM_AESCFB_DRIVER, PTR_ERR(tfm)); + goto test_cfb_end; + } + + driver_name = crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); + if (strcmp(driver_name, WOLFKM_AESCFB_DRIVER)) { + pr_err("error: unexpected implementation for %s: %s (expected %s)\n", + WOLFKM_AESCFB_NAME, driver_name, WOLFKM_AESCFB_DRIVER); + ret = -ENOENT; + goto test_cfb_end; + } + + ret = crypto_skcipher_setkey(tfm, key32, AES_BLOCK_SIZE * 2); + if (ret) { + pr_err("error: crypto_skcipher_setkey returned: %d\n", ret); + goto test_cfb_end; + } + + req = skcipher_request_alloc(tfm, GFP_KERNEL); + if (IS_ERR(req)) { + pr_err("error: allocating AES skcipher request %s failed\n", + WOLFKM_AESCFB_DRIVER); + goto test_cfb_end; + } + + sg_init_one(&src, dec2, sizeof(p_vector)); + sg_init_one(&dst, enc2, sizeof(p_vector)); + + XMEMCPY(iv_copy, iv, sizeof(iv)); + 
skcipher_request_set_crypt(req, &src, &dst, sizeof(p_vector), iv_copy); + + ret = crypto_skcipher_encrypt(req); + + if (ret) { + pr_err("error: crypto_skcipher_encrypt returned: %d\n", ret); + goto test_cfb_end; + } + + ret = XMEMCMP(enc, enc2, sizeof(p_vector)); + if (ret) { + pr_err("error: enc and enc2 do not match: %d\n", ret); + goto test_cfb_end; + } + + memset(dec2, 0, sizeof(p_vector)); + sg_init_one(&src, enc2, sizeof(p_vector)); + sg_init_one(&dst, dec2, sizeof(p_vector)); + + XMEMCPY(iv_copy, iv, sizeof(iv)); + skcipher_request_set_crypt(req, &src, &dst, sizeof(p_vector), iv_copy); + + ret = crypto_skcipher_decrypt(req); + + if (ret) { + pr_err("error: crypto_skcipher_decrypt returned: %d\n", ret); + goto test_cfb_end; + } + + ret = XMEMCMP(dec, dec2, sizeof(p_vector)); + if (ret) { + pr_err("error: dec and dec2 do not match: %d\n", ret); + goto test_cfb_end; + } + +test_cfb_end: + + if (enc2) { kfree(enc2); enc2 = NULL; } + if (dec2) { kfree(dec2); dec2 = NULL; } + if (req) { skcipher_request_free(req); req = NULL; } + if (tfm) { crypto_free_skcipher(tfm); tfm = NULL; } + + return ret; +} + +#endif /* WOLFSSL_AES_CFB && + * (LINUXKM_LKCAPI_REGISTER_ALL || LINUXKM_LKCAPI_REGISTER_AESCFB) + */ + +#if defined(HAVE_AESGCM) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESGCM)) && \ + (! 
(defined(WOLFSSL_AESNI) && defined(WC_AES_C_DYNAMIC_FALLBACK))) + +static int linuxkm_test_aesgcm(void) +{ + int ret = 0; + struct crypto_aead * tfm = NULL; + struct aead_request * req = NULL; + struct scatterlist * src = NULL; + struct scatterlist * dst = NULL; + Aes aes; + static const byte key32[] = + { + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, + 0x38, 0x39, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66 + }; + static const byte p_vector[] = + /* Now is the time for all w/o trailing 0 */ + { + 0x4e,0x6f,0x77,0x20,0x69,0x73,0x20,0x74, + 0x68,0x65,0x20,0x74,0x69,0x6d,0x65,0x20, + 0x66,0x6f,0x72,0x20,0x61,0x6c,0x6c,0x20 + }; + static const byte assoc[] = + { + 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, + 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, + 0xab, 0xad, 0xda, 0xd2 + }; + static const byte ivstr[] = "1234567890abcdef"; + static const byte c_vector[] = + { + 0x0c,0x97,0x05,0x3c,0xef,0x5c,0x63,0x6b, + 0x15,0xe4,0x00,0x63,0xf8,0x8c,0xd0,0x95, + 0x27,0x81,0x90,0x9c,0x9f,0xe6,0x98,0xe9 + }; + static const byte KAT_authTag[] = + { + 0xc9,0xd5,0x7a,0x77,0xac,0x28,0xc2,0xe7, + 0xe4,0x28,0x90,0xaa,0x09,0xab,0xf9,0x7c + }; + byte enc[sizeof(p_vector)]; + byte authTag[AES_BLOCK_SIZE]; + byte dec[sizeof(p_vector)]; + u8 * assoc2 = NULL; + u8 * enc2 = NULL; + u8 * dec2 = NULL; + u8 * iv = NULL; + size_t encryptLen = sizeof(p_vector); + size_t decryptLen = sizeof(p_vector) + sizeof(authTag); + const char *driver_name; + + /* Init stack variables. 
*/ + XMEMSET(enc, 0, sizeof(p_vector)); + XMEMSET(dec, 0, sizeof(p_vector)); + XMEMSET(authTag, 0, AES_BLOCK_SIZE); + + ret = wc_AesInit(&aes, NULL, INVALID_DEVID); + if (ret) { + pr_err("error: wc_AesInit failed with return code %d.\n", ret); + goto test_gcm_end; + } + + ret = wc_AesGcmInit(&aes, key32, sizeof(key32)/sizeof(byte), ivstr, + AES_BLOCK_SIZE); + if (ret) { + pr_err("error: wc_AesGcmInit failed with return code %d.\n", ret); + goto test_gcm_end; + } + + ret = wc_AesGcmEncryptUpdate(&aes, NULL, NULL, 0, assoc, sizeof(assoc)); + if (ret) { + pr_err("error: wc_AesGcmEncryptUpdate failed with return code %d\n", + ret); + goto test_gcm_end; + } + + ret = wc_AesGcmEncryptUpdate(&aes, enc, p_vector, sizeof(p_vector), NULL, 0); + if (ret) { + pr_err("error: wc_AesGcmEncryptUpdate failed with return code %d\n", + ret); + goto test_gcm_end; + } + + if (XMEMCMP(enc, c_vector, sizeof(c_vector)) != 0) { + pr_err("wolfcrypt AES-GCM KAT mismatch on ciphertext\n"); + ret = LINUXKM_LKCAPI_AESGCM_KAT_MISMATCH_E; + goto test_gcm_end; + } + + ret = wc_AesGcmEncryptFinal(&aes, authTag, AES_BLOCK_SIZE); + if (ret) { + pr_err("error: wc_AesGcmEncryptFinal failed with return code %d\n", + ret); + goto test_gcm_end; + } + + if (XMEMCMP(authTag, KAT_authTag, sizeof(KAT_authTag)) != 0) { + pr_err("wolfcrypt AES-GCM KAT mismatch on authTag\n"); + ret = LINUXKM_LKCAPI_AESGCM_KAT_MISMATCH_E; + goto test_gcm_end; + } + + ret = wc_AesGcmInit(&aes, key32, sizeof(key32)/sizeof(byte), ivstr, + AES_BLOCK_SIZE); + if (ret) { + pr_err("error: wc_AesGcmInit failed with return code %d.\n", ret); + goto test_gcm_end; + } + + ret = wc_AesGcmDecryptUpdate(&aes, dec, enc, sizeof(p_vector), + assoc, sizeof(assoc)); + if (ret) { + pr_err("error: wc_AesGcmDecryptUpdate failed with return code %d\n", + ret); + goto test_gcm_end; + } + + ret = wc_AesGcmDecryptFinal(&aes, authTag, AES_BLOCK_SIZE); + if (ret) { + pr_err("error: wc_AesGcmEncryptFinal failed with return code %d\n", + ret); + goto 
test_gcm_end; + } + + ret = XMEMCMP(p_vector, dec, sizeof(p_vector)); + if (ret) { + pr_err("error: gcm: p_vector and dec do not match: %d\n", ret); + goto test_gcm_end; + } + + /* now the kernel crypto part */ + assoc2 = kmalloc(sizeof(assoc), GFP_KERNEL); + if (IS_ERR(assoc2)) { + pr_err("error: kmalloc failed\n"); + goto test_gcm_end; + } + memset(assoc2, 0, sizeof(assoc)); + memcpy(assoc2, assoc, sizeof(assoc)); + + iv = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL); + if (IS_ERR(iv)) { + pr_err("error: kmalloc failed\n"); + goto test_gcm_end; + } + memset(iv, 0, AES_BLOCK_SIZE); + memcpy(iv, ivstr, AES_BLOCK_SIZE); + + enc2 = kmalloc(decryptLen, GFP_KERNEL); + if (IS_ERR(enc2)) { + pr_err("error: kmalloc failed\n"); + goto test_gcm_end; + } + + dec2 = kmalloc(decryptLen, GFP_KERNEL); + if (IS_ERR(dec2)) { + pr_err("error: kmalloc failed\n"); + goto test_gcm_end; + } + + memset(enc2, 0, decryptLen); + memset(dec2, 0, decryptLen); + memcpy(dec2, p_vector, sizeof(p_vector)); + + tfm = crypto_alloc_aead(WOLFKM_AESGCM_NAME, 0, 0); + if (IS_ERR(tfm)) { + pr_err("error: allocating AES aead algorithm %s failed: %ld\n", + WOLFKM_AESGCM_DRIVER, PTR_ERR(tfm)); + goto test_gcm_end; + } + + driver_name = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); + if (strcmp(driver_name, WOLFKM_AESGCM_DRIVER)) { + pr_err("error: unexpected implementation for %s: %s (expected %s)\n", + WOLFKM_AESGCM_NAME, driver_name, WOLFKM_AESGCM_DRIVER); + ret = -ENOENT; + goto test_gcm_end; + } + + ret = crypto_aead_setkey(tfm, key32, AES_BLOCK_SIZE * 2); + if (ret) { + pr_err("error: crypto_aead_setkey returned: %d\n", ret); + goto test_gcm_end; + } + + ret = crypto_aead_setauthsize(tfm, sizeof(authTag)); + if (ret) { + pr_err("error: crypto_aead_setauthsize returned: %d\n", ret); + goto test_gcm_end; + } + + req = aead_request_alloc(tfm, GFP_KERNEL); + if (IS_ERR(req)) { + pr_err("error: allocating AES aead request %s failed: %ld\n", + WOLFKM_AESGCM_DRIVER, PTR_ERR(req)); + goto test_gcm_end; + } + 
+ src = kmalloc(sizeof(struct scatterlist) * 2, GFP_KERNEL); + dst = kmalloc(sizeof(struct scatterlist) * 2, GFP_KERNEL); + + if (IS_ERR(src) || IS_ERR(dst)) { + pr_err("error: kmalloc src or dst failed: %ld, %ld\n", + PTR_ERR(src), PTR_ERR(dst)); + goto test_gcm_end; + } + + sg_init_table(src, 2); + sg_set_buf(src, assoc2, sizeof(assoc)); + sg_set_buf(&src[1], dec2, sizeof(p_vector)); + + sg_init_table(dst, 2); + sg_set_buf(dst, assoc2, sizeof(assoc)); + sg_set_buf(&dst[1], enc2, decryptLen); + + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_ad(req, sizeof(assoc)); + aead_request_set_crypt(req, src, dst, sizeof(p_vector), iv); + + ret = crypto_aead_encrypt(req); + + if (ret) { + pr_err("error: crypto_aead_encrypt returned: %d\n", ret); + goto test_gcm_end; + } + + ret = XMEMCMP(enc, enc2, sizeof(p_vector)); + if (ret) { + pr_err("error: enc and enc2 do not match: %d\n", ret); + goto test_gcm_end; + } + + ret = XMEMCMP(authTag, enc2 + encryptLen, sizeof(authTag)); + if (ret) { + pr_err("error: authTags do not match: %d\n", ret); + goto test_gcm_end; + } + + /* Now decrypt crypto request. Reverse src and dst. 
*/ + memset(dec2, 0, decryptLen); + aead_request_set_ad(req, sizeof(assoc)); + aead_request_set_crypt(req, dst, src, decryptLen, iv); + + ret = crypto_aead_decrypt(req); + + if (ret) { + pr_err("error: crypto_aead_decrypt returned: %d\n", ret); + goto test_gcm_end; + } + + ret = XMEMCMP(dec, dec2, sizeof(p_vector)); + if (ret) { + pr_err("error: dec and dec2 do not match: %d\n", ret); + goto test_gcm_end; + } + +test_gcm_end: + if (req) { aead_request_free(req); req = NULL; } + if (tfm) { crypto_free_aead(tfm); tfm = NULL; } + + if (src) { kfree(src); src = NULL; } + if (dst) { kfree(dst); dst = NULL; } + + if (dec2) { kfree(dec2); dec2 = NULL; } + if (enc2) { kfree(enc2); enc2 = NULL; } + + if (assoc2) { kfree(assoc2); assoc2 = NULL; } + if (iv) { kfree(iv); iv = NULL; } + + return ret; +} + +#endif /* HAVE_AESGCM && + * (LINUXKM_LKCAPI_REGISTER_ALL || LINUXKM_LKCAPI_REGISTER_AESGCM) && + * (! (WOLFSSL_AESNI && WC_AES_C_DYNAMIC_FALLBACK)) + */ + +#if defined(WOLFSSL_AES_XTS) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESXTS)) + +/* test vectors from + * http://csrc.nist.gov/groups/STM/cavp/block-cipher-modes.html + */ +#ifdef WOLFSSL_AES_128 +static int aes_xts_128_test(void) +{ + XtsAes *aes = NULL; + int aes_inited = 0; + int ret = 0; +#define AES_XTS_128_TEST_BUF_SIZ (AES_BLOCK_SIZE * 2 + 8) + unsigned char *buf = NULL; + unsigned char *cipher = NULL; + u8 * enc2 = NULL; + u8 * dec2 = NULL; + struct scatterlist * src = NULL; + struct scatterlist * dst = NULL; + struct crypto_skcipher *tfm = NULL; + struct skcipher_request *req = NULL; + u8 iv[AES_BLOCK_SIZE]; + const char *driver_name; + + /* 128 key tests */ + static const unsigned char k1[] = { + 0xa1, 0xb9, 0x0c, 0xba, 0x3f, 0x06, 0xac, 0x35, + 0x3b, 0x2c, 0x34, 0x38, 0x76, 0x08, 0x17, 0x62, + 0x09, 0x09, 0x23, 0x02, 0x6e, 0x91, 0x77, 0x18, + 0x15, 0xf2, 0x9d, 0xab, 0x01, 0x93, 0x2f, 0x2f + }; + + static const unsigned char i1[] = { + 0x4f, 0xae, 0xf7, 0x11, 0x7c, 
0xda, 0x59, 0xc6, + 0x6e, 0x4b, 0x92, 0x01, 0x3e, 0x76, 0x8a, 0xd5 + }; + + static const unsigned char p1[] = { + 0xeb, 0xab, 0xce, 0x95, 0xb1, 0x4d, 0x3c, 0x8d, + 0x6f, 0xb3, 0x50, 0x39, 0x07, 0x90, 0x31, 0x1c + }; + + /* plain text test of partial block is not from NIST test vector list */ + static const unsigned char pp[] = { + 0xeb, 0xab, 0xce, 0x95, 0xb1, 0x4d, 0x3c, 0x8d, + 0x6f, 0xb3, 0x50, 0x39, 0x07, 0x90, 0x31, 0x1c, + 0x6e, 0x4b, 0x92, 0x01, 0x3e, 0x76, 0x8a, 0xd5 + }; + + static const unsigned char c1[] = { + 0x77, 0x8a, 0xe8, 0xb4, 0x3c, 0xb9, 0x8d, 0x5a, + 0x82, 0x50, 0x81, 0xd5, 0xbe, 0x47, 0x1c, 0x63 + }; + + /* plain text test of partial block is not from NIST test vector list */ + static const unsigned char cp[] = { + 0x2b, 0xf7, 0x2c, 0xf3, 0xeb, 0x85, 0xef, 0x7b, + 0x0b, 0x76, 0xa0, 0xaa, 0xf3, 0x3f, 0x25, 0x8b, + 0x77, 0x8a, 0xe8, 0xb4, 0x3c, 0xb9, 0x8d, 0x5a + }; + + static const unsigned char k2[] = { + 0x39, 0x25, 0x79, 0x05, 0xdf, 0xcc, 0x77, 0x76, + 0x6c, 0x87, 0x0a, 0x80, 0x6a, 0x60, 0xe3, 0xc0, + 0x93, 0xd1, 0x2a, 0xcf, 0xcb, 0x51, 0x42, 0xfa, + 0x09, 0x69, 0x89, 0x62, 0x5b, 0x60, 0xdb, 0x16 + }; + + static const unsigned char i2[] = { + 0x5c, 0xf7, 0x9d, 0xb6, 0xc5, 0xcd, 0x99, 0x1a, + 0x1c, 0x78, 0x81, 0x42, 0x24, 0x95, 0x1e, 0x84 + }; + + static const unsigned char p2[] = { + 0xbd, 0xc5, 0x46, 0x8f, 0xbc, 0x8d, 0x50, 0xa1, + 0x0d, 0x1c, 0x85, 0x7f, 0x79, 0x1c, 0x5c, 0xba, + 0xb3, 0x81, 0x0d, 0x0d, 0x73, 0xcf, 0x8f, 0x20, + 0x46, 0xb1, 0xd1, 0x9e, 0x7d, 0x5d, 0x8a, 0x56 + }; + + static const unsigned char c2[] = { + 0xd6, 0xbe, 0x04, 0x6d, 0x41, 0xf2, 0x3b, 0x5e, + 0xd7, 0x0b, 0x6b, 0x3d, 0x5c, 0x8e, 0x66, 0x23, + 0x2b, 0xe6, 0xb8, 0x07, 0xd4, 0xdc, 0xc6, 0x0e, + 0xff, 0x8d, 0xbc, 0x1d, 0x9f, 0x7f, 0xc8, 0x22 + }; + +#ifndef HAVE_FIPS /* FIPS requires different keys for main and tweak. 
*/ + static const unsigned char k3[] = { + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + }; + static const unsigned char i3[] = { + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + }; + static const unsigned char p3[] = { + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0xff, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 + }; + static const unsigned char c3[] = { + 0xA2, 0x07, 0x47, 0x76, 0x3F, 0xEC, 0x0C, 0x23, + 0x1B, 0xD0, 0xBD, 0x46, 0x9A, 0x27, 0x38, 0x12, + 0x95, 0x02, 0x3D, 0x5D, 0xC6, 0x94, 0x51, 0x36, + 0xA0, 0x85, 0xD2, 0x69, 0x6E, 0x87, 0x0A, 0xBF, + 0xB5, 0x5A, 0xDD, 0xCB, 0x80, 0xE0, 0xFC, 0xCD + }; +#endif /* HAVE_FIPS */ + + if ((aes = (XtsAes *)XMALLOC(sizeof(*aes), NULL, DYNAMIC_TYPE_AES)) + == NULL) + { + ret = MEMORY_E; + goto out; + } + + if ((buf = (unsigned char *)XMALLOC(AES_XTS_128_TEST_BUF_SIZ, NULL, + DYNAMIC_TYPE_AES)) == NULL) + { + ret = MEMORY_E; + goto out; + } + if ((cipher = (unsigned char *)XMALLOC(AES_XTS_128_TEST_BUF_SIZ, NULL, + DYNAMIC_TYPE_AES)) == NULL) + { + ret = MEMORY_E; + goto out; + } + + XMEMSET(buf, 0, AES_XTS_128_TEST_BUF_SIZ); + ret = wc_AesXtsInit(aes, NULL, INVALID_DEVID); + if (ret != 0) + goto out; + else + aes_inited = 1; + + ret = wc_AesXtsSetKeyNoInit(aes, k2, sizeof(k2), AES_ENCRYPTION); + if (ret != 0) + goto out; + + ret = wc_AesXtsEncrypt(aes, buf, p2, sizeof(p2), i2, sizeof(i2)); + if (ret != 0) + goto out; + if (XMEMCMP(c2, buf, sizeof(c2))) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + +#if defined(DEBUG_VECTOR_REGISTER_ACCESS) && defined(WC_AES_C_DYNAMIC_FALLBACK) + WC_DEBUG_SET_VECTOR_REGISTERS_RETVAL(SYSLIB_FAILED_E); + ret = 
wc_AesXtsEncrypt(aes, buf, p2, sizeof(p2), i2, sizeof(i2)); + WC_DEBUG_SET_VECTOR_REGISTERS_RETVAL(0); + if (ret != 0) + goto out; + if (XMEMCMP(c2, buf, sizeof(c2))) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } +#endif + + XMEMSET(buf, 0, AES_XTS_128_TEST_BUF_SIZ); + + ret = wc_AesXtsSetKeyNoInit(aes, k1, sizeof(k1), AES_ENCRYPTION); + if (ret != 0) + goto out; + ret = wc_AesXtsEncrypt(aes, buf, p1, sizeof(p1), i1, sizeof(i1)); + if (ret != 0) + goto out; + if (XMEMCMP(c1, buf, AES_BLOCK_SIZE)) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + +#if defined(DEBUG_VECTOR_REGISTER_ACCESS) && defined(WC_AES_C_DYNAMIC_FALLBACK) + WC_DEBUG_SET_VECTOR_REGISTERS_RETVAL(SYSLIB_FAILED_E); + ret = wc_AesXtsEncrypt(aes, buf, p1, sizeof(p1), i1, sizeof(i1)); + WC_DEBUG_SET_VECTOR_REGISTERS_RETVAL(0); + if (ret != 0) + goto out; + if (XMEMCMP(c1, buf, AES_BLOCK_SIZE)) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } +#endif + + /* partial block encryption test */ + XMEMSET(cipher, 0, AES_XTS_128_TEST_BUF_SIZ); + ret = wc_AesXtsEncrypt(aes, cipher, pp, sizeof(pp), i1, sizeof(i1)); + if (ret != 0) + goto out; + if (XMEMCMP(cp, cipher, sizeof(cp))) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + +#if defined(DEBUG_VECTOR_REGISTER_ACCESS) && defined(WC_AES_C_DYNAMIC_FALLBACK) + WC_DEBUG_SET_VECTOR_REGISTERS_RETVAL(SYSLIB_FAILED_E); + XMEMSET(cipher, 0, AES_XTS_128_TEST_BUF_SIZ); + ret = wc_AesXtsEncrypt(aes, cipher, pp, sizeof(pp), i1, sizeof(i1)); + WC_DEBUG_SET_VECTOR_REGISTERS_RETVAL(0); + if (ret != 0) + goto out; + if (XMEMCMP(cp, cipher, sizeof(cp))) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } +#endif + + /* partial block decrypt test */ + XMEMSET(buf, 0, AES_XTS_128_TEST_BUF_SIZ); + ret = wc_AesXtsSetKeyNoInit(aes, k1, sizeof(k1), AES_DECRYPTION); + if (ret != 0) + goto out; + ret = wc_AesXtsDecrypt(aes, buf, cipher, sizeof(pp), i1, sizeof(i1)); + if (ret != 0) + goto out; + if (XMEMCMP(pp, buf, 
sizeof(pp))) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + +#if defined(DEBUG_VECTOR_REGISTER_ACCESS) && defined(WC_AES_C_DYNAMIC_FALLBACK) + WC_DEBUG_SET_VECTOR_REGISTERS_RETVAL(SYSLIB_FAILED_E); + XMEMSET(buf, 0, AES_XTS_128_TEST_BUF_SIZ); + ret = wc_AesXtsDecrypt(aes, buf, cipher, sizeof(pp), i1, sizeof(i1)); + WC_DEBUG_SET_VECTOR_REGISTERS_RETVAL(0); + if (ret != 0) + goto out; + if (XMEMCMP(pp, buf, sizeof(pp))) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } +#endif + + /* NIST decrypt test vector */ + XMEMSET(buf, 0, AES_XTS_128_TEST_BUF_SIZ); + ret = wc_AesXtsDecrypt(aes, buf, c1, sizeof(c1), i1, sizeof(i1)); + if (ret != 0) + goto out; + if (XMEMCMP(p1, buf, AES_BLOCK_SIZE)) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + +#if defined(DEBUG_VECTOR_REGISTER_ACCESS) && defined(WC_AES_C_DYNAMIC_FALLBACK) + WC_DEBUG_SET_VECTOR_REGISTERS_RETVAL(SYSLIB_FAILED_E); + XMEMSET(buf, 0, AES_XTS_128_TEST_BUF_SIZ); + ret = wc_AesXtsDecrypt(aes, buf, c1, sizeof(c1), i1, sizeof(i1)); + WC_DEBUG_SET_VECTOR_REGISTERS_RETVAL(0); + if (ret != 0) + goto out; + if (XMEMCMP(p1, buf, AES_BLOCK_SIZE)) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } +#endif + + /* fail case with decrypting using wrong key */ + XMEMSET(buf, 0, AES_XTS_128_TEST_BUF_SIZ); + ret = wc_AesXtsDecrypt(aes, buf, c2, sizeof(c2), i2, sizeof(i2)); + if (ret != 0) + goto out; + if (XMEMCMP(p2, buf, sizeof(p2)) == 0) { /* fail case with wrong key */ + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + + /* set correct key and retest */ + XMEMSET(buf, 0, AES_XTS_128_TEST_BUF_SIZ); + ret = wc_AesXtsSetKeyNoInit(aes, k2, sizeof(k2), AES_DECRYPTION); + if (ret != 0) + goto out; + ret = wc_AesXtsDecrypt(aes, buf, c2, sizeof(c2), i2, sizeof(i2)); + if (ret != 0) + goto out; + if (XMEMCMP(p2, buf, sizeof(p2))) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + +#ifndef HAVE_FIPS + + /* Test ciphertext stealing in-place. 
*/ + XMEMCPY(buf, p3, sizeof(p3)); + ret = wc_AesXtsSetKeyNoInit(aes, k3, sizeof(k3), AES_ENCRYPTION); + if (ret != 0) + goto out; + + ret = wc_AesXtsEncrypt(aes, buf, buf, sizeof(p3), i3, sizeof(i3)); + if (ret != 0) + goto out; + if (XMEMCMP(c3, buf, sizeof(c3))) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + + ret = wc_AesXtsSetKeyNoInit(aes, k3, sizeof(k3), AES_DECRYPTION); + if (ret != 0) + goto out; + ret = wc_AesXtsDecrypt(aes, buf, buf, sizeof(c3), i3, sizeof(i3)); + if (ret != 0) + goto out; + if (XMEMCMP(p3, buf, sizeof(p3))) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + +#endif /* HAVE_FIPS */ + + { + #define LARGE_XTS_SZ 1024 + byte* large_input = (byte *)XMALLOC(LARGE_XTS_SZ, NULL, + DYNAMIC_TYPE_TMP_BUFFER); + int i; + int j; + + if (large_input == NULL) + ret = MEMORY_E; + goto out; + + for (i = 0; i < (int)LARGE_XTS_SZ; i++) + large_input[i] = (byte)i; + + for (j = 16; j < (int)LARGE_XTS_SZ; j++) { + ret = wc_AesXtsSetKeyNoInit(aes, k1, sizeof(k1), AES_ENCRYPTION); + if (ret != 0) + goto out; + ret = wc_AesXtsEncrypt(aes, large_input, large_input, j, i1, + sizeof(i1)); + if (ret != 0) + goto out; + + ret = wc_AesXtsSetKeyNoInit(aes, k1, sizeof(k1), AES_DECRYPTION); + if (ret != 0) + goto out; + ret = wc_AesXtsDecrypt(aes, large_input, large_input, j, i1, + sizeof(i1)); + if (ret != 0) + goto out; + for (i = 0; i < j; i++) { + if (large_input[i] != (byte)i) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + } + } + XFREE(large_input, NULL, DYNAMIC_TYPE_TMP_BUFFER); + } + + /* now the kernel crypto part */ + + enc2 = XMALLOC(sizeof(p1), NULL, DYNAMIC_TYPE_AES); + if (!enc2) { + pr_err("error: malloc failed\n"); + ret = -ENOMEM; + goto test_xts_end; + } + + dec2 = XMALLOC(sizeof(p1), NULL, DYNAMIC_TYPE_AES); + if (!dec2) { + pr_err("error: malloc failed\n"); + ret = -ENOMEM; + goto test_xts_end; + } + + src = XMALLOC(sizeof(*src) * 2, NULL, DYNAMIC_TYPE_AES); + if (! 
src) { + pr_err("error: malloc failed\n"); + ret = -ENOMEM; + goto test_xts_end; + } + + dst = XMALLOC(sizeof(*dst) * 2, NULL, DYNAMIC_TYPE_AES); + if (! dst) { + pr_err("error: malloc failed\n"); + ret = -ENOMEM; + goto test_xts_end; + } + + tfm = crypto_alloc_skcipher(WOLFKM_AESXTS_NAME, 0, 0); + if (IS_ERR(tfm)) { + ret = PTR_ERR(tfm); + pr_err("error: allocating AES skcipher algorithm %s failed: %d\n", + WOLFKM_AESXTS_DRIVER, ret); + goto test_xts_end; + } + + driver_name = crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); + if (strcmp(driver_name, WOLFKM_AESXTS_DRIVER)) { + pr_err("error: unexpected implementation for %s: %s (expected %s)\n", + WOLFKM_AESXTS_NAME, driver_name, WOLFKM_AESXTS_DRIVER); + ret = -ENOENT; + goto test_xts_end; + } + + ret = crypto_skcipher_ivsize(tfm); + if (ret != sizeof(iv)) { + pr_err("error: AES skcipher algorithm %s crypto_skcipher_ivsize()" + " returned %d but expected %d\n", + WOLFKM_AESXTS_DRIVER, ret, (int)sizeof(iv)); + ret = -EINVAL; + goto test_xts_end; + } + + ret = crypto_skcipher_setkey(tfm, k1, sizeof(k1)); + if (ret) { + pr_err("error: crypto_skcipher_setkey for %s returned: %d\n", + WOLFKM_AESXTS_NAME, ret); + goto test_xts_end; + } + + req = skcipher_request_alloc(tfm, GFP_KERNEL); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + pr_err("error: allocating AES skcipher request %s failed: %d\n", + WOLFKM_AESXTS_DRIVER, ret); + goto test_xts_end; + } + + memcpy(dec2, p1, sizeof(p1)); + memset(enc2, 0, sizeof(p1)); + + sg_init_one(src, dec2, sizeof(p1)); + sg_init_one(dst, enc2, sizeof(p1)); + + memcpy(iv, i1, sizeof(iv)); + skcipher_request_set_crypt(req, src, dst, sizeof(p1), iv); + + ret = crypto_skcipher_encrypt(req); + + if (ret) { + pr_err("error: crypto_skcipher_encrypt returned: %d\n", ret); + goto test_xts_end; + } + + ret = XMEMCMP(c1, enc2, sizeof(c1)); + if (ret) { + pr_err("error: c1 and enc2 do not match: %d\n", ret); + ret = -EINVAL; + goto test_xts_end; + } + + memset(dec2, 0, sizeof(p1)); + 
sg_init_one(src, enc2, sizeof(p1)); + sg_init_one(dst, dec2, sizeof(p1)); + + memcpy(iv, i1, sizeof(iv)); + skcipher_request_set_crypt(req, src, dst, sizeof(p1), iv); + + ret = crypto_skcipher_decrypt(req); + + if (ret) { + pr_err("ERROR: crypto_skcipher_decrypt returned %d\n", ret); + goto test_xts_end; + } + + ret = XMEMCMP(p1, dec2, sizeof(p1)); + if (ret) { + pr_err("error: p1 and dec2 do not match: %d\n", ret); + ret = -EINVAL; + goto test_xts_end; + } + + memcpy(dec2, pp, sizeof(pp)); + memset(enc2, 0, sizeof(pp)); + + sg_init_one(src, dec2, sizeof(pp)); + sg_init_one(dst, enc2, sizeof(pp)); + + memcpy(iv, i1, sizeof(iv)); + skcipher_request_set_crypt(req, src, dst, sizeof(pp), iv); + + ret = crypto_skcipher_encrypt(req); + + if (ret) { + pr_err("error: crypto_skcipher_encrypt returned: %d\n", ret); + goto test_xts_end; + } + + ret = XMEMCMP(cp, enc2, sizeof(cp)); + if (ret) { + pr_err("error: cp and enc2 do not match: %d\n", ret); + ret = -EINVAL; + goto test_xts_end; + } + + memset(dec2, 0, sizeof(pp)); + sg_init_one(src, enc2, sizeof(pp)); + sg_init_one(dst, dec2, sizeof(pp)); + + memcpy(iv, i1, sizeof(iv)); + skcipher_request_set_crypt(req, src, dst, sizeof(pp), iv); + + ret = crypto_skcipher_decrypt(req); + + if (ret) { + pr_err("ERROR: crypto_skcipher_decrypt returned %d\n", ret); + goto test_xts_end; + } + + ret = XMEMCMP(pp, dec2, sizeof(pp)); + if (ret) { + pr_err("error: pp and dec2 do not match: %d\n", ret); + ret = -EINVAL; + goto test_xts_end; + } + + test_xts_end: + + if (enc2) + XFREE(enc2, NULL, DYNAMIC_TYPE_AES); + if (dec2) + XFREE(dec2, NULL, DYNAMIC_TYPE_AES); + if (src) + XFREE(src, NULL, DYNAMIC_TYPE_AES); + if (dst) + XFREE(dst, NULL, DYNAMIC_TYPE_AES); + if (req) + skcipher_request_free(req); + if (tfm) + crypto_free_skcipher(tfm); + + out: + + if (aes_inited) + wc_AesXtsFree(aes); + + if (buf) + XFREE(buf, NULL, DYNAMIC_TYPE_AES); + if (cipher) + XFREE(cipher, NULL, DYNAMIC_TYPE_AES); + + if (aes) + XFREE(aes, NULL, DYNAMIC_TYPE_AES); 
+ +#undef AES_XTS_128_TEST_BUF_SIZ + + return ret; +} +#endif /* WOLFSSL_AES_128 */ + +#ifdef WOLFSSL_AES_256 +static int aes_xts_256_test(void) +{ + XtsAes *aes = NULL; + int aes_inited = 0; + int ret = 0; +#define AES_XTS_256_TEST_BUF_SIZ (AES_BLOCK_SIZE * 3) + unsigned char *buf = NULL; + unsigned char *cipher = NULL; + u8 * enc2 = NULL; + u8 * dec2 = NULL; + struct scatterlist * src = NULL; + struct scatterlist * dst = NULL; + struct crypto_skcipher *tfm = NULL; + struct skcipher_request *req = NULL; + u8 iv[AES_BLOCK_SIZE]; + const char *driver_name; + + /* 256 key tests */ + static const unsigned char k1[] = { + 0x1e, 0xa6, 0x61, 0xc5, 0x8d, 0x94, 0x3a, 0x0e, + 0x48, 0x01, 0xe4, 0x2f, 0x4b, 0x09, 0x47, 0x14, + 0x9e, 0x7f, 0x9f, 0x8e, 0x3e, 0x68, 0xd0, 0xc7, + 0x50, 0x52, 0x10, 0xbd, 0x31, 0x1a, 0x0e, 0x7c, + 0xd6, 0xe1, 0x3f, 0xfd, 0xf2, 0x41, 0x8d, 0x8d, + 0x19, 0x11, 0xc0, 0x04, 0xcd, 0xa5, 0x8d, 0xa3, + 0xd6, 0x19, 0xb7, 0xe2, 0xb9, 0x14, 0x1e, 0x58, + 0x31, 0x8e, 0xea, 0x39, 0x2c, 0xf4, 0x1b, 0x08 + }; + + static const unsigned char i1[] = { + 0xad, 0xf8, 0xd9, 0x26, 0x27, 0x46, 0x4a, 0xd2, + 0xf0, 0x42, 0x8e, 0x84, 0xa9, 0xf8, 0x75, 0x64 + }; + + static const unsigned char p1[] = { + 0x2e, 0xed, 0xea, 0x52, 0xcd, 0x82, 0x15, 0xe1, + 0xac, 0xc6, 0x47, 0xe8, 0x10, 0xbb, 0xc3, 0x64, + 0x2e, 0x87, 0x28, 0x7f, 0x8d, 0x2e, 0x57, 0xe3, + 0x6c, 0x0a, 0x24, 0xfb, 0xc1, 0x2a, 0x20, 0x2e + }; + + static const unsigned char c1[] = { + 0xcb, 0xaa, 0xd0, 0xe2, 0xf6, 0xce, 0xa3, 0xf5, + 0x0b, 0x37, 0xf9, 0x34, 0xd4, 0x6a, 0x9b, 0x13, + 0x0b, 0x9d, 0x54, 0xf0, 0x7e, 0x34, 0xf3, 0x6a, + 0xf7, 0x93, 0xe8, 0x6f, 0x73, 0xc6, 0xd7, 0xdb + }; + + /* plain text test of partial block is not from NIST test vector list */ + static const unsigned char pp[] = { + 0xeb, 0xab, 0xce, 0x95, 0xb1, 0x4d, 0x3c, 0x8d, + 0x6f, 0xb3, 0x50, 0x39, 0x07, 0x90, 0x31, 0x1c, + 0x6e, 0x4b, 0x92, 0x01, 0x3e, 0x76, 0x8a, 0xd5 + }; + + static const unsigned char cp[] = { + 0x65, 0x5e, 0x1d, 0x37, 
0x4a, 0x91, 0xe7, 0x6c, + 0x4f, 0x83, 0x92, 0xbc, 0x5a, 0x10, 0x55, 0x27, + 0x61, 0x0e, 0x5a, 0xde, 0xca, 0xc5, 0x12, 0xd8 + }; + + static const unsigned char k2[] = { + 0xad, 0x50, 0x4b, 0x85, 0xd7, 0x51, 0xbf, 0xba, + 0x69, 0x13, 0xb4, 0xcc, 0x79, 0xb6, 0x5a, 0x62, + 0xf7, 0xf3, 0x9d, 0x36, 0x0f, 0x35, 0xb5, 0xec, + 0x4a, 0x7e, 0x95, 0xbd, 0x9b, 0xa5, 0xf2, 0xec, + 0xc1, 0xd7, 0x7e, 0xa3, 0xc3, 0x74, 0xbd, 0x4b, + 0x13, 0x1b, 0x07, 0x83, 0x87, 0xdd, 0x55, 0x5a, + 0xb5, 0xb0, 0xc7, 0xe5, 0x2d, 0xb5, 0x06, 0x12, + 0xd2, 0xb5, 0x3a, 0xcb, 0x47, 0x8a, 0x53, 0xb4 + }; + + static const unsigned char i2[] = { + 0xe6, 0x42, 0x19, 0xed, 0xe0, 0xe1, 0xc2, 0xa0, + 0x0e, 0xf5, 0x58, 0x6a, 0xc4, 0x9b, 0xeb, 0x6f + }; + + static const unsigned char p2[] = { + 0x24, 0xcb, 0x76, 0x22, 0x55, 0xb5, 0xa8, 0x00, + 0xf4, 0x6e, 0x80, 0x60, 0x56, 0x9e, 0x05, 0x53, + 0xbc, 0xfe, 0x86, 0x55, 0x3b, 0xca, 0xd5, 0x89, + 0xc7, 0x54, 0x1a, 0x73, 0xac, 0xc3, 0x9a, 0xbd, + 0x53, 0xc4, 0x07, 0x76, 0xd8, 0xe8, 0x22, 0x61, + 0x9e, 0xa9, 0xad, 0x77, 0xa0, 0x13, 0x4c, 0xfc + }; + + static const unsigned char c2[] = { + 0xa3, 0xc6, 0xf3, 0xf3, 0x82, 0x79, 0x5b, 0x10, + 0x87, 0xd7, 0x02, 0x50, 0xdb, 0x2c, 0xd3, 0xb1, + 0xa1, 0x62, 0xa8, 0xb6, 0xdc, 0x12, 0x60, 0x61, + 0xc1, 0x0a, 0x84, 0xa5, 0x85, 0x3f, 0x3a, 0x89, + 0xe6, 0x6c, 0xdb, 0xb7, 0x9a, 0xb4, 0x28, 0x9b, + 0xc3, 0xea, 0xd8, 0x10, 0xe9, 0xc0, 0xaf, 0x92 + }; + + if ((aes = (XtsAes *)XMALLOC(sizeof(*aes), NULL, DYNAMIC_TYPE_AES)) + == NULL) + { + ret = MEMORY_E; + goto out; + } + + if ((buf = (unsigned char *)XMALLOC(AES_XTS_256_TEST_BUF_SIZ, NULL, + DYNAMIC_TYPE_AES)) == NULL) + { + ret = MEMORY_E; + goto out; + } + if ((cipher = (unsigned char *)XMALLOC(AES_XTS_256_TEST_BUF_SIZ, NULL, + DYNAMIC_TYPE_AES)) == NULL) + { + ret = MEMORY_E; + goto out; + } + + ret = wc_AesXtsInit(aes, NULL, INVALID_DEVID); + if (ret != 0) + goto out; + else + aes_inited = 1; + + XMEMSET(buf, 0, AES_XTS_256_TEST_BUF_SIZ); + ret = wc_AesXtsSetKeyNoInit(aes, k2, 
sizeof(k2), AES_ENCRYPTION); + if (ret != 0) + goto out; + + ret = wc_AesXtsEncrypt(aes, buf, p2, sizeof(p2), i2, sizeof(i2)); + if (ret != 0) + goto out; + if (XMEMCMP(c2, buf, sizeof(c2))) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + + XMEMSET(buf, 0, AES_XTS_256_TEST_BUF_SIZ); + ret = wc_AesXtsSetKeyNoInit(aes, k1, sizeof(k1), AES_ENCRYPTION); + if (ret != 0) + goto out; + ret = wc_AesXtsEncrypt(aes, buf, p1, sizeof(p1), i1, sizeof(i1)); + if (ret != 0) + goto out; + if (XMEMCMP(c1, buf, AES_BLOCK_SIZE)) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + + /* partial block encryption test */ + XMEMSET(cipher, 0, AES_XTS_256_TEST_BUF_SIZ); + ret = wc_AesXtsEncrypt(aes, cipher, pp, sizeof(pp), i1, sizeof(i1)); + if (ret != 0) + goto out; + + /* partial block decrypt test */ + XMEMSET(buf, 0, AES_XTS_256_TEST_BUF_SIZ); + ret = wc_AesXtsSetKeyNoInit(aes, k1, sizeof(k1), AES_DECRYPTION); + if (ret != 0) + goto out; + ret = wc_AesXtsDecrypt(aes, buf, cipher, sizeof(pp), i1, sizeof(i1)); + if (ret != 0) + goto out; + if (XMEMCMP(pp, buf, sizeof(pp))) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + + /* NIST decrypt test vector */ + XMEMSET(buf, 0, AES_XTS_256_TEST_BUF_SIZ); + ret = wc_AesXtsDecrypt(aes, buf, c1, sizeof(c1), i1, sizeof(i1)); + if (ret != 0) + goto out; + if (XMEMCMP(p1, buf, AES_BLOCK_SIZE)) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + + XMEMSET(buf, 0, AES_XTS_256_TEST_BUF_SIZ); + ret = wc_AesXtsSetKeyNoInit(aes, k2, sizeof(k2), AES_DECRYPTION); + if (ret != 0) + goto out; + ret = wc_AesXtsDecrypt(aes, buf, c2, sizeof(c2), i2, sizeof(i2)); + if (ret != 0) + goto out; + if (XMEMCMP(p2, buf, sizeof(p2))) { + ret = LINUXKM_LKCAPI_AES_KAT_MISMATCH_E; + goto out; + } + + /* now the kernel crypto part */ + + enc2 = XMALLOC(sizeof(p1), NULL, DYNAMIC_TYPE_AES); + if (!enc2) { + pr_err("error: malloc failed\n"); + ret = -ENOMEM; + goto test_xts_end; + } + + dec2 = XMALLOC(sizeof(p1), NULL, 
DYNAMIC_TYPE_AES); + if (!dec2) { + pr_err("error: malloc failed\n"); + ret = -ENOMEM; + goto test_xts_end; + } + + src = XMALLOC(sizeof(*src) * 2, NULL, DYNAMIC_TYPE_AES); + if (! src) { + pr_err("error: malloc failed\n"); + ret = -ENOMEM; + goto test_xts_end; + } + + dst = XMALLOC(sizeof(*dst) * 2, NULL, DYNAMIC_TYPE_AES); + if (! dst) { + pr_err("error: malloc failed\n"); + ret = -ENOMEM; + goto test_xts_end; + } + + tfm = crypto_alloc_skcipher(WOLFKM_AESXTS_NAME, 0, 0); + if (IS_ERR(tfm)) { + ret = PTR_ERR(tfm); + pr_err("error: allocating AES skcipher algorithm %s failed: %d\n", + WOLFKM_AESXTS_DRIVER, ret); + goto test_xts_end; + } + + driver_name = crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); + if (strcmp(driver_name, WOLFKM_AESXTS_DRIVER)) { + pr_err("error: unexpected implementation for %s: %s (expected %s)\n", + WOLFKM_AESXTS_NAME, driver_name, WOLFKM_AESXTS_DRIVER); + ret = -ENOENT; + goto test_xts_end; + } + + ret = crypto_skcipher_ivsize(tfm); + if (ret != sizeof(iv)) { + pr_err("error: AES skcipher algorithm %s crypto_skcipher_ivsize()" + " returned %d but expected %d\n", + WOLFKM_AESXTS_DRIVER, ret, (int)sizeof(iv)); + ret = -EINVAL; + goto test_xts_end; + } + + ret = crypto_skcipher_setkey(tfm, k1, sizeof(k1)); + if (ret) { + pr_err("error: crypto_skcipher_setkey for %s returned: %d\n", + WOLFKM_AESXTS_NAME, ret); + goto test_xts_end; + } + + req = skcipher_request_alloc(tfm, GFP_KERNEL); + if (IS_ERR(req)) { + ret = PTR_ERR(req); + pr_err("error: allocating AES skcipher request %s failed: %d\n", + WOLFKM_AESXTS_DRIVER, ret); + goto test_xts_end; + } + + memcpy(dec2, p1, sizeof(p1)); + memset(enc2, 0, sizeof(p1)); + + sg_init_one(src, dec2, sizeof(p1)); + sg_init_one(dst, enc2, sizeof(p1)); + + memcpy(iv, i1, sizeof(iv)); + skcipher_request_set_crypt(req, src, dst, sizeof(p1), iv); + + ret = crypto_skcipher_encrypt(req); + + if (ret) { + pr_err("error: crypto_skcipher_encrypt returned: %d\n", ret); + goto test_xts_end; + } + + ret = 
XMEMCMP(c1, enc2, sizeof(c1)); + if (ret) { + pr_err("error: c1 and enc2 do not match: %d\n", ret); + ret = -EINVAL; + goto test_xts_end; + } + + memset(dec2, 0, sizeof(p1)); + sg_init_one(src, enc2, sizeof(p1)); + sg_init_one(dst, dec2, sizeof(p1)); + + memcpy(iv, i1, sizeof(iv)); + skcipher_request_set_crypt(req, src, dst, sizeof(p1), iv); + + ret = crypto_skcipher_decrypt(req); + + if (ret) { + pr_err("ERROR: crypto_skcipher_decrypt returned %d\n", ret); + goto test_xts_end; + } + + ret = XMEMCMP(p1, dec2, sizeof(p1)); + if (ret) { + pr_err("error: p1 and dec2 do not match: %d\n", ret); + ret = -EINVAL; + goto test_xts_end; + } + + memcpy(dec2, pp, sizeof(pp)); + memset(enc2, 0, sizeof(pp)); + + sg_init_one(src, dec2, sizeof(pp)); + sg_init_one(dst, enc2, sizeof(pp)); + + memcpy(iv, i1, sizeof(iv)); + skcipher_request_set_crypt(req, src, dst, sizeof(pp), iv); + + ret = crypto_skcipher_encrypt(req); + + if (ret) { + pr_err("error: crypto_skcipher_encrypt returned: %d\n", ret); + goto test_xts_end; + } + + ret = XMEMCMP(cp, enc2, sizeof(cp)); + if (ret) { + pr_err("error: cp and enc2 do not match: %d\n", ret); + ret = -EINVAL; + goto test_xts_end; + } + + memset(dec2, 0, sizeof(pp)); + sg_init_one(src, enc2, sizeof(pp)); + sg_init_one(dst, dec2, sizeof(pp)); + + memcpy(iv, i1, sizeof(iv)); + skcipher_request_set_crypt(req, src, dst, sizeof(pp), iv); + + ret = crypto_skcipher_decrypt(req); + + if (ret) { + pr_err("ERROR: crypto_skcipher_decrypt returned %d\n", ret); + goto test_xts_end; + } + + ret = XMEMCMP(pp, dec2, sizeof(pp)); + if (ret) { + pr_err("error: pp and dec2 do not match: %d\n", ret); + ret = -EINVAL; + goto test_xts_end; + } + + test_xts_end: + + if (enc2) + XFREE(enc2, NULL, DYNAMIC_TYPE_AES); + if (dec2) + XFREE(dec2, NULL, DYNAMIC_TYPE_AES); + if (src) + XFREE(src, NULL, DYNAMIC_TYPE_AES); + if (dst) + XFREE(dst, NULL, DYNAMIC_TYPE_AES); + if (req) + skcipher_request_free(req); + if (tfm) + crypto_free_skcipher(tfm); + + out: + + if (aes_inited) + 
wc_AesXtsFree(aes); + + if (buf) + XFREE(buf, NULL, DYNAMIC_TYPE_AES); + if (cipher) + XFREE(cipher, NULL, DYNAMIC_TYPE_AES); + + if (aes) + XFREE(aes, NULL, DYNAMIC_TYPE_AES); + +#undef AES_XTS_256_TEST_BUF_SIZ + + return ret; +} +#endif /* WOLFSSL_AES_256 */ + +static int linuxkm_test_aesxts(void) { + int ret; + + #ifdef WOLFSSL_AES_128 + ret = aes_xts_128_test(); + if (ret != 0) { + pr_err("aes_xts_128_test() failed with retval %d.\n", ret); + goto out; + } + #endif + #ifdef WOLFSSL_AES_256 + ret = aes_xts_256_test(); + if (ret != 0) { + pr_err("aes_xts_256_test() failed with retval %d.\n", ret); + goto out; + } + #endif + +out: + + return ret; +} + +#endif /* WOLFSSL_AES_XTS && + * (LINUXKM_LKCAPI_REGISTER_ALL || LINUXKM_LKCAPI_REGISTER_AESXTS) + */ + +#endif /* !NO_AES */ + +static int linuxkm_lkcapi_register(void) +{ + int ret = 0; + +#define REGISTER_ALG(alg, installer, tester) do { \ + if (alg ## _loaded) { \ + pr_err("ERROR: %s is already registered.\n", \ + (alg).base.cra_driver_name); \ + return -EEXIST; \ + } \ + \ + ret = (installer)(&(alg)); \ + \ + if (ret) { \ + pr_err("ERROR: " #installer " for %s failed " \ + "with return code %d.\n", \ + (alg).base.cra_driver_name, ret); \ + return ret; \ + } \ + \ + alg ## _loaded = 1; \ + \ + ret = (tester()); \ + \ + if (ret) { \ + pr_err("ERROR: self-test for %s failed " \ + "with return code %d.\n", \ + (alg).base.cra_driver_name, ret); \ + return ret; \ + } \ + pr_info("%s self-test OK -- " \ + "registered for %s with priority %d.\n", \ + (alg).base.cra_driver_name, \ + (alg).base.cra_name, \ + (alg).base.cra_priority); \ + } while (0) + +#if defined(HAVE_AES_CBC) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCBC)) + + REGISTER_ALG(cbcAesAlg, crypto_register_skcipher, linuxkm_test_aescbc); +#endif + +#if defined(WOLFSSL_AES_CFB) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCFB)) + + REGISTER_ALG(cfbAesAlg, 
crypto_register_skcipher, linuxkm_test_aescfb); +#endif + +#if defined(HAVE_AESGCM) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESGCM)) && \ + (! (defined(WOLFSSL_AESNI) && defined(WC_AES_C_DYNAMIC_FALLBACK))) + + REGISTER_ALG(gcmAesAead, crypto_register_aead, linuxkm_test_aesgcm); +#endif + +#if defined(WOLFSSL_AES_XTS) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESXTS)) + + REGISTER_ALG(xtsAesAlg, crypto_register_skcipher, linuxkm_test_aesxts); +#endif + +#undef REGISTER_ALG + + return 0; +} + +static void linuxkm_lkcapi_unregister(void) +{ +#define UNREGISTER_ALG(alg, uninstaller) do { \ + if (alg ## _loaded) { \ + (uninstaller)(&(alg)); \ + alg ## _loaded = 0; \ + } \ + } while (0) + +#if defined(HAVE_AES_CBC) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCBC)) + + UNREGISTER_ALG(cbcAesAlg, crypto_unregister_skcipher); +#endif +#if defined(WOLFSSL_AES_CFB) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESCFB)) + + UNREGISTER_ALG(cfbAesAlg, crypto_unregister_skcipher); +#endif +#if defined(HAVE_AESGCM) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESGCM)) && \ + (! 
(defined(WOLFSSL_AESNI) && defined(WC_AES_C_DYNAMIC_FALLBACK))) + + UNREGISTER_ALG(gcmAesAead, crypto_unregister_aead); +#endif +#if defined(WOLFSSL_AES_XTS) && \ + (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ + defined(LINUXKM_LKCAPI_REGISTER_AESXTS)) + + UNREGISTER_ALG(xtsAesAlg, crypto_unregister_skcipher); +#endif + +#undef UNREGISTER_ALG +} diff --git a/linuxkm/module_hooks.c b/linuxkm/module_hooks.c index 8ee34dff6..76c104db9 100644 --- a/linuxkm/module_hooks.c +++ b/linuxkm/module_hooks.c @@ -47,7 +47,6 @@ #endif #ifndef NO_CRYPT_TEST #include - #include #endif static int libwolfssl_cleanup(void) { @@ -71,6 +70,8 @@ static int libwolfssl_cleanup(void) { #ifdef HAVE_LINUXKM_PIE_SUPPORT +#ifdef DEBUG_LINUXKM_PIE_SUPPORT + extern int wolfCrypt_PIE_first_function(void); extern int wolfCrypt_PIE_last_function(void); extern const unsigned int wolfCrypt_PIE_rodata_start[]; @@ -90,6 +91,8 @@ static unsigned int hash_span(char *start, char *end) { return sum; } +#endif /* DEBUG_LINUXKM_PIE_SUPPORT */ + #ifdef USE_WOLFSSL_LINUXKM_PIE_REDIRECT_TABLE extern struct wolfssl_linuxkm_pie_redirect_table wolfssl_linuxkm_pie_redirect_table; static int set_up_wolfssl_linuxkm_pie_redirect_table(void); @@ -118,7 +121,6 @@ static int updateFipsHash(void); #endif #ifdef WOLFSSL_LINUXKM_BENCHMARKS -#undef HAVE_PTHREAD #define STRING_USER #define NO_MAIN_FUNCTION #define current_time benchmark_current_time @@ -126,6 +128,10 @@ static int updateFipsHash(void); #include "wolfcrypt/benchmark/benchmark.c" #endif /* WOLFSSL_LINUXKM_BENCHMARKS */ +#ifdef LINUXKM_LKCAPI_REGISTER + #include "linuxkm/lkcapi_glue.c" +#endif + #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) static int __init wolfssl_init(void) #else @@ -152,7 +158,7 @@ static int wolfssl_init(void) return ret; #endif -#ifdef HAVE_LINUXKM_PIE_SUPPORT +#if defined(HAVE_LINUXKM_PIE_SUPPORT) && defined(DEBUG_LINUXKM_PIE_SUPPORT) #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 0) /* see linux commit ac3b432839 */ @@ -219,7 +225,7 @@ 
static int wolfssl_init(void) text_hash, pie_text_end-pie_text_start, rodata_hash, pie_rodata_end-pie_rodata_start); } -#endif /* HAVE_LINUXKM_PIE_SUPPORT */ +#endif /* HAVE_LINUXKM_PIE_SUPPORT && DEBUG_LINUXKM_PIE_SUPPORT */ #ifdef HAVE_FIPS ret = wolfCrypt_SetCb_fips(lkmFipsCb); @@ -239,19 +245,32 @@ static int wolfssl_init(void) return -ECANCELED; } - pr_info("wolfCrypt FIPS [" - -#if defined(HAVE_FIPS_VERSION) && (HAVE_FIPS_VERSION == 3) - "ready" -#elif defined(HAVE_FIPS_VERSION) && (HAVE_FIPS_VERSION == 2) \ - && defined(WOLFCRYPT_FIPS_RAND) - "140-2 rand" -#elif defined(HAVE_FIPS_VERSION) && (HAVE_FIPS_VERSION == 2) - "140-2" + pr_info("FIPS 140-3 wolfCrypt-fips v%d.%d.%d%s%s startup " + "self-test succeeded.\n", +#ifdef HAVE_FIPS_VERSION_MAJOR + HAVE_FIPS_VERSION_MAJOR, #else - "140" + HAVE_FIPS_VERSION, #endif - "] POST succeeded.\n"); +#ifdef HAVE_FIPS_VERSION_MINOR + HAVE_FIPS_VERSION_MINOR, +#else + 0, +#endif +#ifdef HAVE_FIPS_VERSION_PATCH + HAVE_FIPS_VERSION_PATCH, +#else + 0, +#endif +#ifdef HAVE_FIPS_VERSION_PORT + "-", + HAVE_FIPS_VERSION_PORT +#else + "", + "" +#endif + ); + #endif /* HAVE_FIPS */ #ifdef WC_RNG_SEED_CB @@ -287,6 +306,21 @@ static int wolfssl_init(void) return -ECANCELED; } pr_info("wolfCrypt self-test passed.\n"); +#else + pr_info("skipping full wolfcrypt_test() " + "(configure with --enable-crypttests to enable).\n"); +#endif + +#ifdef LINUXKM_LKCAPI_REGISTER + ret = linuxkm_lkcapi_register(); + + if (ret) { + pr_err("linuxkm_lkcapi_register() failed with return code %d.\n", ret); + linuxkm_lkcapi_unregister(); + (void)libwolfssl_cleanup(); + msleep(10); + return -ECANCELED; + } #endif #ifdef WOLFSSL_LINUXKM_BENCHMARKS @@ -326,6 +360,10 @@ static void __exit wolfssl_exit(void) static void wolfssl_exit(void) #endif { +#ifdef LINUXKM_LKCAPI_REGISTER + linuxkm_lkcapi_unregister(); +#endif + (void)libwolfssl_cleanup(); return; @@ -375,6 +413,7 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) { #ifndef 
__ARCH_MEMCMP_NO_REDIRECT wolfssl_linuxkm_pie_redirect_table.memcmp = memcmp; #endif +#ifndef CONFIG_FORTIFY_SOURCE #ifndef __ARCH_MEMCPY_NO_REDIRECT wolfssl_linuxkm_pie_redirect_table.memcpy = memcpy; #endif @@ -384,6 +423,7 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) { #ifndef __ARCH_MEMMOVE_NO_REDIRECT wolfssl_linuxkm_pie_redirect_table.memmove = memmove; #endif +#endif /* !CONFIG_FORTIFY_SOURCE */ #ifndef __ARCH_STRCMP_NO_REDIRECT wolfssl_linuxkm_pie_redirect_table.strcmp = strcmp; #endif @@ -415,6 +455,11 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) { #else wolfssl_linuxkm_pie_redirect_table.printk = printk; #endif + +#ifdef CONFIG_FORTIFY_SOURCE + wolfssl_linuxkm_pie_redirect_table.__warn_printk = __warn_printk; +#endif + wolfssl_linuxkm_pie_redirect_table.snprintf = snprintf; wolfssl_linuxkm_pie_redirect_table._ctype = _ctype; @@ -516,11 +561,15 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) { /* runtime assert that the table has no null slots after initialization. 
*/ { unsigned long *i; + static_assert(sizeof(unsigned long) == sizeof(void *), + "unexpected pointer size"); for (i = (unsigned long *)&wolfssl_linuxkm_pie_redirect_table; i < (unsigned long *)&wolfssl_linuxkm_pie_redirect_table._last_slot; ++i) if (*i == 0) { - pr_err("wolfCrypt container redirect table initialization was incomplete.\n"); + pr_err("wolfCrypt container redirect table initialization was " + "incomplete [%lu].\n", + i-(unsigned long *)&wolfssl_linuxkm_pie_redirect_table); return -EFAULT; } } @@ -711,11 +760,19 @@ static int updateFipsHash(void) } } - if (XMEMCMP(hash, binVerify, WC_SHA256_DIGEST_SIZE) == 0) + if (XMEMCMP(hash, binVerify, WC_SHA256_DIGEST_SIZE) == 0) { +#if defined(DEBUG_LINUXKM_PIE_SUPPORT) || defined(WOLFSSL_LINUXKM_VERBOSE_DEBUG) + pr_info("updateFipsHash: verifyCore already matches [%s]\n", verifyCore); +#else pr_info("updateFipsHash: verifyCore already matches.\n"); - else { +#endif + } else { XMEMCPY(verifyCore, base16_hash, WC_SHA256_DIGEST_SIZE*2 + 1); +#if defined(DEBUG_LINUXKM_PIE_SUPPORT) || defined(WOLFSSL_LINUXKM_VERBOSE_DEBUG) + pr_info("updateFipsHash: verifyCore updated [%s].\n", base16_hash); +#else pr_info("updateFipsHash: verifyCore updated.\n"); +#endif } ret = 0; diff --git a/wolfcrypt/src/aes.c b/wolfcrypt/src/aes.c index 2214f8a71..510e395dd 100644 --- a/wolfcrypt/src/aes.c +++ b/wolfcrypt/src/aes.c @@ -6383,7 +6383,6 @@ int wc_AesGcmSetKey(Aes* aes, const byte* key, word32 len) #endif return BAD_FUNC_ARG; } - #ifdef OPENSSL_EXTRA XMEMSET(aes->gcm.aadH, 0, sizeof(aes->gcm.aadH)); aes->gcm.aadLen = 0; @@ -12250,9 +12249,17 @@ int wc_AesKeyUnWrap(const byte* key, word32 keySz, const byte* in, word32 inSz, #ifdef WOLFSSL_AES_XTS -/* Galios Field to use */ +/* Galois Field to use */ #define GF_XTS 0x87 +/* Set up keys for encryption and/or decryption. + * + * aes buffer holding aes subkeys + * heap heap hint to use for memory. Can be NULL + * devId id to use with async crypto. 
Can be 0 + * + * return 0 on success + */ int wc_AesXtsInit(XtsAes* aes, void* heap, int devId) { int ret = 0; @@ -12265,22 +12272,28 @@ int wc_AesXtsInit(XtsAes* aes, void* heap, int devId) return ret; } if ((ret = wc_AesInit(&aes->aes, heap, devId)) != 0) { + (void)wc_AesFree(&aes->tweak); return ret; } +#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + if ((ret = wc_AesInit(&aes->aes_decrypt, heap, devId)) != 0) { + (void)wc_AesFree(&aes->tweak); + (void)wc_AesFree(&aes->aes); + return ret; + } +#endif return 0; } -/* This is to help with setting keys to correct encrypt or decrypt type. +/* Set up keys for encryption and/or decryption. * - * tweak AES key for tweak in XTS - * aes AES key for encrypt/decrypt process - * key buffer holding aes key | tweak key + * aes buffer holding aes subkeys + * key AES key for encrypt/decrypt and tweak process (concatenated) * len length of key buffer in bytes. Should be twice that of key size. i.e. * 32 for a 16 byte key. - * dir direction, either AES_ENCRYPTION or AES_DECRYPTION - * heap heap hint to use for memory. Can be NULL - * devId id to use with async crypto. 
Can be 0 + * dir direction: AES_ENCRYPTION, AES_DECRYPTION, or + * AES_ENCRYPTION_AND_DECRYPTION * * return 0 on success */ @@ -12293,27 +12306,82 @@ int wc_AesXtsSetKeyNoInit(XtsAes* aes, const byte* key, word32 len, int dir) return BAD_FUNC_ARG; } + if ((dir != AES_ENCRYPTION) && (dir != AES_DECRYPTION) +#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + && (dir != AES_ENCRYPTION_AND_DECRYPTION) +#endif + ) + { + return BAD_FUNC_ARG; + } + keySz = len/2; - if (keySz != 16 && keySz != 32) { + if (keySz != AES_128_KEY_SIZE && keySz != AES_256_KEY_SIZE) { WOLFSSL_MSG("Unsupported key size"); return WC_KEY_SIZE_E; } - if ((ret = wc_AesSetKey(&aes->aes, key, keySz, NULL, dir)) == 0) { +#ifdef HAVE_FIPS + if (XMEMCMP(key, key + keySz, keySz) == 0) { + WOLFSSL_MSG("FIPS AES-XTS main and tweak keys must differ"); + return BAD_FUNC_ARG; + } +#endif + + if (dir == AES_ENCRYPTION +#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + || dir == AES_ENCRYPTION_AND_DECRYPTION +#endif + ) + { + ret = wc_AesSetKey(&aes->aes, key, keySz, NULL, AES_ENCRYPTION); + } + +#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + if ((ret == 0) && ((dir == AES_DECRYPTION) + || (dir == AES_ENCRYPTION_AND_DECRYPTION))) + ret = wc_AesSetKey(&aes->aes_decrypt, key, keySz, NULL, AES_DECRYPTION); +#else + if (dir == AES_DECRYPTION) + ret = wc_AesSetKey(&aes->aes, key, keySz, NULL, AES_DECRYPTION); +#endif + + if (ret == 0) ret = wc_AesSetKey(&aes->tweak, key + keySz, keySz, NULL, AES_ENCRYPTION); - if (ret != 0) { - wc_AesFree(&aes->aes); - } + #ifdef WOLFSSL_AESNI - if (aes->aes.use_aesni != aes->tweak.use_aesni) { - if (aes->aes.use_aesni) - aes->aes.use_aesni = 0; - else - aes->tweak.use_aesni = 0; - } + if (ret == 0) { + /* With WC_AES_C_DYNAMIC_FALLBACK, the main and tweak keys could have + * conflicting _aesni status, but the AES-XTS asm implementations need + * them to all be AESNI. If any aren't, disable AESNI on all. 
+ */ + if ((((dir == AES_ENCRYPTION) +#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + || (dir == AES_ENCRYPTION_AND_DECRYPTION) #endif + ) && + (aes->aes.use_aesni != aes->tweak.use_aesni)) +#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + || + (((dir == AES_DECRYPTION) + || (dir == AES_ENCRYPTION_AND_DECRYPTION)) && + (aes->aes_decrypt.use_aesni != aes->tweak.use_aesni)) +#endif + ) + { +#ifdef WC_AES_C_DYNAMIC_FALLBACK + aes->aes.use_aesni = 0; +#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + aes->aes_decrypt.use_aesni = 0; +#endif + aes->tweak.use_aesni = 0; +#else + ret = SYSLIB_FAILED_E; +#endif + } } +#endif return ret; } @@ -12356,6 +12424,9 @@ int wc_AesXtsFree(XtsAes* aes) { if (aes != NULL) { wc_AesFree(&aes->aes); +#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + wc_AesFree(&aes->aes_decrypt); +#endif wc_AesFree(&aes->tweak); } @@ -12615,10 +12686,19 @@ int wc_AesXtsEncrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz, { int ret; + Aes *aes; + if (xaes == NULL || out == NULL || in == NULL) { return BAD_FUNC_ARG; } + aes = &xaes->aes; + + if (aes->keylen == 0) { + WOLFSSL_MSG("wc_AesXtsEncrypt called with unset encryption key."); + return BAD_FUNC_ARG; + } + if (iSz < AES_BLOCK_SIZE) { return BAD_FUNC_ARG; } @@ -12631,28 +12711,33 @@ int wc_AesXtsEncrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz, { #ifdef WOLFSSL_AESNI #ifdef WC_AES_C_DYNAMIC_FALLBACK - int orig_use_aesni = xaes->aes.use_aesni; + int orig_use_aesni = aes->use_aesni; #endif - if (xaes->aes.use_aesni && ((ret = SAVE_VECTOR_REGISTERS2()) != 0)) { + + if (aes->use_aesni && ((ret = SAVE_VECTOR_REGISTERS2()) != 0)) { #ifdef WC_AES_C_DYNAMIC_FALLBACK - xaes->aes.use_aesni = 0; + aes->use_aesni = 0; xaes->tweak.use_aesni = 0; #else return ret; #endif } - if (xaes->aes.use_aesni) { + if (aes->use_aesni) { #if defined(HAVE_INTEL_AVX1) if (IS_INTEL_AVX1(intel_flags)) { - AES_XTS_encrypt_avx1(in, out, sz, i, (const byte*)xaes->aes.key, - (const 
byte*)xaes->tweak.key, (int)xaes->aes.rounds); + AES_XTS_encrypt_avx1(in, out, sz, i, + (const byte*)aes->key, + (const byte*)xaes->tweak.key, + (int)aes->rounds); ret = 0; } else #endif { - AES_XTS_encrypt_aesni(in, out, sz, i, (const byte*)xaes->aes.key, - (const byte*)xaes->tweak.key, (int)xaes->aes.rounds); + AES_XTS_encrypt_aesni(in, out, sz, i, + (const byte*)aes->key, + (const byte*)xaes->tweak.key, + (int)aes->rounds); ret = 0; } } @@ -12663,11 +12748,11 @@ int wc_AesXtsEncrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz, } #ifdef WOLFSSL_AESNI - if (xaes->aes.use_aesni) + if (aes->use_aesni) RESTORE_VECTOR_REGISTERS(); #ifdef WC_AES_C_DYNAMIC_FALLBACK else if (orig_use_aesni) { - xaes->aes.use_aesni = orig_use_aesni; + aes->use_aesni = orig_use_aesni; xaes->tweak.use_aesni = orig_use_aesni; } #endif @@ -12677,7 +12762,7 @@ int wc_AesXtsEncrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz, return ret; } -/* Same process as encryption but Aes key is AES_DECRYPTION type. +/* Same process as encryption but use aes_decrypt key. 
 * * xaes AES keys to use for block encrypt/decrypt * out output buffer to hold plain text * in input cipher text buffer to decrypt @@ -12693,7 +12778,11 @@ static int AesXtsDecrypt_sw(XtsAes* xaes, byte* out, const byte* in, word32 sz, { int ret = 0; word32 blocks = (sz / AES_BLOCK_SIZE); +#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + Aes *aes = &xaes->aes_decrypt; +#else Aes *aes = &xaes->aes; +#endif Aes *tweak = &xaes->tweak; word32 j; byte carry = 0; @@ -12821,11 +12910,23 @@ int wc_AesXtsDecrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz, const byte* i, word32 iSz) { int ret; + Aes *aes; if (xaes == NULL || out == NULL || in == NULL) { return BAD_FUNC_ARG; } +#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + aes = &xaes->aes_decrypt; +#else + aes = &xaes->aes; +#endif + + if (aes->keylen == 0) { + WOLFSSL_MSG("wc_AesXtsDecrypt called with unset decryption key."); + return BAD_FUNC_ARG; + } + if (iSz < AES_BLOCK_SIZE) { return BAD_FUNC_ARG; } @@ -12838,25 +12939,33 @@ int wc_AesXtsDecrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz, { #ifdef WOLFSSL_AESNI #ifdef WC_AES_C_DYNAMIC_FALLBACK - int orig_use_aesni = xaes->aes.use_aesni; + int orig_use_aesni = aes->use_aesni; #endif - if (xaes->aes.use_aesni && (SAVE_VECTOR_REGISTERS2() != 0)) { - xaes->aes.use_aesni = 0; + if (aes->use_aesni && ((ret = SAVE_VECTOR_REGISTERS2()) != 0)) { +#ifdef WC_AES_C_DYNAMIC_FALLBACK + aes->use_aesni = 0; xaes->tweak.use_aesni = 0; +#else + return ret; +#endif } - if (xaes->aes.use_aesni) { + if (aes->use_aesni) { #if defined(HAVE_INTEL_AVX1) if (IS_INTEL_AVX1(intel_flags)) { - AES_XTS_decrypt_avx1(in, out, sz, i, (const byte*)xaes->aes.key, - (const byte*)xaes->tweak.key, (int)xaes->aes.rounds); + AES_XTS_decrypt_avx1(in, out, sz, i, + (const byte*)aes->key, + (const byte*)xaes->tweak.key, + (int)aes->rounds); ret = 0; } else #endif { - AES_XTS_decrypt_aesni(in, out, sz, i, (const byte*)xaes->aes.key, - (const byte*)xaes->tweak.key, (int)xaes->aes.rounds); + AES_XTS_decrypt_aesni(in,
out, sz, i, + (const byte*)aes->key, + (const byte*)xaes->tweak.key, + (int)aes->rounds); ret = 0; } } @@ -12867,11 +12976,11 @@ int wc_AesXtsDecrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz, } #ifdef WOLFSSL_AESNI - if (xaes->aes.use_aesni) + if (aes->use_aesni) RESTORE_VECTOR_REGISTERS(); #ifdef WC_AES_C_DYNAMIC_FALLBACK else if (orig_use_aesni) { - xaes->aes.use_aesni = orig_use_aesni; + aes->use_aesni = orig_use_aesni; xaes->tweak.use_aesni = orig_use_aesni; } #endif diff --git a/wolfcrypt/src/memory.c b/wolfcrypt/src/memory.c index 565d91075..c01af6272 100644 --- a/wolfcrypt/src/memory.c +++ b/wolfcrypt/src/memory.c @@ -1534,9 +1534,44 @@ WOLFSSL_LOCAL int SAVE_VECTOR_REGISTERS2_fuzzer(void) { return 0; } -#endif +#endif /* DEBUG_VECTOR_REGISTER_ACCESS_FUZZING */ -#endif +#elif defined(DEBUG_VECTOR_REGISTER_ACCESS_FUZZING) + +/* DEBUG_VECTOR_REGISTER_ACCESS is undefined but fuzzing requested -- + * fuzz vector register access without the detailed debugging. + * this is useful for testing in the kernel module build, where glibc and + * thread-local storage are unavailable. + * + * note this is not a well-behaved PRNG, but is adequate for fuzzing purposes. + * the prn sequence is incompressible according to ent and xz, and does not + * cycle within 10M iterations with various seeds including zero, but the Chi + * square distribution is poor, and the unconditioned lsb bit balance is ~54% + * regardless of seed. + */ + +WOLFSSL_LOCAL int SAVE_VECTOR_REGISTERS2_fuzzer(void) { + static unsigned long prn = WC_DEBUG_VECTOR_REGISTERS_FUZZING_SEED; + static int balance_bit = 0; + /* access to prn is racey, but it doesn't matter. */ + unsigned long new_prn = prn ^ 0xba86943da66ee701ul; /* note this magic + * random number is + * bit-balanced. + */ + /* barrel-roll using the bottom 6 bits. 
*/ + if (new_prn & 0x3f) + new_prn = (new_prn << (new_prn & 0x3f)) | + (new_prn >> (0x40 - (new_prn & 0x3f))); + prn = new_prn; + + balance_bit = !balance_bit; + + return ((prn & 1) ^ balance_bit) ? IO_FAILED_E : 0; +} + +#endif /* DEBUG_VECTOR_REGISTER_ACCESS || + * DEBUG_VECTOR_REGISTER_ACCESS_FUZZING + */ #ifdef WOLFSSL_LINUXKM #include "../../linuxkm/linuxkm_memory.c" diff --git a/wolfcrypt/test/test.c b/wolfcrypt/test/test.c index d598046e8..78ab95ae3 100644 --- a/wolfcrypt/test/test.c +++ b/wolfcrypt/test/test.c @@ -9438,6 +9438,7 @@ static wc_test_ret_t aes_xts_128_test(void) 0x77, 0x8a, 0xe8, 0xb4, 0x3c, 0xb9, 0x8d, 0x5a }; +#ifndef HAVE_FIPS /* FIPS requires different keys for main and tweak. */ WOLFSSL_SMALL_STACK_STATIC unsigned char k3[] = { 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, @@ -9462,6 +9463,7 @@ static wc_test_ret_t aes_xts_128_test(void) 0xA0, 0x85, 0xD2, 0x69, 0x6E, 0x87, 0x0A, 0xBF, 0xB5, 0x5A, 0xDD, 0xCB, 0x80, 0xE0, 0xFC, 0xCD }; +#endif /* HAVE_FIPS */ #if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_NO_MALLOC) if ((aes = (XtsAes *)XMALLOC(sizeof *aes, HEAP_HINT, DYNAMIC_TYPE_AES)) == NULL) @@ -9569,7 +9571,11 @@ static wc_test_ret_t aes_xts_128_test(void) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); ret = wc_AesXtsDecrypt(aes, buf, cipher, sizeof(pp), i1, sizeof(i1)); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif if (ret != 0) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); @@ -9581,7 +9587,11 @@ static wc_test_ret_t aes_xts_128_test(void) XMEMSET(buf, 0, sizeof(buf)); ret = wc_AesXtsDecrypt(aes, buf, cipher, sizeof(pp), i1, sizeof(i1)); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, 
WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif WC_DEBUG_SET_VECTOR_REGISTERS_RETVAL(0); if (ret != 0) @@ -9594,7 +9604,11 @@ static wc_test_ret_t aes_xts_128_test(void) XMEMSET(buf, 0, sizeof(buf)); ret = wc_AesXtsDecrypt(aes, buf, c1, sizeof(c1), i1, sizeof(i1)); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif if (ret != 0) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); @@ -9606,7 +9620,11 @@ static wc_test_ret_t aes_xts_128_test(void) XMEMSET(buf, 0, sizeof(buf)); ret = wc_AesXtsDecrypt(aes, buf, c1, sizeof(c1), i1, sizeof(i1)); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif WC_DEBUG_SET_VECTOR_REGISTERS_RETVAL(0); if (ret != 0) @@ -9619,7 +9637,11 @@ static wc_test_ret_t aes_xts_128_test(void) XMEMSET(buf, 0, sizeof(buf)); ret = wc_AesXtsDecrypt(aes, buf, c2, sizeof(c2), i2, sizeof(i2)); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif if (ret != 0) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); @@ -9633,13 +9655,19 @@ static wc_test_ret_t aes_xts_128_test(void) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); ret = wc_AesXtsDecrypt(aes, buf, c2, sizeof(c2), i2, sizeof(i2)); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + 
#endif #endif if (ret != 0) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); if (XMEMCMP(p2, buf, sizeof(p2))) ERROR_OUT(WC_TEST_RET_ENC_NC, out); +#ifndef HAVE_FIPS + /* Test ciphertext stealing in-place. */ XMEMCPY(buf, p3, sizeof(p3)); ret = wc_AesXtsSetKeyNoInit(aes, k3, sizeof(k3), AES_ENCRYPTION); @@ -9660,13 +9688,19 @@ static wc_test_ret_t aes_xts_128_test(void) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); ret = wc_AesXtsDecrypt(aes, buf, buf, sizeof(c3), i3, sizeof(i3)); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif if (ret != 0) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); if (XMEMCMP(p3, buf, sizeof(p3))) ERROR_OUT(WC_TEST_RET_ENC_NC, out); +#endif /* !HAVE_FIPS */ + #if !defined(BENCH_EMBEDDED) && !defined(HAVE_CAVIUM) && \ !defined(WOLFSSL_AFALG) { @@ -9705,7 +9739,12 @@ static wc_test_ret_t aes_xts_128_test(void) ret = wc_AesXtsDecrypt(aes, large_input, large_input, j, i1, sizeof(i1)); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, + WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif if (ret != 0) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); @@ -9886,7 +9925,11 @@ static wc_test_ret_t aes_xts_256_test(void) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); ret = wc_AesXtsDecrypt(aes, buf, cipher, sizeof(pp), i1, sizeof(i1)); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif if (ret != 0) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); @@ -9897,7 +9940,11 @@ static wc_test_ret_t aes_xts_256_test(void) XMEMSET(buf, 0, 
sizeof(buf)); ret = wc_AesXtsDecrypt(aes, buf, c1, sizeof(c1), i1, sizeof(i1)); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif if (ret != 0) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); @@ -9910,7 +9957,11 @@ static wc_test_ret_t aes_xts_256_test(void) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); ret = wc_AesXtsDecrypt(aes, buf, c2, sizeof(c2), i2, sizeof(i2)); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif if (ret != 0) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); @@ -10141,7 +10192,11 @@ static wc_test_ret_t aes_xts_sector_test(void) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); ret = wc_AesXtsDecryptSector(aes, buf, c1, sizeof(c1), s1); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif if (ret != 0) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); @@ -10169,7 +10224,11 @@ static wc_test_ret_t aes_xts_sector_test(void) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); ret = wc_AesXtsDecryptSector(aes, buf, c2, sizeof(c2), s2); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif if (ret != 0) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); @@ -10201,7 +10260,11 @@ static wc_test_ret_t aes_xts_sector_test(void) ret = wc_AesXtsDecryptConsecutiveSectors(aes, data, c3, sizeof(c3), s3, sectorSz); #if 
defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif if (ret != 0) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); @@ -10298,14 +10361,22 @@ static wc_test_ret_t aes_xts_args_test(void) ERROR_OUT(WC_TEST_RET_ENC_EC(ret), out); ret = wc_AesXtsDecryptSector(NULL, buf, c1, sizeof(c1), s1); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif if (ret == 0) ERROR_OUT(WC_TEST_RET_ENC_NC, out); ret = wc_AesXtsDecryptSector(aes, NULL, c1, sizeof(c1), s1); #if defined(WOLFSSL_ASYNC_CRYPT) + #ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + ret = wc_AsyncWait(ret, &aes->aes_decrypt.asyncDev, WC_ASYNC_FLAG_NONE); + #else ret = wc_AsyncWait(ret, &aes->aes.asyncDev, WC_ASYNC_FLAG_NONE); + #endif #endif if (ret == 0) ERROR_OUT(WC_TEST_RET_ENC_NC, out); @@ -30070,8 +30141,8 @@ WOLFSSL_TEST_SUBROUTINE wc_test_ret_t ecc_test(void) #endif /* HAVE_ECC160 */ #if (defined(HAVE_ECC192) || defined(HAVE_ALL_CURVES)) && ECC_MIN_KEY_SZ <= 192 ret = ecc_test_curve(&rng, 24, ECC_CURVE_DEF); - printf("keySize=24, Default\n"); if (ret < 0) { + printf("keySize=24, Default\n"); goto done; } #endif /* HAVE_ECC192 */ diff --git a/wolfssl/wolfcrypt/aes.h b/wolfssl/wolfcrypt/aes.h index dcd3320de..1c369cef5 100644 --- a/wolfssl/wolfcrypt/aes.h +++ b/wolfssl/wolfcrypt/aes.h @@ -179,6 +179,9 @@ enum { AES_ENC_TYPE = WC_CIPHER_AES, /* cipher unique type */ AES_ENCRYPTION = 0, AES_DECRYPTION = 1, +#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + AES_ENCRYPTION_AND_DECRYPTION = 2, +#endif AES_BLOCK_SIZE = 16, @@ -399,6 +402,9 @@ struct Aes { #ifdef WOLFSSL_AES_XTS typedef struct XtsAes { Aes aes; 
+#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS + Aes aes_decrypt; +#endif Aes tweak; } XtsAes; #endif diff --git a/wolfssl/wolfcrypt/memory.h b/wolfssl/wolfcrypt/memory.h index cf8327316..1b5b82e35 100644 --- a/wolfssl/wolfcrypt/memory.h +++ b/wolfssl/wolfcrypt/memory.h @@ -267,6 +267,13 @@ WOLFSSL_LOCAL int wc_debug_CipherLifecycleFree(void **CipherLifecycleTag, ((void)(CipherLifecycleTag), (void)(heap), (void)(abort_p), 0) #endif +#ifdef DEBUG_VECTOR_REGISTER_ACCESS_FUZZING + WOLFSSL_LOCAL int SAVE_VECTOR_REGISTERS2_fuzzer(void); + #ifndef WC_DEBUG_VECTOR_REGISTERS_FUZZING_SEED + #define WC_DEBUG_VECTOR_REGISTERS_FUZZING_SEED 0 + #endif +#endif + #ifdef DEBUG_VECTOR_REGISTER_ACCESS WOLFSSL_API extern THREAD_LS_T int wc_svr_count; WOLFSSL_API extern THREAD_LS_T const char *wc_svr_last_file; @@ -320,11 +327,6 @@ WOLFSSL_LOCAL int wc_debug_CipherLifecycleFree(void **CipherLifecycleTag, } while (0) #ifdef DEBUG_VECTOR_REGISTER_ACCESS_FUZZING - #ifndef WC_DEBUG_VECTOR_REGISTERS_FUZZING_SEED - #define WC_DEBUG_VECTOR_REGISTERS_FUZZING_SEED 0 - #endif - WOLFSSL_LOCAL int SAVE_VECTOR_REGISTERS2_fuzzer(void); - #define SAVE_VECTOR_REGISTERS2(...) ({ \ int _svr2_val = SAVE_VECTOR_REGISTERS2_fuzzer(); \ if (_svr2_val == 0) { \ diff --git a/wolfssl/wolfcrypt/settings.h b/wolfssl/wolfcrypt/settings.h index 8091b02c5..444cd6002 100644 --- a/wolfssl/wolfcrypt/settings.h +++ b/wolfssl/wolfcrypt/settings.h @@ -2771,6 +2771,7 @@ extern void uITRON4_free(void *p) ; #ifndef WOLFSSL_TEST_SUBROUTINE #define WOLFSSL_TEST_SUBROUTINE static #endif + #undef HAVE_PTHREAD #undef HAVE_STRINGS_H #undef HAVE_ERRNO_H #undef HAVE_THREAD_LS