2022-01-07 22:39:38 -06:00
/* linuxkm_wc_port.h
*
2025-01-21 09:55:03 -07:00
* Copyright (C) 2006-2025 wolfSSL Inc.
2022-01-07 22:39:38 -06:00
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
2025-07-10 16:01:52 -06:00
* the Free Software Foundation; either version 3 of the License, or
2022-01-07 22:39:38 -06:00
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
/* included by wolfssl/wolfcrypt/wc_port.h */
# ifndef LINUXKM_WC_PORT_H
# define LINUXKM_WC_PORT_H
2023-05-17 01:44:36 -05:00
# include <linux/version.h>
# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
# error Unsupported kernel.
# endif
2025-02-26 20:55:56 -06:00
# if defined(HAVE_FIPS) && defined(LINUXKM_LKCAPI_REGISTER_AESXTS) && defined(CONFIG_CRYPTO_MANAGER_EXTRA_TESTS)
/* CONFIG_CRYPTO_MANAGER_EXTRA_TESTS expects AES-XTS-384 to work, even when CONFIG_CRYPTO_FIPS, but FIPS 140-3 only allows AES-XTS-256 and AES-XTS-512. */
# error CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is incompatible with FIPS wolfCrypt AES-XTS -- please reconfigure the target kernel to disable CONFIG_CRYPTO_MANAGER_EXTRA_TESTS.
# endif
2022-01-07 22:39:38 -06:00
# ifdef HAVE_CONFIG_H
# ifndef PACKAGE_NAME
# error wc_port.h included before config.h
# endif
/* config.h is autogenerated without gating, and is subject to repeat
* inclusions, so gate it out here to keep autodetection masking
* intact:
*/
# undef HAVE_CONFIG_H
# endif
2022-05-04 13:16:45 -05:00
/* suppress inclusion of stdint-gcc.h to avoid conflicts with Linux native
* include/linux/types.h:
*/
# define _GCC_STDINT_H
# define WC_PTR_TYPE uintptr_t
/* needed to suppress inclusion of stdio.h in wolfssl/wolfcrypt/types.h */
# define XSNPRINTF snprintf
/* the rigmarole around kstrtoll() here is to accommodate its
* warn-unused-result attribute.
*
* also needed to suppress inclusion of stdlib.h in
* wolfssl/wolfcrypt/types.h.
*/
# define XATOI(s) ({ \
long long _xatoi_res = 0; \
int _xatoi_ret = kstrtoll(s, 10, &_xatoi_res); \
if (_xatoi_ret != 0) { \
_xatoi_res = 0; \
} \
(int)_xatoi_res; \
})
2024-01-29 17:48:31 -06:00
/* Kbuild+gcc on x86 doesn't consistently honor the default ALIGN16 on stack
* objects, but gives adequate alignment with "32".
2023-05-17 01:44:36 -05:00
*/
# if defined(CONFIG_X86) && !defined(ALIGN16)
# define ALIGN16 __attribute__ ( (aligned (32)))
# endif
2025-07-09 16:29:04 -05:00
/* kvmalloc()/kvfree() and friends added in linux commit a7c3e901, merged for 4.12.
* kvrealloc() added in de2860f463, merged for 5.15, backported to 5.10.137.
* moved to ultimate home (slab.h) in 8587ca6f34, merged for 5.16.
2025-07-09 18:22:01 -05:00
*
2025-07-23 14:31:52 -05:00
* however, until 6.12 (commit 590b9d576c), it took an extra argument,
* oldsize, that makes it incompatible with traditional libc usage patterns,
* so we don't try to use it.
2025-07-09 16:29:04 -05:00
*/
2025-07-23 14:31:52 -05:00
# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) && \
!defined(DONT_HAVE_KVMALLOC) && !defined(HAVE_KVMALLOC)
2023-05-17 01:44:36 -05:00
# define HAVE_KVMALLOC
# endif
2025-07-23 14:31:52 -05:00
# if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 12, 0) && \
!defined(DONT_HAVE_KVREALLOC) && !defined(HAVE_KVREALLOC)
2025-07-09 18:22:01 -05:00
# define HAVE_KVREALLOC
# endif
2023-05-17 01:44:36 -05:00
2025-07-10 00:57:51 -05:00
# ifdef WOLFCRYPT_ONLY
2025-07-23 14:31:52 -05:00
# if defined(HAVE_KVMALLOC) && \
!defined(DONT_USE_KVMALLOC) && !defined(USE_KVMALLOC)
2025-07-10 00:57:51 -05:00
# define USE_KVMALLOC
# endif
2025-07-23 16:57:24 -05:00
# if defined(HAVE_KVREALLOC) && \
2025-07-23 14:31:52 -05:00
!defined(DONT_USE_KVREALLOC) && !defined(USE_KVREALLOC)
2025-07-10 00:57:51 -05:00
# define USE_KVREALLOC
# endif
# else
/* functioning realloc() is needed for the TLS stack. */
2025-07-23 14:31:52 -05:00
# if defined(HAVE_KVMALLOC) && defined(HAVE_KVREALLOC) && \
!defined(DONT_USE_KVMALLOC) && !defined(DONT_USE_KVREALLOC)
# ifndef USE_KVMALLOC
# define USE_KVMALLOC
# endif
# ifndef USE_KVREALLOC
# define USE_KVREALLOC
# endif
2025-07-10 00:57:51 -05:00
# endif
# endif
2023-05-17 01:44:36 -05:00
/* kernel printf doesn't implement fp. */
# ifndef WOLFSSL_NO_FLOAT_FMT
# define WOLFSSL_NO_FLOAT_FMT
# endif
2025-07-16 13:09:03 -05:00
# ifndef WOLFSSL_LINUXKM_USE_MUTEXES
struct wolfSSL_Mutex ;
extern int wc_lkm_LockMutex ( struct wolfSSL_Mutex * m ) ;
# endif
2025-07-25 15:56:48 -05:00
# ifndef WC_LINUXKM_INTR_SIGNALS
# define WC_LINUXKM_INTR_SIGNALS { SIGKILL, SIGABRT, SIGHUP, SIGINT }
# endif
extern int wc_linuxkm_check_for_intr_signals ( void ) ;
# ifndef WC_LINUXKM_MAX_NS_WITHOUT_YIELD
# define WC_LINUXKM_MAX_NS_WITHOUT_YIELD 1000000000
# endif
extern void wc_linuxkm_relax_long_loop ( void ) ;
2025-07-30 22:15:05 -05:00
enum wc_svr_flags {
2025-08-11 16:10:51 -05:00
WC_SVR_FLAG_NONE = 0 ,
2025-07-30 22:15:05 -05:00
WC_SVR_FLAG_INHIBIT = 1 ,
} ;
2025-07-31 10:37:39 -05:00
# if defined(WOLFSSL_AESNI) || defined(USE_INTEL_SPEEDUP) || \
defined(WOLFSSL_SP_X86_64_ASM)
# ifndef CONFIG_X86
# error X86 SIMD extensions requested, but CONFIG_X86 is not set.
# endif
# define WOLFSSL_LINUXKM_SIMD
# define WOLFSSL_LINUXKM_SIMD_X86
# ifndef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
# define WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
# endif
# elif defined(WOLFSSL_ARMASM) || defined(WOLFSSL_SP_ARM32_ASM) || \
defined(WOLFSSL_SP_ARM64_ASM) || defined(WOLFSSL_SP_ARM_THUMB_ASM) ||\
defined(WOLFSSL_SP_ARM_CORTEX_M_ASM)
# if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
# error ARM SIMD extensions requested, but CONFIG_ARM* is not set.
# endif
# define WOLFSSL_LINUXKM_SIMD
# define WOLFSSL_LINUXKM_SIMD_ARM
# ifndef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
# define WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
# endif
2025-08-22 21:57:23 -05:00
# endif
# if defined(HAVE_HASHDRBG) && defined(HAVE_FIPS) && FIPS_VERSION3_LT(6, 0, 0) && \
(defined(HAVE_INTEL_RDSEED) || defined(HAVE_AMD_RDSEED)) && \
!defined(WC_LINUXKM_RDSEED_IN_GLUE_LAYER)
# define WC_LINUXKM_RDSEED_IN_GLUE_LAYER
# endif
# ifdef WC_LINUXKM_RDSEED_IN_GLUE_LAYER
struct OS_Seed ;
extern int wc_linuxkm_GenerateSeed_IntelRD ( struct OS_Seed * os , unsigned char * output , unsigned int sz ) ;
# define WC_GENERATE_SEED_DEFAULT wc_linuxkm_GenerateSeed_IntelRD
2025-07-31 10:37:39 -05:00
# endif
2022-01-07 22:39:38 -06:00
# ifdef BUILDING_WOLFSSL
2025-07-16 13:09:03 -05:00
# if ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)) || \
(defined(RHEL_MAJOR) && \
((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))) && \
defined(CONFIG_X86)
2025-07-14 10:27:35 -05:00
/* linux/slab.h recursively brings in linux/page-flags.h, bringing in
* non-inline implementations of functions folio_flags() and
* const_folio_flags(). but we can retrofit the attribute.
*/
struct folio ;
static __always_inline unsigned long * folio_flags (
struct folio * folio , unsigned n ) ;
# if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 9, 0)
static __always_inline const unsigned long * const_folio_flags (
const struct folio * folio , unsigned n ) ;
# endif
# endif
2022-01-07 22:39:38 -06:00
# if defined(CONFIG_MIPS) && defined(HAVE_LINUXKM_PIE_SUPPORT)
/* __ZBOOT__ disables some unhelpful macros around the mem*() funcs in
* legacy arch/mips/include/asm/string.h
*/
# define __ZBOOT__
# define memcmp __builtin_memcmp
# define __ARCH_MEMCMP_NO_REDIRECT
# define __ARCH_MEMCPY_NO_REDIRECT
# define __builtin_memcpy memcpy
extern void * memcpy ( void * dest , const void * src , unsigned int n ) ;
# define __ARCH_MEMCPY_NO_REDIRECT
# define __builtin_memset memset
extern void * memset ( void * dest , int c , unsigned int n ) ;
# endif
_Pragma ( " GCC diagnostic push " ) ;
/* we include all the needed kernel headers with these masked out. else
* there are profuse warnings.
*/
_Pragma ( " GCC diagnostic ignored \" -Wunused-parameter \" " ) ;
_Pragma ( " GCC diagnostic ignored \" -Wpointer-arith \" " ) ;
_Pragma ( " GCC diagnostic ignored \" -Wshadow \" " ) ;
_Pragma ( " GCC diagnostic ignored \" -Wnested-externs \" " ) ;
_Pragma ( " GCC diagnostic ignored \" -Wredundant-decls \" " ) ;
_Pragma ( " GCC diagnostic ignored \" -Wsign-compare \" " ) ;
_Pragma ( " GCC diagnostic ignored \" -Wpointer-sign \" " ) ;
_Pragma ( " GCC diagnostic ignored \" -Wbad-function-cast \" " ) ;
_Pragma ( " GCC diagnostic ignored \" -Wdiscarded-qualifiers \" " ) ;
_Pragma ( " GCC diagnostic ignored \" -Wtype-limits \" " ) ;
_Pragma ( " GCC diagnostic ignored \" -Wswitch-enum \" " ) ;
2024-06-18 14:39:44 -05:00
_Pragma ( " GCC diagnostic ignored \" -Wcast-function-type \" " ) ; /* needed for kernel 4.14.336 */
2022-01-07 22:39:38 -06:00
# include <linux/kconfig.h>
2024-11-26 00:12:29 -06:00
# if defined(__PIE__) && defined(CONFIG_ARM64)
# define alt_cb_patch_nops my__alt_cb_patch_nops
2025-07-03 22:09:34 -05:00
# define queued_spin_lock_slowpath my__queued_spin_lock_slowpath
2024-11-26 00:12:29 -06:00
# endif
2022-01-07 22:39:38 -06:00
# include <linux/kernel.h>
# include <linux/ctype.h>
2024-01-26 20:01:19 -06:00
2024-02-09 00:47:23 -06:00
# if defined(CONFIG_FORTIFY_SOURCE) || defined(DEBUG_LINUXKM_FORTIFY_OVERLAY)
2024-01-26 20:01:19 -06:00
# ifdef __PIE__
/* the inline definitions in fortify-string.h use non-inline
* fortify_panic().
*/
extern void __my_fortify_panic ( const char * name ) __noreturn __cold ;
2025-04-10 17:23:17 +00:00
# if LINUX_VERSION_CODE >= KERNEL_VERSION(6,9,0)
2025-04-09 18:35:04 +00:00
/* see linux 3d965b33e40d9 */
# define fortify_panic(func, write, avail, size, retfail) \
__my_fortify_panic(#func)
# else
# define fortify_panic __my_fortify_panic
# endif
2024-01-26 20:01:19 -06:00
# endif
/* the _FORTIFY_SOURCE macros and implementations for several string
* functions are incompatible with libwolfssl, so just reimplement with
* inlines and remap with macros.
*/
# define __ARCH_STRLEN_NO_REDIRECT
# define __ARCH_MEMCPY_NO_REDIRECT
# define __ARCH_MEMSET_NO_REDIRECT
# define __ARCH_MEMMOVE_NO_REDIRECT
/* the inline definitions in fortify-string.h use non-inline
* strlen().
*/
/* Minimal freestanding strlen() replacement.
 * Needed because the inline definitions in linux/fortify-string.h call a
 * non-inline strlen(); this keeps the module self-contained under
 * CONFIG_FORTIFY_SOURCE. Returns the number of bytes before the
 * terminating NUL.
 */
static inline size_t strlen(const char *s) {
    size_t i = 0;
    while (s[i] != '\0')
        ++i;
    return i;
}
# include <linux/string.h>
# undef strlen
# define strlen(s) \
((__builtin_constant_p(s) && __builtin_constant_p(*(s))) ? \
(sizeof(s) - 1) : strlen(s))
/* Freestanding memcpy() replacement (regions must not overlap).
 * When dest, src, and n are all word-aligned, copies a word at a time;
 * otherwise falls back to a byte copy. Returns dest, like memcpy().
 */
static inline void *my_memcpy(void *dest, const void *src, size_t n) {
    /* The OR of dest, src, and n has a low bit set unless all three are
     * multiples of the word size, so one mask test gates the fast path.
     */
    if ((((uintptr_t)dest | (uintptr_t)src | (uintptr_t)n)
         & (uintptr_t)(sizeof(uintptr_t) - 1)) == 0)
    {
        const uintptr_t *s = (const uintptr_t *)src;
        uintptr_t *d = (uintptr_t *)dest;
        size_t nwords = n / sizeof(uintptr_t);
        size_t i;
        for (i = 0; i < nwords; i++)
            d[i] = s[i];
    } else {
        const unsigned char *s = (const unsigned char *)src;
        unsigned char *d = (unsigned char *)dest;
        size_t i;
        for (i = 0; i < n; i++)
            d[i] = s[i];
    }
    return dest;
}
# undef memcpy
# define memcpy my_memcpy
/* Freestanding memset() replacement.
 * When dest and n are word-aligned, fills a word at a time using the byte
 * value replicated across the word; otherwise falls back to a byte fill.
 * Returns dest, like memset().
 */
static inline void *my_memset(void *dest, int c, size_t n) {
    if ((((uintptr_t)dest | (uintptr_t)n)
         & (uintptr_t)(sizeof(uintptr_t) - 1)) == 0)
    {
        /* (uintptr_t)-1 / 0xFF is 0x01 repeated in every byte for any word
         * width (0x0101010101010101 on 64-bit, 0x01010101 on 32-bit), so
         * multiplying by the byte value replicates it across the word.
         */
        uintptr_t pattern = ((uintptr_t)-1 / (uintptr_t)0xFF)
                            * (uintptr_t)(unsigned char)c;
        uintptr_t *d = (uintptr_t *)dest;
        size_t nwords = n / sizeof(uintptr_t);
        size_t i;
        for (i = 0; i < nwords; i++)
            d[i] = pattern;
    } else {
        unsigned char *d = (unsigned char *)dest;
        size_t i;
        for (i = 0; i < n; i++)
            d[i] = (unsigned char)c;
    }
    return dest;
}
# undef memset
# define memset my_memset
/* Freestanding memmove() replacement: overlap-safe copy of n bytes from
 * src to dest, returning dest.
 * When dest, src, and n are all word-aligned, copies a word at a time;
 * otherwise byte-at-a-time. Copies backward when dest overlaps the tail
 * of src (dest > src), forward otherwise.
 *
 * Fix vs. the previous version: n == 0 (and dest == src) now returns
 * immediately. The old code computed src + (n - 1) with n == 0, wrapping
 * the pointer (undefined behavior), and its backward loops decremented a
 * pointer to one-before-start of the buffer (also UB). The count-based
 * backward loop below never forms an out-of-range pointer.
 */
static inline void *my_memmove(void *dest, const void *src, size_t n) {
    if (n == 0 || dest == src)
        return dest;
    if ((((uintptr_t)dest | (uintptr_t)src | (uintptr_t)n)
         & (uintptr_t)(sizeof(uintptr_t) - 1)) == 0)
    {
        const uintptr_t *s = (const uintptr_t *)src;
        uintptr_t *d = (uintptr_t *)dest;
        size_t nwords = n / sizeof(uintptr_t);
        size_t i;
        if ((uintptr_t)dest > (uintptr_t)src) {
            /* dest overlaps the tail of src: copy highest word first. */
            i = nwords;
            while (i > 0) {
                --i;
                d[i] = s[i];
            }
        } else {
            for (i = 0; i < nwords; i++)
                d[i] = s[i];
        }
    } else {
        const unsigned char *s = (const unsigned char *)src;
        unsigned char *d = (unsigned char *)dest;
        size_t i;
        if ((uintptr_t)dest > (uintptr_t)src) {
            i = n;
            while (i > 0) {
                --i;
                d[i] = s[i];
            }
        } else {
            for (i = 0; i < n; i++)
                d[i] = s[i];
        }
    }
    return dest;
}
# undef memmove
# define memmove my_memmove
2025-07-09 16:29:04 -05:00
# else /* !CONFIG_FORTIFY_SOURCE */
# include <linux/string.h>
# endif /* !CONFIG_FORTIFY_SOURCE */
2025-07-16 13:09:03 -05:00
# ifndef __PIE__
# include <linux/init.h>
# include <linux/module.h>
# include <linux/delay.h>
# endif
2025-07-10 00:57:51 -05:00
2025-07-16 13:09:03 -05:00
# if defined(HAVE_KVMALLOC) && \
(LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)) && \
!(defined(RHEL_MAJOR) && ((RHEL_MAJOR > 9) || \
((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
/* before 5.16, the kvmalloc_node() and kvfree() prototypes were in
* mm.h. however, mm.h brings in static, but not inline, pmd_to_page(),
2025-07-10 00:57:51 -05:00
* with direct references to global vmem variables.
*/
2025-07-16 13:09:03 -05:00
# ifdef __PIE__
# include <linux/mm_types.h>
static __always_inline struct page * pmd_to_page ( pmd_t * pmd ) ;
2025-07-10 00:57:51 -05:00
# endif
2025-07-16 13:09:03 -05:00
# include <linux/mm.h>
2025-07-10 00:57:51 -05:00
# endif
# ifndef __PIE__
2025-07-16 13:09:03 -05:00
# include <linux/kthread.h>
2025-07-10 00:57:51 -05:00
# include <linux/net.h>
# endif
2025-07-16 13:09:03 -05:00
2025-07-10 00:57:51 -05:00
# include <linux/slab.h>
# include <linux/sched.h>
2025-07-14 12:39:41 -05:00
# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
/* for signal_pending() */
# include <linux/sched/signal.h>
2025-07-26 08:27:43 -05:00
/* for local_clock() */
2025-07-25 15:56:48 -05:00
# include <linux/sched/clock.h>
2025-07-14 12:39:41 -05:00
# endif
2025-07-10 00:57:51 -05:00
# include <linux/random.h>
2025-07-09 16:29:04 -05:00
# ifdef LINUXKM_LKCAPI_REGISTER
/* the LKCAPI assumes that expanded encrypt and decrypt keys will stay
* loaded simultaneously, and the Linux in-tree implementations have two
* AES key structs in each context, one for each direction. in
* linuxkm/lkcapi_aes_glue.c, we do the same
* thing with "struct km_AesCtx". however, wolfCrypt struct AesXts
* already has two AES expanded keys, the main and tweak, and the tweak
* is always used in the encrypt direction regardless of the main
* direction. to avoid allocating and computing a duplicate second
* tweak encrypt key, we set
* WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS, which adds a second
* Aes slot to wolfCrypt's struct AesXts, and activates support for
* AES_ENCRYPTION_AND_DECRYPTION on AES-XTS.
*/
# ifndef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS
# define WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS
# endif
2025-07-10 00:57:51 -05:00
# ifndef __PIE__
# include <linux/crypto.h>
# include <linux/scatterlist.h>
# include <crypto/scatterwalk.h>
# include <crypto/internal/aead.h>
# include <crypto/internal/hash.h>
# include <crypto/internal/rng.h>
# include <crypto/internal/skcipher.h>
# include <crypto/internal/akcipher.h>
# include <crypto/internal/kpp.h>
# if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0)
# include <crypto/internal/sig.h>
# endif /* linux ver >= 6.13 */
# ifdef WOLFSSL_LINUXKM_USE_GET_RANDOM_KPROBES
# include <linux/kprobes.h>
# endif
# if defined(_LINUX_REFCOUNT_H) || defined(_LINUX_REFCOUNT_TYPES_H)
# define WC_LKM_REFCOUNT_TO_INT(refcount) (atomic_read(&(refcount.refs)))
# else
# define WC_LKM_REFCOUNT_TO_INT(refcount) (atomic_read(&(refcount)))
# endif
# endif /* !__PIE__ */
2025-07-09 16:29:04 -05:00
# endif /* LINUXKM_LKCAPI_REGISTER */
2024-01-26 20:01:19 -06:00
2025-07-25 15:56:48 -05:00
# ifndef WC_CHECK_FOR_INTR_SIGNALS
# define WC_CHECK_FOR_INTR_SIGNALS() wc_linuxkm_check_for_intr_signals()
# endif
# ifndef WC_RELAX_LONG_LOOP
# define WC_RELAX_LONG_LOOP() wc_linuxkm_relax_long_loop()
# endif
2024-01-29 17:48:31 -06:00
/* benchmarks.c uses floating point math, so needs a working
* SAVE_VECTOR_REGISTERS().
*/
# if defined(WOLFSSL_LINUXKM_BENCHMARKS) && \
!defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS)
2023-05-17 01:44:36 -05:00
# define WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
# endif
2024-01-29 17:48:31 -06:00
# if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && \
defined(CONFIG_X86)
2024-05-08 16:18:33 -05:00
extern __must_check int allocate_wolfcrypt_linuxkm_fpu_states ( void ) ;
extern void free_wolfcrypt_linuxkm_fpu_states ( void ) ;
2025-07-31 10:37:39 -05:00
WOLFSSL_API __must_check int wc_can_save_vector_registers_x86 ( void ) ;
WOLFSSL_API __must_check int wc_save_vector_registers_x86 ( enum wc_svr_flags flags ) ;
2025-08-28 11:02:45 -05:00
WOLFSSL_API void wc_restore_vector_registers_x86 ( enum wc_svr_flags flags ) ;
2024-05-08 16:18:33 -05:00
2022-01-07 22:39:38 -06:00
# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
# include <asm/i387.h>
# else
# include <asm/simd.h>
2025-07-16 13:09:03 -05:00
# include <crypto/internal/simd.h>
2022-01-07 22:39:38 -06:00
# endif
2024-04-27 01:12:58 -05:00
# ifndef CAN_SAVE_VECTOR_REGISTERS
# ifdef DEBUG_VECTOR_REGISTER_ACCESS_FUZZING
2025-07-31 10:37:39 -05:00
# define CAN_SAVE_VECTOR_REGISTERS() (wc_can_save_vector_registers_x86() && (SAVE_VECTOR_REGISTERS2_fuzzer() == 0))
2024-04-27 01:12:58 -05:00
# else
2025-07-31 10:37:39 -05:00
# define CAN_SAVE_VECTOR_REGISTERS() wc_can_save_vector_registers_x86()
2024-04-27 01:12:58 -05:00
# endif
# endif
2022-01-07 22:39:38 -06:00
# ifndef SAVE_VECTOR_REGISTERS
2025-06-20 13:45:04 -05:00
# define SAVE_VECTOR_REGISTERS(fail_clause) { \
2025-08-11 16:10:51 -05:00
int _svr_ret = wc_save_vector_registers_x86(WC_SVR_FLAG_NONE); \
2025-06-20 13:45:04 -05:00
if (_svr_ret != 0) { \
fail_clause \
} \
2024-01-29 17:48:31 -06:00
}
2024-02-09 00:47:23 -06:00
# endif
# ifndef SAVE_VECTOR_REGISTERS2
2024-01-26 14:07:58 -06:00
# ifdef DEBUG_VECTOR_REGISTER_ACCESS_FUZZING
2024-01-29 17:48:31 -06:00
# define SAVE_VECTOR_REGISTERS2() ({ \
int _fuzzer_ret = SAVE_VECTOR_REGISTERS2_fuzzer(); \
(_fuzzer_ret == 0) ? \
2025-08-11 16:10:51 -05:00
wc_save_vector_registers_x86(WC_SVR_FLAG_NONE) : \
2024-01-29 17:48:31 -06:00
_fuzzer_ret; \
})
2024-01-26 14:07:58 -06:00
# else
2025-08-11 16:10:51 -05:00
# define SAVE_VECTOR_REGISTERS2() wc_save_vector_registers_x86(WC_SVR_FLAG_NONE)
2024-01-26 14:07:58 -06:00
# endif
2022-01-07 22:39:38 -06:00
# endif
# ifndef RESTORE_VECTOR_REGISTERS
2025-08-28 11:02:45 -05:00
# define RESTORE_VECTOR_REGISTERS() wc_restore_vector_registers_x86(WC_SVR_FLAG_NONE)
2022-01-07 22:39:38 -06:00
# endif
2024-05-08 16:18:33 -05:00
2025-06-20 13:45:04 -05:00
# ifndef DISABLE_VECTOR_REGISTERS
2025-07-31 10:37:39 -05:00
# define DISABLE_VECTOR_REGISTERS() wc_save_vector_registers_x86(WC_SVR_FLAG_INHIBIT)
2025-06-20 13:45:04 -05:00
# endif
# ifndef REENABLE_VECTOR_REGISTERS
2025-08-28 11:02:45 -05:00
# define REENABLE_VECTOR_REGISTERS() wc_restore_vector_registers_x86(WC_SVR_FLAG_INHIBIT)
2025-06-20 13:45:04 -05:00
# endif
2023-05-09 23:53:49 -05:00
# elif defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
2024-05-08 16:18:33 -05:00
# error kernel module ARM SIMD is not yet tested or usable.
2022-01-07 22:39:38 -06:00
# include <asm/fpsimd.h>
2024-05-08 16:18:33 -05:00
/* Enter an FP/SIMD-safe section on ARM: disables preemption, then saves
 * the current FP/SIMD state if SIMD use is currently permitted.
 * Returns 0 on success, with preemption left disabled until the matching
 * restore_vector_registers_arm(); returns BAD_STATE_E (with preemption
 * re-enabled) when may_use_simd() reports SIMD is unusable in this context.
 */
static WARN_UNUSED_RESULT inline int save_vector_registers_arm(void)
{
    preempt_disable();
    if (may_use_simd()) {
        fpsimd_preserve_current_state();
        return 0;
    }
    preempt_enable();
    return BAD_STATE_E;
}
/* Leave the FP/SIMD-safe section opened by save_vector_registers_arm():
 * restores the saved FP/SIMD state, then re-enables preemption.
 * Order matters — the state must be restored before preemption is
 * re-enabled, mirroring the reverse order of the save path.
 */
static inline void restore_vector_registers_arm(void)
{
    fpsimd_restore_current_state();
    preempt_enable();
}
2022-01-07 22:39:38 -06:00
# ifndef SAVE_VECTOR_REGISTERS
# define SAVE_VECTOR_REGISTERS(fail_clause) { int _svr_ret = save_vector_registers_arm(); if (_svr_ret != 0) { fail_clause } }
2024-02-09 00:47:23 -06:00
# endif
# ifndef SAVE_VECTOR_REGISTERS2
2023-11-17 01:15:28 -06:00
# define SAVE_VECTOR_REGISTERS2() save_vector_registers_arm()
2022-01-07 22:39:38 -06:00
# endif
2024-04-27 01:12:58 -05:00
# ifndef CAN_SAVE_VECTOR_REGISTERS
# define CAN_SAVE_VECTOR_REGISTERS() can_save_vector_registers_arm()
# endif
2022-01-07 22:39:38 -06:00
# ifndef RESTORE_VECTOR_REGISTERS
# define RESTORE_VECTOR_REGISTERS() restore_vector_registers_arm()
# endif
2024-05-08 16:18:33 -05:00
2023-05-09 23:53:49 -05:00
# elif defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS)
2025-07-31 10:37:39 -05:00
# error WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS is set for an unimplemented architecture.
2024-05-08 16:18:33 -05:00
# endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS */
2022-01-07 22:39:38 -06:00
_Pragma ( " GCC diagnostic pop " ) ;
2023-06-08 14:43:05 -05:00
/* avoid -Wpointer-arith, encountered when -DCONFIG_FORTIFY_SOURCE */
# undef __is_constexpr
# define __is_constexpr(x) __builtin_constant_p(x)
2022-01-07 22:39:38 -06:00
/* the kernel uses -std=c89, but not -pedantic, and makes full use of anon
* structs/unions, so we should too.
*/
# define HAVE_ANONYMOUS_INLINE_AGGREGATES 1
# define NO_THREAD_LS
# define NO_ATTRIBUTE_CONSTRUCTOR
# ifdef HAVE_FIPS
extern int wolfCrypt_FIPS_first ( void ) ;
extern int wolfCrypt_FIPS_last ( void ) ;
2024-04-13 21:12:22 -05:00
# if FIPS_VERSION3_GE(6,0,0)
2025-07-06 09:55:05 -05:00
# ifndef NO_AES
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_AES_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# if defined(WOLFSSL_CMAC) && defined(WOLFSSL_AES_DIRECT)
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_CMAC_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# endif
# endif
# ifndef NO_DH
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_DH_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef HAVE_ECC
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_ECC_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef HAVE_ED25519
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_ED25519_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef HAVE_ED448
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_ED448_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# endif
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_HMAC_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# ifndef NO_KDF
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_KDF_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef HAVE_PBKDF2
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_PBKDF_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef HAVE_HASHDRBG
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_DRBG_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# endif
# ifndef NO_RSA
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_RSA_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# endif
# ifndef NO_SHA
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_SHA_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# endif
# ifndef NO_SHA256
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_SHA256_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef WOLFSSL_SHA512
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_SHA512_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef WOLFSSL_SHA3
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_SHA3_sanity ( void ) ;
2025-07-06 09:55:05 -05:00
# endif
2024-04-13 21:12:22 -05:00
extern int wolfCrypt_FIPS_FT_sanity ( void ) ;
extern int wc_RunAllCast_fips ( void ) ;
# endif
2022-01-07 22:39:38 -06:00
# endif
# if !defined(WOLFCRYPT_ONLY) && !defined(NO_CERTS)
/* work around backward dependency of asn.c on ssl.c. */
struct Signer ;
struct Signer * GetCA ( void * signers , unsigned char * hash ) ;
# ifndef NO_SKID
struct Signer * GetCAByName ( void * signers , unsigned char * hash ) ;
2024-09-06 14:15:19 -05:00
# ifdef HAVE_OCSP
struct Signer * GetCAByKeyHash ( void * vp , const unsigned char * keyHash ) ;
# endif /* HAVE_OCSP */
2024-09-13 18:01:11 -05:00
# ifdef WOLFSSL_AKID_NAME
struct Signer * GetCAByAKID ( void * vp , const unsigned char * issuer ,
unsigned int issuerSz ,
const unsigned char * serial ,
unsigned int serialSz ) ;
# endif
2024-09-06 14:15:19 -05:00
# endif /* NO_SKID */
2024-11-21 21:59:26 -06:00
# if defined(OPENSSL_EXTRA) || defined(OPENSSL_EXTRA_X509_SMALL)
struct WOLFSSL_X509_NAME ;
extern int wolfSSL_X509_NAME_add_entry_by_NID ( struct WOLFSSL_X509_NAME * name , int nid ,
int type , const unsigned char * bytes ,
int len , int loc , int set ) ;
extern void wolfSSL_X509_NAME_free ( struct WOLFSSL_X509_NAME * name ) ;
extern struct WOLFSSL_X509_NAME * wolfSSL_X509_NAME_new_ex ( void * heap ) ;
# endif /* OPENSSL_EXTRA || OPENSSL_EXTRA_X509_SMALL */
2024-09-06 14:15:19 -05:00
# endif /* !WOLFCRYPT_ONLY && !NO_CERTS */
2022-01-07 22:39:38 -06:00
# if defined(__PIE__) && !defined(USE_WOLFSSL_LINUXKM_PIE_REDIRECT_TABLE)
2023-05-17 01:44:36 -05:00
# error "compiling -fPIE requires PIE redirect table."
2022-01-07 22:39:38 -06:00
# endif
2025-08-26 11:07:40 -05:00
# ifdef HAVE_LINUXKM_PIE_SUPPORT
2025-08-22 00:34:01 -05:00
extern const u8
__wc_text_start [ ] ,
__wc_text_end [ ] ,
__wc_rodata_start [ ] ,
__wc_rodata_end [ ] ,
__wc_rwdata_start [ ] ,
__wc_rwdata_end [ ] ,
__wc_bss_start [ ] ,
__wc_bss_end [ ] ;
extern const unsigned int wc_linuxkm_pie_reloc_tab [ ] ;
extern const size_t wc_linuxkm_pie_reloc_tab_length ;
extern ssize_t wc_linuxkm_normalize_relocations (
const u8 * text_in ,
size_t text_in_len ,
u8 * text_out ,
ssize_t * cur_index_p ) ;
2025-08-26 11:07:40 -05:00
# endif /* HAVE_LINUXKM_PIE_SUPPORT */
# ifdef USE_WOLFSSL_LINUXKM_PIE_REDIRECT_TABLE
# ifdef CONFIG_MIPS
# undef __ARCH_MEMCMP_NO_REDIRECT
# undef memcmp
extern int memcmp ( const void * s1 , const void * s2 , size_t n ) ;
# endif
2025-08-22 00:34:01 -05:00
2022-01-07 22:39:38 -06:00
struct wolfssl_linuxkm_pie_redirect_table {
2025-08-22 00:34:01 -05:00
typeof ( wc_linuxkm_normalize_relocations ) * wc_linuxkm_normalize_relocations ;
2022-01-07 22:39:38 -06:00
# ifndef __ARCH_MEMCMP_NO_REDIRECT
typeof ( memcmp ) * memcmp ;
# endif
# ifndef __ARCH_MEMCPY_NO_REDIRECT
typeof ( memcpy ) * memcpy ;
# endif
# ifndef __ARCH_MEMSET_NO_REDIRECT
typeof ( memset ) * memset ;
# endif
# ifndef __ARCH_MEMMOVE_NO_REDIRECT
typeof ( memmove ) * memmove ;
# endif
2022-05-10 12:20:12 -05:00
# ifndef __ARCH_STRCMP_NO_REDIRECT
typeof ( strcmp ) * strcmp ;
# endif
2022-01-07 22:39:38 -06:00
# ifndef __ARCH_STRNCMP_NO_REDIRECT
typeof ( strncmp ) * strncmp ;
# endif
2022-05-10 12:20:12 -05:00
# ifndef __ARCH_STRCASECMP_NO_REDIRECT
typeof ( strcasecmp ) * strcasecmp ;
# endif
# ifndef __ARCH_STRNCASECMP_NO_REDIRECT
typeof ( strncasecmp ) * strncasecmp ;
# endif
2022-01-07 22:39:38 -06:00
# ifndef __ARCH_STRLEN_NO_REDIRECT
typeof ( strlen ) * strlen ;
# endif
# ifndef __ARCH_STRSTR_NO_REDIRECT
typeof ( strstr ) * strstr ;
# endif
# ifndef __ARCH_STRNCPY_NO_REDIRECT
typeof ( strncpy ) * strncpy ;
# endif
# ifndef __ARCH_STRNCAT_NO_REDIRECT
typeof ( strncat ) * strncat ;
# endif
typeof ( kstrtoll ) * kstrtoll ;
2025-07-16 13:09:03 -05:00
# if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) || \
(defined(RHEL_MAJOR) && \
((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
2022-01-07 22:39:38 -06:00
typeof ( _printk ) * _printk ;
# else
typeof ( printk ) * printk ;
# endif
2024-01-26 20:01:19 -06:00
# ifdef CONFIG_FORTIFY_SOURCE
typeof ( __warn_printk ) * __warn_printk ;
# endif
2022-01-07 22:39:38 -06:00
typeof ( snprintf ) * snprintf ;
const unsigned char * _ctype ;
2025-07-23 16:57:24 -05:00
# if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 11, 0)
2024-08-02 10:16:19 -05:00
typeof ( kmalloc_noprof ) * kmalloc_noprof ;
typeof ( krealloc_noprof ) * krealloc_noprof ;
typeof ( kzalloc_noprof ) * kzalloc_noprof ;
typeof ( __kvmalloc_node_noprof ) * __kvmalloc_node_noprof ;
typeof ( __kmalloc_cache_noprof ) * __kmalloc_cache_noprof ;
2025-07-23 16:57:24 -05:00
# ifdef HAVE_KVREALLOC
typeof ( kvrealloc_noprof ) * kvrealloc_noprof ;
# endif
2024-08-02 10:16:19 -05:00
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0)
2024-05-30 11:21:42 -05:00
typeof ( kmalloc_noprof ) * kmalloc_noprof ;
typeof ( krealloc_noprof ) * krealloc_noprof ;
typeof ( kzalloc_noprof ) * kzalloc_noprof ;
typeof ( kvmalloc_node_noprof ) * kvmalloc_node_noprof ;
typeof ( kmalloc_trace_noprof ) * kmalloc_trace_noprof ;
2025-07-09 18:22:01 -05:00
# ifdef HAVE_KVREALLOC
typeof ( kvrealloc_noprof ) * kvrealloc_noprof ;
# endif
2024-05-30 11:21:42 -05:00
# else /* <6.10.0 */
2022-01-07 22:39:38 -06:00
typeof ( kmalloc ) * kmalloc ;
typeof ( krealloc ) * krealloc ;
# ifdef HAVE_KVMALLOC
2025-07-09 16:29:04 -05:00
typeof ( kvmalloc_node ) * kvmalloc_node ;
2025-07-09 18:22:01 -05:00
# endif
# ifdef HAVE_KVREALLOC
2025-07-09 16:29:04 -05:00
typeof ( kvrealloc ) * kvrealloc ;
2022-01-07 22:39:38 -06:00
# endif
2025-07-16 13:09:03 -05:00
# if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) || \
(defined(RHEL_MAJOR) && \
((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
2022-10-18 13:34:24 -05:00
typeof ( kmalloc_trace ) * kmalloc_trace ;
# else
typeof ( kmem_cache_alloc_trace ) * kmem_cache_alloc_trace ;
typeof ( kmalloc_order_trace ) * kmalloc_order_trace ;
# endif
2024-05-30 11:21:42 -05:00
# endif /* <6.10.0 */
# ifdef HAVE_KVMALLOC
typeof ( kvfree ) * kvfree ;
# endif
typeof ( kfree ) * kfree ;
typeof ( ksize ) * ksize ;
2022-01-07 22:39:38 -06:00
typeof ( get_random_bytes ) * get_random_bytes ;
# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
typeof ( getnstimeofday ) * getnstimeofday ;
# elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
typeof ( current_kernel_time64 ) * current_kernel_time64 ;
# else
typeof ( ktime_get_coarse_real_ts64 ) * ktime_get_coarse_real_ts64 ;
# endif
struct task_struct * ( * get_current ) ( void ) ;
2023-05-17 01:44:36 -05:00
# ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
2022-01-07 22:39:38 -06:00
2023-05-17 01:44:36 -05:00
# ifdef CONFIG_X86
2024-05-08 16:18:33 -05:00
typeof ( allocate_wolfcrypt_linuxkm_fpu_states ) * allocate_wolfcrypt_linuxkm_fpu_states ;
2025-07-31 10:37:39 -05:00
typeof ( wc_can_save_vector_registers_x86 ) * wc_can_save_vector_registers_x86 ;
2024-05-08 16:18:33 -05:00
typeof ( free_wolfcrypt_linuxkm_fpu_states ) * free_wolfcrypt_linuxkm_fpu_states ;
2025-07-31 10:37:39 -05:00
typeof ( wc_restore_vector_registers_x86 ) * wc_restore_vector_registers_x86 ;
typeof ( wc_save_vector_registers_x86 ) * wc_save_vector_registers_x86 ;
2023-05-17 01:44:36 -05:00
# else /* !CONFIG_X86 */
2025-07-31 10:37:39 -05:00
# error WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS is set for an unimplemented architecture.
2023-05-17 01:44:36 -05:00
# endif /* arch */
# endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS */
2022-01-07 22:39:38 -06:00
typeof ( __mutex_init ) * __mutex_init ;
# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
typeof ( mutex_lock_nested ) * mutex_lock_nested ;
# else
typeof ( mutex_lock ) * mutex_lock ;
# endif
typeof ( mutex_unlock ) * mutex_unlock ;
# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
typeof ( mutex_destroy ) * mutex_destroy ;
# endif
# ifdef HAVE_FIPS
typeof ( wolfCrypt_FIPS_first ) * wolfCrypt_FIPS_first ;
typeof ( wolfCrypt_FIPS_last ) * wolfCrypt_FIPS_last ;
2024-04-13 21:12:22 -05:00
# if FIPS_VERSION3_GE(6,0,0)
2025-07-06 09:55:05 -05:00
# ifndef NO_AES
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_AES_sanity ) * wolfCrypt_FIPS_AES_sanity ;
2025-07-06 09:55:05 -05:00
# if defined(WOLFSSL_CMAC) && defined(WOLFSSL_AES_DIRECT)
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_CMAC_sanity ) * wolfCrypt_FIPS_CMAC_sanity ;
2025-07-06 09:55:05 -05:00
# endif
# endif
# ifndef NO_DH
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_DH_sanity ) * wolfCrypt_FIPS_DH_sanity ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef HAVE_ECC
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_ECC_sanity ) * wolfCrypt_FIPS_ECC_sanity ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef HAVE_ED25519
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_ED25519_sanity ) * wolfCrypt_FIPS_ED25519_sanity ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef HAVE_ED448
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_ED448_sanity ) * wolfCrypt_FIPS_ED448_sanity ;
2025-07-06 09:55:05 -05:00
# endif
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_HMAC_sanity ) * wolfCrypt_FIPS_HMAC_sanity ;
2025-07-06 09:55:05 -05:00
# ifndef NO_KDF
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_KDF_sanity ) * wolfCrypt_FIPS_KDF_sanity ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef HAVE_PBKDF2
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_PBKDF_sanity ) * wolfCrypt_FIPS_PBKDF_sanity ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef HAVE_HASHDRBG
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_DRBG_sanity ) * wolfCrypt_FIPS_DRBG_sanity ;
2025-07-06 09:55:05 -05:00
# endif
# ifndef NO_RSA
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_RSA_sanity ) * wolfCrypt_FIPS_RSA_sanity ;
2025-07-06 09:55:05 -05:00
# endif
# ifndef NO_SHA
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_SHA_sanity ) * wolfCrypt_FIPS_SHA_sanity ;
2025-07-06 09:55:05 -05:00
# endif
# ifndef NO_SHA256
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_SHA256_sanity ) * wolfCrypt_FIPS_SHA256_sanity ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef WOLFSSL_SHA512
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_SHA512_sanity ) * wolfCrypt_FIPS_SHA512_sanity ;
2025-07-06 09:55:05 -05:00
# endif
# ifdef WOLFSSL_SHA3
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_SHA3_sanity ) * wolfCrypt_FIPS_SHA3_sanity ;
2025-07-06 09:55:05 -05:00
# endif
2024-04-13 21:12:22 -05:00
typeof ( wolfCrypt_FIPS_FT_sanity ) * wolfCrypt_FIPS_FT_sanity ;
typeof ( wc_RunAllCast_fips ) * wc_RunAllCast_fips ;
# endif
2022-01-07 22:39:38 -06:00
# endif
# if !defined(WOLFCRYPT_ONLY) && !defined(NO_CERTS)
typeof ( GetCA ) * GetCA ;
# ifndef NO_SKID
typeof ( GetCAByName ) * GetCAByName ;
2024-09-06 14:15:19 -05:00
# ifdef HAVE_OCSP
typeof ( GetCAByKeyHash ) * GetCAByKeyHash ;
# endif /* HAVE_OCSP */
# endif /* NO_SKID */
2024-09-13 18:01:11 -05:00
# ifdef WOLFSSL_AKID_NAME
typeof ( GetCAByAKID ) * GetCAByAKID ;
# endif /* WOLFSSL_AKID_NAME */
2024-11-21 21:59:26 -06:00
# if defined(OPENSSL_EXTRA) || defined(OPENSSL_EXTRA_X509_SMALL)
typeof ( wolfSSL_X509_NAME_add_entry_by_NID ) * wolfSSL_X509_NAME_add_entry_by_NID ;
typeof ( wolfSSL_X509_NAME_free ) * wolfSSL_X509_NAME_free ;
typeof ( wolfSSL_X509_NAME_new_ex ) * wolfSSL_X509_NAME_new_ex ;
# endif /* OPENSSL_EXTRA || OPENSSL_EXTRA_X509_SMALL */
2024-09-06 14:15:19 -05:00
# endif /* !WOLFCRYPT_ONLY && !NO_CERTS */
2022-01-07 22:39:38 -06:00
2024-08-20 23:36:07 -05:00
# ifdef WOLFSSL_DEBUG_BACKTRACE_ERROR_CODES
typeof ( dump_stack ) * dump_stack ;
# endif
2024-11-26 00:12:29 -06:00
# ifdef CONFIG_ARM64
# ifdef __PIE__
2025-07-03 22:09:34 -05:00
/* alt_cb_patch_nops and queued_spin_lock_slowpath are defined early
* to allow shimming in system headers, but now we need the native
* ones.
2024-11-26 00:12:29 -06:00
*/
# undef alt_cb_patch_nops
typeof ( my__alt_cb_patch_nops ) * alt_cb_patch_nops ;
2025-07-03 22:09:34 -05:00
# undef queued_spin_lock_slowpath
typeof ( my__queued_spin_lock_slowpath ) * queued_spin_lock_slowpath ;
2024-11-26 00:12:29 -06:00
# else
typeof ( alt_cb_patch_nops ) * alt_cb_patch_nops ;
2025-07-03 22:09:34 -05:00
typeof ( queued_spin_lock_slowpath ) * queued_spin_lock_slowpath ;
2024-11-26 00:12:29 -06:00
# endif
# endif
2025-06-30 15:23:04 -05:00
typeof ( preempt_count ) * preempt_count ;
2025-07-03 22:09:34 -05:00
# ifndef _raw_spin_lock_irqsave
typeof ( _raw_spin_lock_irqsave ) * _raw_spin_lock_irqsave ;
# endif
# ifndef _raw_spin_trylock
typeof ( _raw_spin_trylock ) * _raw_spin_trylock ;
# endif
# ifndef _raw_spin_unlock_irqrestore
typeof ( _raw_spin_unlock_irqrestore ) * _raw_spin_unlock_irqrestore ;
# endif
2025-06-30 15:23:04 -05:00
typeof ( _cond_resched ) * _cond_resched ;
2025-07-16 13:09:03 -05:00
# ifndef WOLFSSL_LINUXKM_USE_MUTEXES
typeof ( wc_lkm_LockMutex ) * wc_lkm_LockMutex ;
# endif
2025-06-30 15:23:04 -05:00
2025-07-25 15:56:48 -05:00
typeof ( wc_linuxkm_check_for_intr_signals ) * wc_linuxkm_check_for_intr_signals ;
typeof ( wc_linuxkm_relax_long_loop ) * wc_linuxkm_relax_long_loop ;
2022-01-07 22:39:38 -06:00
const void * _last_slot ;
} ;
extern const struct wolfssl_linuxkm_pie_redirect_table * wolfssl_linuxkm_get_pie_redirect_table ( void ) ;
2025-07-21 19:34:00 -05:00
extern struct wolfssl_linuxkm_pie_redirect_table wolfssl_linuxkm_pie_redirect_table ;
2025-07-22 14:40:45 -05:00
# if defined(WC_LKM_INDIRECT_SYM)
/* keep user-supplied override definition. */
# elif defined(WC_LKM_INDIRECT_SYM_BY_FUNC_ONLY) || \
defined(WC_LKM_INDIRECT_SYM_BY_DIRECT_TABLE_READ)
/* keep user-supplied override method. */
# elif defined(CONFIG_X86)
# define WC_LKM_INDIRECT_SYM_BY_DIRECT_TABLE_READ
2025-07-21 19:34:00 -05:00
# elif defined(CONFIG_ARM64)
2025-07-22 14:40:45 -05:00
/* direct access to wolfssl_linuxkm_pie_redirect_table.x on aarch64
* produces GOT relocations, e.g. R_AARCH64_LD64_GOT_LO12_NC.
*/
# define WC_LKM_INDIRECT_SYM_BY_FUNC_ONLY
2025-07-21 19:34:00 -05:00
# else
2025-07-22 14:40:45 -05:00
/* for other archs, by default use the safe way. */
# define WC_LKM_INDIRECT_SYM_BY_FUNC_ONLY
# endif
# if defined(WC_LKM_INDIRECT_SYM)
/* keep user-supplied override definition. */
# elif defined(WC_LKM_INDIRECT_SYM_BY_FUNC_ONLY)
2025-07-21 19:34:00 -05:00
# define WC_LKM_INDIRECT_SYM(x) (wolfssl_linuxkm_get_pie_redirect_table()->x)
2025-07-22 14:40:45 -05:00
# elif defined(WC_LKM_INDIRECT_SYM_BY_DIRECT_TABLE_READ)
# define WC_LKM_INDIRECT_SYM(x) (wolfssl_linuxkm_pie_redirect_table.x)
# else
# error no WC_LKM_INDIRECT_SYM method defined.
2025-07-21 19:34:00 -05:00
# endif
2022-01-07 22:39:38 -06:00
# ifdef __PIE__
2025-08-22 00:34:01 -05:00
# define wc_linuxkm_normalize_relocations \
WC_LKM_INDIRECT_SYM(wc_linuxkm_normalize_relocations)
2022-01-07 22:39:38 -06:00
# ifndef __ARCH_MEMCMP_NO_REDIRECT
2025-07-21 19:34:00 -05:00
# define memcmp WC_LKM_INDIRECT_SYM(memcmp)
2022-01-07 22:39:38 -06:00
# endif
# ifndef __ARCH_MEMCPY_NO_REDIRECT
2025-07-21 19:34:00 -05:00
# define memcpy WC_LKM_INDIRECT_SYM(memcpy)
2022-01-07 22:39:38 -06:00
# endif
# ifndef __ARCH_MEMSET_NO_REDIRECT
2025-07-21 19:34:00 -05:00
# define memset WC_LKM_INDIRECT_SYM(memset)
2022-01-07 22:39:38 -06:00
# endif
# ifndef __ARCH_MEMMOVE_NO_REDIRECT
2025-07-21 19:34:00 -05:00
# define memmove WC_LKM_INDIRECT_SYM(memmove)
2022-01-07 22:39:38 -06:00
# endif
2022-05-10 12:20:12 -05:00
# ifndef __ARCH_STRCMP_NO_REDIRECT
2025-07-21 19:34:00 -05:00
# define strcmp WC_LKM_INDIRECT_SYM(strcmp)
2022-05-10 12:20:12 -05:00
# endif
2022-01-07 22:39:38 -06:00
# ifndef __ARCH_STRNCMP_NO_REDIRECT
2025-07-21 19:34:00 -05:00
# define strncmp WC_LKM_INDIRECT_SYM(strncmp)
2022-01-07 22:39:38 -06:00
# endif
2022-05-10 12:20:12 -05:00
# ifndef __ARCH_STRCASECMP_NO_REDIRECT
2025-07-21 19:34:00 -05:00
# define strcasecmp WC_LKM_INDIRECT_SYM(strcasecmp)
2022-05-10 12:20:12 -05:00
# endif
# ifndef __ARCH_STRNCASECMP_NO_REDIRECT
2025-07-21 19:34:00 -05:00
# define strncasecmp WC_LKM_INDIRECT_SYM(strncasecmp)
2022-05-10 12:20:12 -05:00
# endif
2022-01-07 22:39:38 -06:00
# ifndef __ARCH_STRLEN_NO_REDIRECT
2025-07-21 19:34:00 -05:00
# define strlen WC_LKM_INDIRECT_SYM(strlen)
2022-01-07 22:39:38 -06:00
# endif
# ifndef __ARCH_STRSTR_NO_REDIRECT
2025-07-21 19:34:00 -05:00
# define strstr WC_LKM_INDIRECT_SYM(strstr)
2022-01-07 22:39:38 -06:00
# endif
# ifndef __ARCH_STRNCPY_NO_REDIRECT
2025-07-21 19:34:00 -05:00
# define strncpy WC_LKM_INDIRECT_SYM(strncpy)
2022-01-07 22:39:38 -06:00
# endif
# ifndef __ARCH_STRNCAT_NO_REDIRECT
2025-07-21 19:34:00 -05:00
# define strncat WC_LKM_INDIRECT_SYM(strncat)
2022-01-07 22:39:38 -06:00
# endif
2025-07-21 19:34:00 -05:00
# define kstrtoll WC_LKM_INDIRECT_SYM(kstrtoll)
2022-01-07 22:39:38 -06:00
2025-07-16 13:09:03 -05:00
# if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) || \
(defined(RHEL_MAJOR) && \
((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
2025-07-21 19:34:00 -05:00
# define _printk WC_LKM_INDIRECT_SYM(_printk)
2022-01-07 22:39:38 -06:00
# else
2025-07-21 19:34:00 -05:00
# define printk WC_LKM_INDIRECT_SYM(printk)
2022-01-07 22:39:38 -06:00
# endif
2024-01-26 20:01:19 -06:00
# ifdef CONFIG_FORTIFY_SOURCE
2025-07-21 19:34:00 -05:00
# define __warn_printk WC_LKM_INDIRECT_SYM(__warn_printk)
2024-01-26 20:01:19 -06:00
# endif
2025-07-21 19:34:00 -05:00
# define snprintf WC_LKM_INDIRECT_SYM(snprintf)
2022-01-07 22:39:38 -06:00
2025-07-21 19:34:00 -05:00
# define _ctype WC_LKM_INDIRECT_SYM(_ctype)
2022-01-07 22:39:38 -06:00
2025-07-23 16:57:24 -05:00
# if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 11, 0)
2024-08-02 10:16:19 -05:00
/* see include/linux/alloc_tag.h and include/linux/slab.h */
2025-07-21 19:34:00 -05:00
# define kmalloc_noprof WC_LKM_INDIRECT_SYM(kmalloc_noprof)
# define krealloc_noprof WC_LKM_INDIRECT_SYM(krealloc_noprof)
# define kzalloc_noprof WC_LKM_INDIRECT_SYM(kzalloc_noprof)
# define __kvmalloc_node_noprof WC_LKM_INDIRECT_SYM(__kvmalloc_node_noprof)
# define __kmalloc_cache_noprof WC_LKM_INDIRECT_SYM(__kmalloc_cache_noprof)
2025-07-23 16:57:24 -05:00
# ifdef HAVE_KVREALLOC
# define kvrealloc_noprof WC_LKM_INDIRECT_SYM(kvrealloc_noprof)
# endif
2024-08-02 10:16:19 -05:00
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(6, 10, 0)
2024-05-30 11:21:42 -05:00
/* see include/linux/alloc_tag.h and include/linux/slab.h */
2025-07-21 19:34:00 -05:00
# define kmalloc_noprof WC_LKM_INDIRECT_SYM(kmalloc_noprof)
# define krealloc_noprof WC_LKM_INDIRECT_SYM(krealloc_noprof)
# define kzalloc_noprof WC_LKM_INDIRECT_SYM(kzalloc_noprof)
# define kvmalloc_node_noprof WC_LKM_INDIRECT_SYM(kvmalloc_node_noprof)
# define kmalloc_trace_noprof WC_LKM_INDIRECT_SYM(kmalloc_trace_noprof)
2025-07-23 16:57:24 -05:00
# ifdef HAVE_KVREALLOC
# define kvrealloc_noprof WC_LKM_INDIRECT_SYM(kvrealloc_noprof)
# endif
2024-05-30 11:21:42 -05:00
# else /* <6.10.0 */
2025-07-21 19:34:00 -05:00
# define kmalloc WC_LKM_INDIRECT_SYM(kmalloc)
# define krealloc WC_LKM_INDIRECT_SYM(krealloc)
2022-01-07 22:39:38 -06:00
# define kzalloc(size, flags) kmalloc(size, (flags) | __GFP_ZERO)
# ifdef HAVE_KVMALLOC
2025-07-21 19:34:00 -05:00
# define kvmalloc_node WC_LKM_INDIRECT_SYM(kvmalloc_node)
2025-07-09 18:22:01 -05:00
# endif
# ifdef HAVE_KVREALLOC
2025-07-21 19:34:00 -05:00
# define kvrealloc WC_LKM_INDIRECT_SYM(kvrealloc)
2022-01-07 22:39:38 -06:00
# endif
2025-07-16 13:09:03 -05:00
# if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) || \
(defined(RHEL_MAJOR) && \
((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
2025-07-21 19:34:00 -05:00
# define kmalloc_trace WC_LKM_INDIRECT_SYM(kmalloc_trace)
2022-10-18 13:34:24 -05:00
# else
2025-07-21 19:34:00 -05:00
# define kmem_cache_alloc_trace WC_LKM_INDIRECT_SYM(kmem_cache_alloc_trace)
# define kmalloc_order_trace WC_LKM_INDIRECT_SYM(kmalloc_order_trace)
2022-10-18 13:34:24 -05:00
# endif
2024-05-30 11:21:42 -05:00
# endif /* <6.10.0 */
2025-07-21 19:34:00 -05:00
# define kfree WC_LKM_INDIRECT_SYM(kfree)
2024-05-30 11:21:42 -05:00
# ifdef HAVE_KVMALLOC
2025-07-21 19:34:00 -05:00
# define kvfree WC_LKM_INDIRECT_SYM(kvfree)
2024-05-30 11:21:42 -05:00
# endif
2025-07-21 19:34:00 -05:00
# define ksize WC_LKM_INDIRECT_SYM(ksize)
2024-05-30 11:21:42 -05:00
2025-07-21 19:34:00 -05:00
# define get_random_bytes WC_LKM_INDIRECT_SYM(get_random_bytes)
2022-01-07 22:39:38 -06:00
# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
2025-07-21 19:34:00 -05:00
# define getnstimeofday WC_LKM_INDIRECT_SYM(getnstimeofday)
2022-01-07 22:39:38 -06:00
# elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
2025-07-21 19:34:00 -05:00
# define current_kernel_time64 WC_LKM_INDIRECT_SYM(current_kernel_time64)
2022-01-07 22:39:38 -06:00
# else
2025-07-21 19:34:00 -05:00
# define ktime_get_coarse_real_ts64 WC_LKM_INDIRECT_SYM(ktime_get_coarse_real_ts64)
2022-01-07 22:39:38 -06:00
# endif
# undef get_current
2025-07-21 19:34:00 -05:00
# define get_current WC_LKM_INDIRECT_SYM(get_current)
2022-01-07 22:39:38 -06:00
2024-05-08 16:18:33 -05:00
# if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && defined(CONFIG_X86)
2025-07-21 19:34:00 -05:00
# define allocate_wolfcrypt_linuxkm_fpu_states WC_LKM_INDIRECT_SYM(allocate_wolfcrypt_linuxkm_fpu_states)
2025-07-31 10:37:39 -05:00
# define wc_can_save_vector_registers_x86 WC_LKM_INDIRECT_SYM(wc_can_save_vector_registers_x86)
2025-07-21 19:34:00 -05:00
# define free_wolfcrypt_linuxkm_fpu_states WC_LKM_INDIRECT_SYM(free_wolfcrypt_linuxkm_fpu_states)
2025-07-31 10:37:39 -05:00
# define wc_restore_vector_registers_x86 WC_LKM_INDIRECT_SYM(wc_restore_vector_registers_x86)
# define wc_save_vector_registers_x86 WC_LKM_INDIRECT_SYM(wc_save_vector_registers_x86)
2024-05-08 16:18:33 -05:00
# elif defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS)
2025-07-31 10:37:39 -05:00
# error WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS is set for an unimplemented architecture.
2023-05-17 01:44:36 -05:00
# endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS */
2022-01-07 22:39:38 -06:00
2025-07-21 19:34:00 -05:00
# define __mutex_init WC_LKM_INDIRECT_SYM(__mutex_init)
2022-01-07 22:39:38 -06:00
# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
2025-07-21 19:34:00 -05:00
# define mutex_lock_nested WC_LKM_INDIRECT_SYM(mutex_lock_nested)
2022-01-07 22:39:38 -06:00
# else
2025-07-21 19:34:00 -05:00
# define mutex_lock WC_LKM_INDIRECT_SYM(mutex_lock)
2022-01-07 22:39:38 -06:00
# endif
2025-07-21 19:34:00 -05:00
# define mutex_unlock WC_LKM_INDIRECT_SYM(mutex_unlock)
2022-01-07 22:39:38 -06:00
# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
2025-07-21 19:34:00 -05:00
# define mutex_destroy WC_LKM_INDIRECT_SYM(mutex_destroy)
2022-01-07 22:39:38 -06:00
# endif
/* per linux/ctype.h, tolower() and toupper() are macros bound to static inlines
* that use macros that bring in the _ctype global. for __PIE__, this needs to
* be masked out.
*/
# undef tolower
# undef toupper
# define tolower(c) (islower(c) ? (c) : ((c) + ('a'-'A')))
# define toupper(c) (isupper(c) ? (c) : ((c) - ('a'-'A')))
# if !defined(WOLFCRYPT_ONLY) && !defined(NO_CERTS)
2025-07-21 19:34:00 -05:00
# define GetCA WC_LKM_INDIRECT_SYM(GetCA)
2022-01-07 22:39:38 -06:00
# ifndef NO_SKID
2025-07-21 19:34:00 -05:00
# define GetCAByName WC_LKM_INDIRECT_SYM(GetCAByName)
2024-09-06 14:15:19 -05:00
# ifdef HAVE_OCSP
2025-07-21 19:34:00 -05:00
# define GetCAByKeyHash WC_LKM_INDIRECT_SYM(GetCAByKeyHash)
2024-09-06 14:15:19 -05:00
# endif /* HAVE_OCSP */
# endif /* NO_SKID */
2024-09-13 18:01:11 -05:00
# ifdef WOLFSSL_AKID_NAME
2025-07-21 19:34:00 -05:00
# define GetCAByAKID WC_LKM_INDIRECT_SYM(GetCAByAKID)
2024-09-13 18:01:11 -05:00
# endif
2024-11-21 21:59:26 -06:00
# if defined(OPENSSL_EXTRA) || defined(OPENSSL_EXTRA_X509_SMALL)
2025-07-21 19:34:00 -05:00
# define wolfSSL_X509_NAME_add_entry_by_NID WC_LKM_INDIRECT_SYM(wolfSSL_X509_NAME_add_entry_by_NID)
# define wolfSSL_X509_NAME_free WC_LKM_INDIRECT_SYM(wolfSSL_X509_NAME_free)
# define wolfSSL_X509_NAME_new_ex WC_LKM_INDIRECT_SYM(wolfSSL_X509_NAME_new_ex)
2024-11-21 21:59:26 -06:00
# endif /* OPENSSL_EXTRA || OPENSSL_EXTRA_X509_SMALL */
2024-09-06 14:15:19 -05:00
# endif /* !WOLFCRYPT_ONLY && !NO_CERTS */
2022-01-07 22:39:38 -06:00
2024-08-20 23:36:07 -05:00
# ifdef WOLFSSL_DEBUG_BACKTRACE_ERROR_CODES
2025-07-21 19:34:00 -05:00
# define dump_stack WC_LKM_INDIRECT_SYM(dump_stack)
2024-08-20 23:36:07 -05:00
# endif
2025-06-30 15:23:04 -05:00
# undef preempt_count /* just in case -- not a macro on x86. */
2025-07-21 19:34:00 -05:00
# define preempt_count WC_LKM_INDIRECT_SYM(preempt_count)
2025-07-03 22:09:34 -05:00
# ifndef WOLFSSL_LINUXKM_USE_MUTEXES
# ifndef _raw_spin_lock_irqsave
2025-07-21 19:34:00 -05:00
# define _raw_spin_lock_irqsave WC_LKM_INDIRECT_SYM(_raw_spin_lock_irqsave)
2025-07-03 22:09:34 -05:00
# endif
# ifndef _raw_spin_trylock
2025-07-21 19:34:00 -05:00
# define _raw_spin_trylock WC_LKM_INDIRECT_SYM(_raw_spin_trylock)
2025-07-03 22:09:34 -05:00
# endif
# ifndef _raw_spin_unlock_irqrestore
2025-07-21 19:34:00 -05:00
# define _raw_spin_unlock_irqrestore WC_LKM_INDIRECT_SYM(_raw_spin_unlock_irqrestore)
2025-07-03 22:09:34 -05:00
# endif
# endif
2025-07-21 19:34:00 -05:00
# define _cond_resched WC_LKM_INDIRECT_SYM(_cond_resched)
2025-06-30 15:23:04 -05:00
/* this is defined in linux/spinlock.h as an inline that calls the unshimmed
* raw_spin_unlock_irqrestore(). use a macro here to supersede it.
*/
# define spin_unlock_irqrestore(lock, flags) raw_spin_unlock_irqrestore(&((lock)->rlock), flags)
2025-07-25 15:56:48 -05:00
# define wc_linuxkm_check_for_intr_signals WC_LKM_INDIRECT_SYM(wc_linuxkm_check_for_intr_signals)
# define wc_linuxkm_relax_long_loop WC_LKM_INDIRECT_SYM(wc_linuxkm_relax_long_loop)
2022-01-07 22:39:38 -06:00
# endif /* __PIE__ */
# endif /* USE_WOLFSSL_LINUXKM_PIE_REDIRECT_TABLE */
/* remove this multifariously conflicting macro, picked up from
* Linux arch/<arch>/include/asm/current.h.
*/
2025-04-04 16:51:04 -05:00
# ifndef WOLFSSL_LINUXKM_NEED_LINUX_CURRENT
2022-01-07 22:39:38 -06:00
# undef current
# endif
/* min() and max() in linux/kernel.h over-aggressively type-check, producing
* myriad spurious -Werrors throughout the codebase.
*/
# undef min
# undef max
/* work around namespace conflict between wolfssl/internal.h (enum HandShakeType)
* and linux/key.h (extern int()).
*/
# define key_update wc_key_update
2025-07-16 13:09:03 -05:00
# if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) || \
(defined(RHEL_MAJOR) && \
((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
2025-04-10 17:23:17 +00:00
# define lkm_printf(format, args...) _printk(KERN_INFO "wolfssl: %s(): " format, __func__, ## args)
# else
# define lkm_printf(format, args...) printk(KERN_INFO "wolfssl: %s(): " format, __func__, ## args)
# endif
2022-01-07 22:39:38 -06:00
# define printf(...) lkm_printf(__VA_ARGS__)
# ifdef HAVE_FIPS
extern void fipsEntry ( void ) ;
# endif
/* suppress false-positive "writing 1 byte into a region of size 0" warnings
* building old kernels with new gcc:
*/
# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
_Pragma ( " GCC diagnostic ignored \" -Wstringop-overflow \" " ) ;
# endif
/* includes are all above, with incompatible warnings masked out. */
# if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)
typedef __kernel_time_t time_t ;
# else
typedef __kernel_time64_t time_t ;
# endif
extern time_t time ( time_t * timer ) ;
# define XTIME time
# define WOLFSSL_GMTIME
# define XGMTIME(c, t) gmtime(c)
# define NO_TIMEVAL 1
2022-01-08 02:41:02 -06:00
# endif /* BUILDING_WOLFSSL */
2025-07-30 22:15:05 -05:00
# if !defined(BUILDING_WOLFSSL)
/* some caller code needs these. */
# if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS)
2025-07-31 10:37:39 -05:00
# if defined(CONFIG_X86)
WOLFSSL_API __must_check int wc_can_save_vector_registers_x86 ( void ) ;
WOLFSSL_API __must_check int wc_save_vector_registers_x86 ( enum wc_svr_flags flags ) ;
2025-08-28 11:02:45 -05:00
WOLFSSL_API void wc_restore_vector_registers_x86 ( enum wc_svr_flags flags ) ;
2025-07-30 22:15:05 -05:00
# ifndef DISABLE_VECTOR_REGISTERS
2025-07-31 10:37:39 -05:00
# define DISABLE_VECTOR_REGISTERS() wc_save_vector_registers_x86(WC_SVR_FLAG_INHIBIT)
2025-07-30 22:15:05 -05:00
# endif
# ifndef REENABLE_VECTOR_REGISTERS
2025-08-28 11:02:45 -05:00
# define REENABLE_VECTOR_REGISTERS() wc_restore_vector_registers_x86(WC_SVR_FLAG_INHIBIT)
2025-07-30 22:15:05 -05:00
# endif
2025-07-31 10:37:39 -05:00
# else /* !CONFIG_X86 */
# error WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS is set for an unimplemented architecture.
# endif /* !CONFIG_X86 */
# endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS */
2025-07-30 22:15:05 -05:00
# endif /* !BUILDING_WOLFSSL */
2025-06-30 15:23:04 -05:00
/* Copied from wc_port.h: For FIPS keep the function names the same */
# ifdef HAVE_FIPS
# define wc_InitMutex InitMutex
# define wc_FreeMutex FreeMutex
# define wc_LockMutex LockMutex
# define wc_UnLockMutex UnLockMutex
# endif /* HAVE_FIPS */
# ifdef WOLFSSL_LINUXKM_USE_MUTEXES
# ifdef LINUXKM_LKCAPI_REGISTER
/* must use spin locks when registering implementations with the
* kernel, because mutexes are forbidden when calling with nonzero
* irq_count().
*/
# error WOLFSSL_LINUXKM_USE_MUTEXES is incompatible with LINUXKM_LKCAPI_REGISTER.
# endif
2025-07-16 13:09:03 -05:00
/* if BUILDING_WOLFSSL, mutex.h will have already been included
* recursively above, with the bevy of warnings suppressed, and the
* below include will be a redundant no-op.
*/
2025-06-30 15:23:04 -05:00
# include <linux/mutex.h>
typedef struct mutex wolfSSL_Mutex ;
# define WOLFSSL_MUTEX_INITIALIZER(lockname) __MUTEX_INITIALIZER(lockname)
/* Linux kernel mutex routines are voids, alas. */
static inline int wc_InitMutex ( wolfSSL_Mutex * m )
{
mutex_init ( m ) ;
return 0 ;
}
static inline int wc_FreeMutex ( wolfSSL_Mutex * m )
{
mutex_destroy ( m ) ;
return 0 ;
}
static inline int wc_LockMutex ( wolfSSL_Mutex * m )
{
2025-07-03 22:09:34 -05:00
if ( in_nmi ( ) | | hardirq_count ( ) | | in_softirq ( ) )
return - 1 ;
2025-06-30 15:23:04 -05:00
mutex_lock ( m ) ;
return 0 ;
}
static inline int wc_UnLockMutex ( wolfSSL_Mutex * m )
{
mutex_unlock ( m ) ;
return 0 ;
}
# else
2025-07-30 22:15:05 -05:00
/* if BUILDING_WOLFSSL, spinlock.h will have already been included
* recursively above, with the bevy of warnings suppressed, and the
* below include will be a redundant no-op.
*/
# include <linux/spinlock.h>
2025-07-16 13:09:03 -05:00
typedef struct wolfSSL_Mutex {
2025-06-30 15:23:04 -05:00
spinlock_t lock ;
unsigned long irq_flags ;
} wolfSSL_Mutex ;
2025-07-16 13:09:03 -05:00
2025-06-30 15:23:04 -05:00
# define WOLFSSL_MUTEX_INITIALIZER(lockname) { .lock =__SPIN_LOCK_UNLOCKED(lockname), .irq_flags = 0 }
static __always_inline int wc_InitMutex ( wolfSSL_Mutex * m )
{
m - > lock = __SPIN_LOCK_UNLOCKED ( m ) ;
m - > irq_flags = 0 ;
return 0 ;
}
static __always_inline int wc_FreeMutex ( wolfSSL_Mutex * m )
{
( void ) m ;
return 0 ;
}
2025-07-16 13:09:03 -05:00
# ifdef __PIE__
/* wc_lkm_LockMutex() can't be used inline in __PIE__ objects, due to
* direct access to pv_ops.
*/
static __always_inline int wc_LockMutex ( wolfSSL_Mutex * m )
2025-06-30 15:23:04 -05:00
{
2025-07-21 19:34:00 -05:00
return WC_LKM_INDIRECT_SYM ( wc_lkm_LockMutex ) ( m ) ;
2025-06-30 15:23:04 -05:00
}
2025-07-16 13:09:03 -05:00
# else /* !__PIE__ */
static __always_inline int wc_LockMutex ( wolfSSL_Mutex * m )
{
return wc_lkm_LockMutex ( m ) ;
}
# endif /* !__PIE__ */
2025-06-30 15:23:04 -05:00
static __always_inline int wc_UnLockMutex ( wolfSSL_Mutex * m )
{
spin_unlock_irqrestore ( & m - > lock , m - > irq_flags ) ;
return 0 ;
}
# endif
/* Undo copied defines from wc_port.h, to avoid redefinition warnings. */
# ifdef HAVE_FIPS
# undef wc_InitMutex
# undef wc_FreeMutex
# undef wc_LockMutex
# undef wc_UnLockMutex
# endif /* HAVE_FIPS */
2022-06-14 09:45:33 -05:00
2023-05-17 01:44:36 -05:00
/* prevent gcc's mm_malloc.h from being included, since it unconditionally
* includes stdlib.h, which is kernel-incompatible.
*/
# define _MM_MALLOC_H_INCLUDED
2025-07-10 00:57:51 -05:00
# ifndef BUILDING_WOLFSSL
# include <linux/slab.h>
# if defined(USE_KVMALLOC) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0))
# include <linux/mm.h>
# endif
# endif
2023-05-17 01:44:36 -05:00
/* fun fact: since linux commit 59bb47985c, kmalloc with power-of-2 size is
* aligned to the size.
*/
2024-03-01 14:55:37 -06:00
# define WC_LINUXKM_ROUND_UP_P_OF_2(x) ( \
{ \
size_t _alloc_sz = (x); \
2025-04-29 00:42:15 -05:00
if ((_alloc_sz < 8192) && (_alloc_sz != 0)) \
2024-03-01 14:55:37 -06:00
_alloc_sz = 1UL << \
((sizeof(_alloc_sz) * 8UL) - __builtin_clzl(_alloc_sz - 1)); \
_alloc_sz; \
2023-05-17 01:44:36 -05:00
})
2025-07-10 00:57:51 -05:00
# ifdef USE_KVMALLOC
2025-07-09 16:29:04 -05:00
# define malloc(size) kvmalloc_node(WC_LINUXKM_ROUND_UP_P_OF_2(size), (preempt_count() == 0 ? GFP_KERNEL : GFP_ATOMIC), NUMA_NO_NODE)
2023-05-17 01:44:36 -05:00
# define free(ptr) kvfree(ptr)
2025-07-10 00:57:51 -05:00
# ifdef USE_KVREALLOC
2025-07-09 18:22:01 -05:00
# define realloc(ptr, newsize) kvrealloc(ptr, WC_LINUXKM_ROUND_UP_P_OF_2(newsize), (preempt_count() == 0 ? GFP_KERNEL : GFP_ATOMIC))
# else
# define realloc(ptr, newsize) ((void)(ptr), (void)(newsize), NULL)
# endif
2023-05-17 01:44:36 -05:00
# else
2025-07-02 16:46:27 -05:00
# define malloc(size) kmalloc(WC_LINUXKM_ROUND_UP_P_OF_2(size), (preempt_count() == 0 ? GFP_KERNEL : GFP_ATOMIC))
2023-05-17 01:44:36 -05:00
# define free(ptr) kfree(ptr)
2025-07-02 16:46:27 -05:00
# define realloc(ptr, newsize) krealloc(ptr, WC_LINUXKM_ROUND_UP_P_OF_2(newsize), (preempt_count() == 0 ? GFP_KERNEL : GFP_ATOMIC))
2023-05-17 01:44:36 -05:00
# endif
2024-02-03 13:46:45 -06:00
# ifndef static_assert
# define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
# define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
# endif
2023-05-17 01:44:36 -05:00
# include <wolfssl/wolfcrypt/memory.h>
2024-01-26 14:07:58 -06:00
# ifdef WOLFSSL_TRACK_MEMORY
2023-05-17 01:44:36 -05:00
# define XMALLOC(s, h, t) ({(void)(h); (void)(t); wolfSSL_Malloc(s);})
2023-12-28 15:06:21 -06:00
# ifdef WOLFSSL_XFREE_NO_NULLNESS_CHECK
# define XFREE(p, h, t) ({(void)(h); (void)(t); wolfSSL_Free(p);})
# else
# define XFREE(p, h, t) ({void* _xp; (void)(h); _xp = (p); if(_xp) wolfSSL_Free(_xp);})
# endif
2023-05-17 01:44:36 -05:00
# define XREALLOC(p, n, h, t) ({(void)(h); (void)(t); wolfSSL_Realloc(p, n);})
# else
2025-07-10 00:57:51 -05:00
# if !defined(XMALLOC_USER) && !defined(XMALLOC_OVERRIDE)
# define XMALLOC(s, h, t) ({(void)(h); (void)(t); malloc(s);})
# ifdef WOLFSSL_XFREE_NO_NULLNESS_CHECK
# define XFREE(p, h, t) ({(void)(h); (void)(t); free(p);})
# else
# define XFREE(p, h, t) ({void* _xp; (void)(h); (void)(t); _xp = (p); if(_xp) free(_xp);})
# endif
# if defined(USE_KVREALLOC) || !defined(USE_KVMALLOC)
# define XREALLOC(p, n, h, t) ({(void)(h); (void)(t); realloc(p, n);})
# endif
# endif /* !XMALLOC_USER && !XMALLOC_OVERRIDE */
2023-05-17 01:44:36 -05:00
# endif
2022-01-07 22:39:38 -06:00
2022-05-04 13:16:45 -05:00
# include <linux/limits.h>
2022-01-07 22:39:38 -06:00
2024-11-01 11:56:50 -05:00
# ifndef INT32_MAX
# define INT32_MAX INT_MAX
# endif
# ifndef UINT32_MAX
# define UINT32_MAX UINT_MAX
# endif
2022-05-04 13:16:45 -05:00
/* Linux headers define these using C expressions, but we need
* them to be evaluable by the preprocessor, for use in sp_int.h.
*/
# if BITS_PER_LONG == 64
2024-02-03 13:46:45 -06:00
static_assert ( sizeof ( ULONG_MAX ) = = 8 ,
2022-05-04 13:16:45 -05:00
" BITS_PER_LONG is 64, but ULONG_MAX is not. " ) ;
2022-01-07 22:39:38 -06:00
2022-05-04 13:16:45 -05:00
# undef UCHAR_MAX
# define UCHAR_MAX 255
# undef USHRT_MAX
# define USHRT_MAX 65535
# undef UINT_MAX
# define UINT_MAX 4294967295U
# undef ULONG_MAX
# define ULONG_MAX 18446744073709551615UL
# undef ULLONG_MAX
# define ULLONG_MAX ULONG_MAX
# undef INT_MAX
# define INT_MAX 2147483647
# undef LONG_MAX
# define LONG_MAX 9223372036854775807L
# undef LLONG_MAX
# define LLONG_MAX LONG_MAX
2022-01-07 22:39:38 -06:00
2022-05-04 13:16:45 -05:00
# elif BITS_PER_LONG == 32
2024-02-03 13:46:45 -06:00
static_assert ( sizeof ( ULONG_MAX ) = = 4 ,
2022-05-04 13:16:45 -05:00
" BITS_PER_LONG is 32, but ULONG_MAX is not. " ) ;
# undef UCHAR_MAX
# define UCHAR_MAX 255
# undef USHRT_MAX
# define USHRT_MAX 65535
# undef UINT_MAX
# define UINT_MAX 4294967295U
# undef ULONG_MAX
# define ULONG_MAX 4294967295UL
# undef INT_MAX
# define INT_MAX 2147483647
# undef LONG_MAX
# define LONG_MAX 2147483647L
# undef ULLONG_MAX
# undef LLONG_MAX
# if BITS_PER_LONG_LONG == 64
# define ULLONG_MAX 18446744073709551615UL
# define LLONG_MAX 9223372036854775807L
# else
# undef NO_64BIT
# define NO_64BIT
# define ULLONG_MAX ULONG_MAX
# define LLONG_MAX LONG_MAX
# endif
# else
# error unexpected BITS_PER_LONG value.
# endif
2022-01-07 22:39:38 -06:00
# endif /* LINUXKM_WC_PORT_H */