Mirror of https://github.com/wolfSSL/wolfssl.git
Synced 2025-08-05 13:44:41 +02:00

Merge pull request #9007 from douzzer/20250715-linuxkm-portability-fixes

20250715-linuxkm-portability-fixes
@@ -451,6 +451,7 @@ REDIRECTION_OUT1_KEYID
REDIRECTION_OUT2_KEYELMID
REDIRECTION_OUT2_KEYID
RENESAS_T4_USE
+RHEL_MAJOR
RTC_ALARMSUBSECONDMASK_ALL
RTE_CMSIS_RTOS_RTX
RTOS_MODULE_NET_AVAIL

@@ -9613,7 +9613,7 @@ then
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_SHA2" ;;
'sha3') test "$ENABLED_SHA3" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: SHA-3 implementation not enabled.])
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_SHA3" ;;
-'all-sha') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_SHA1 -DLINUXKM_LKCAPI_REGISTER_SHA2 -DLINUXKM_LKCAPI_REGISTER_SHA3" ;;
+'all-sha') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_SHA_ALL" ;;
'hmac(sha1)') test "$ENABLED_SHA" != "no" && test "$ENABLED_HMAC" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: SHA-1 HMAC implementation not enabled.])
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_SHA1_HMAC" ;;
'hmac(sha2)') (test "$ENABLED_SHA224" != "no" || test "$ENABLED_SHA256" != "no" || test "$ENABLED_SHA384" != "no" || test "$ENABLED_SHA512" != "no") && \

@@ -9622,7 +9622,7 @@ then
'hmac(sha3)') test "$ENABLED_SHA3" != "no" && test "$ENABLED_HMAC" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: SHA-3 HMAC implementation not enabled.])
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_SHA3_HMAC" ;;
'all-hmac') test "$ENABLED_HMAC" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: HMAC implementation not enabled.])
-AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_SHA1_HMAC -DLINUXKM_LKCAPI_REGISTER_SHA2_HMAC -DLINUXKM_LKCAPI_REGISTER_SHA3_HMAC" ;;
+AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_HMAC_ALL" ;;
'stdrng') test "$ENABLED_HASHDRBG" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: HASHDRBG implementation not enabled.])
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_HASH_DRBG" ;;
'stdrng-default') test "$ENABLED_HASHDRBG" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: HASHDRBG implementation not enabled.])

@@ -9648,11 +9648,11 @@ then
'-sha1') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_DONT_REGISTER_SHA1" ;;
'-sha2') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_DONT_REGISTER_SHA2" ;;
'-sha3') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_DONT_REGISTER_SHA3" ;;
-'-all-sha') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_DONT_REGISTER_SHA1 -DLINUXKM_LKCAPI_DONT_REGISTER_SHA2 -DLINUXKM_LKCAPI_DONT_REGISTER_SHA3" ;;
+'-all-sha') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_DONT_REGISTER_SHA_ALL" ;;
'-hmac(sha1)') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_DONT_REGISTER_SHA1_HMAC" ;;
'-hmac(sha2)') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_DONT_REGISTER_SHA2_HMAC" ;;
'-hmac(sha3)') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_DONT_REGISTER_SHA3_HMAC" ;;
-'-all-hmac') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_DONT_REGISTER_SHA1_HMAC -DLINUXKM_LKCAPI_DONT_REGISTER_SHA2_HMAC -DLINUXKM_LKCAPI_DONT_REGISTER_SHA3_HMAC" ;;
+'-all-hmac') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_DONT_REGISTER_HMAC_ALL" ;;
'-stdrng') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_DONT_REGISTER_HASH_DRBG" ;;
'-stdrng-default') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_DONT_REGISTER_HASH_DRBG_DEFAULT" ;;
'-ecdsa') AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_DONT_REGISTER_ECDSA" ;;

@@ -104,7 +104,17 @@ ifeq "$(ENABLED_LINUXKM_PIE)" "yes"
PIE_FLAGS := -fPIE -fno-stack-protector -fno-toplevel-reorder
PIE_SUPPORT_FLAGS := -DUSE_WOLFSSL_LINUXKM_PIE_REDIRECT_TABLE
ifeq "$(KERNEL_ARCH_X86)" "yes"
-    PIE_FLAGS += -mcmodel=small -mindirect-branch=keep -mfunction-return=keep
+    PIE_FLAGS += -mcmodel=small
+    ifeq "$(CONFIG_MITIGATION_RETPOLINE)" "y"
+        PIE_FLAGS += -mfunction-return=thunk-inline
+    else
+        PIE_FLAGS += -mfunction-return=keep
+    endif
+    ifeq "$(CONFIG_MITIGATION_RETHUNK)" "y"
+        PIE_FLAGS += -mindirect-branch=thunk-inline
+    else
+        PIE_FLAGS += -mindirect-branch=keep
+    endif
endif
ifeq "$(KERNEL_ARCH)" "mips"
    PIE_FLAGS += -mabicalls

@@ -193,14 +203,19 @@ endif
--rename-section .rodata=.rodata.wolfcrypt \
--rename-section .rodata.str1.1=.rodata.wolfcrypt \
--rename-section .rodata.str1.8=.rodata.wolfcrypt \
--rename-section .rodata.cst16=.rodata.wolfcrypt \
--rename-section .data=.data.wolfcrypt \
--rename-section .data.rel.local=.data.wolfcrypt \
--rename-section .bss=.bss.wolfcrypt "$$file" || exit $$?
done
[ "$(KERNEL_ARCH_X86)" != "yes" ] || \
-{ $(READELF) --syms $(WOLFCRYPT_PIE_FILES) | \
+{ $(READELF) --sections --syms --wide $(WOLFCRYPT_PIE_FILES) | \
$(AWK) -v obj="$(obj)" ' \
-/File:/ { \
+/^File:/ { \
+phase = 0; \
+delete wolfcrypt_data_sections; \
+delete wolfcrypt_text_sections; \
+delete other_sections; \
if (substr($$2, 1, length(obj)) == obj) { \
curfile = substr($$2, length(obj) + 2); \
} else { \

@@ -208,20 +223,65 @@ endif
} \
next; \
} \
+/^Section Headers:/ { \
+phase = 1; \
+next; \
+} \
+/^Symbol table / { \
+phase = 2; \
+next; \
+} \
{ \
-if (($$4 == "SECTION") && ($$8 !~ "wolfcrypt")) {\
-if (! ((curfile ";" $$8) in warned_on)) { \
-print curfile ": " $$8 >"/dev/stderr"; \
-warned_on[curfile ": " $$8] = 1; \
+if (phase == 1) { \
+if (match($$0, "^ *\\[ *([0-9]+)\\] +([^ ]+) ", a)) {\
+switch (a[2]) { \
+case ".text.wolfcrypt": \
+{ \
+wolfcrypt_text_sections[a[1]] = a[2]; \
+next; \
+} \
+case /^\.(data|rodata|bss)\.wolfcrypt$$/: \
+{ \
+wolfcrypt_data_sections[a[1]] = a[2]; \
+next; \
+} \
+default: \
+{ \
+other_sections[a[1]] = a[2]; \
+} \
+} \
+next; \
+} \
+next; \
+} \
+else if (phase == 2) { \
+if ($$4 == "FUNC") { \
+if (! ($$7 in wolfcrypt_text_sections)) { \
+print curfile ": " $$4 " " $$8 " " other_sections[$$7] >"/dev/stderr"; \
+++warnings; \
+}}} \
} \
next; \
} \
+else if ($$4 == "OBJECT") { \
+if (! ($$7 in wolfcrypt_data_sections)) { \
+if ((other_sections[$$7] == ".printk_index") || \
+(($$8 ~ /^_entry\.[0-9]+$$|^kernel_read_file_str$$/) && \
+(other_sections[$$7] == ".data.rel.ro.local"))) \
+next; \
+print curfile ": " $$4 " " $$8 " " other_sections[$$7] >"/dev/stderr"; \
+++warnings; \
+} \
+next; \
+} \
} \
} \
END { \
if (warnings) { \
exit(1); \
} else { \
exit(0); \
}}'; } || \
-{ echo 'Error: section(s) missed by containerization.' >&2; exit 1; }
+{ echo 'Error: symbol(s) missed by containerization.' >&2; exit 1; }
ifneq "$(quiet)" "silent_"
echo ' wolfCrypt .{text,data,rodata} sections containerized to .{text,data,rodata}.wolfcrypt'
endif

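Note: the readelf/awk pass above fails the build if any wolfCrypt FUNC or OBJECT symbol lands outside the renamed .wolfcrypt sections. As a hedged illustration (not code from the wolfSSL tree), a symbol can also be pinned into such a section explicitly with the standard GCC/Clang section attribute; the objcopy --rename-section pass accomplishes the same thing wholesale:

/* Hypothetical example, not part of wolfSSL: place a constant table
 * directly into the containerized read-only section, matching the
 * .rodata.wolfcrypt name produced by the objcopy renames above. */
static const unsigned char wc_example_table[4]
    __attribute__((__section__(".rodata.wolfcrypt")))
    = { 0x01, 0x02, 0x03, 0x04 };
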
@@ -21,22 +21,6 @@

/* included by wolfcrypt/src/memory.c */

-#if defined(__PIE__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
-/* needed in 6.1+ because show_free_areas() static definition in mm.h calls
- * __show_free_areas(), which isn't exported (neither was show_free_areas()).
- */
-void my__show_free_areas(
-    unsigned int flags,
-    nodemask_t *nodemask,
-    int max_zone_idx)
-{
-    (void)flags;
-    (void)nodemask;
-    (void)max_zone_idx;
-    return;
-}
-#endif
-
#if defined(__PIE__) && defined(CONFIG_FORTIFY_SOURCE)
/* needed because FORTIFY_SOURCE inline implementations call fortify_panic(). */
void __my_fortify_panic(const char *name) {

@@ -111,9 +111,17 @@
#define WOLFSSL_NO_FLOAT_FMT
#endif

+#ifndef WOLFSSL_LINUXKM_USE_MUTEXES
+struct wolfSSL_Mutex;
+extern int wc_lkm_LockMutex(struct wolfSSL_Mutex* m);
+#endif
+
#ifdef BUILDING_WOLFSSL

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0) && defined(CONFIG_X86)
+#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)) || \
+     (defined(RHEL_MAJOR) && \
+      ((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))) && \
+    defined(CONFIG_X86)
/* linux/slab.h recursively brings in linux/page-flags.h, bringing in
 * non-inline implementations of functions folio_flags() and
 * const_folio_flags(). but we can retrofit the attribute.

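The recurring pattern in this change pairs a mainline kernel version check with an RHEL_MAJOR/RHEL_MINOR check, because RHEL 9 kernels report an older LINUX_VERSION_CODE while backporting newer interfaces. A minimal sketch of the gate, with a hypothetical feature macro:

/* Sketch only; HAVE_SOME_BACKPORT is a hypothetical macro, not part of
 * wolfSSL. RHEL_MAJOR/RHEL_MINOR come from RHEL kernel headers and are
 * absent on vanilla kernels, so the extra clause is a no-op there. */
#include <linux/version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)) || \
    (defined(RHEL_MAJOR) && \
     ((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
    #define HAVE_SOME_BACKPORT
#endif
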
@@ -302,39 +310,32 @@

#endif /* !CONFIG_FORTIFY_SOURCE */

#ifndef __PIE__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#endif

-#ifdef __PIE__
-/* without this, mm.h brings in static, but not inline, pmd_to_page(),
+#if defined(HAVE_KVMALLOC) && \
+    (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)) && \
+    !(defined(RHEL_MAJOR) && ((RHEL_MAJOR > 9) || \
+      ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
+/* before 5.16, the kvmalloc_node() and kvfree() prototypes were in
+ * mm.h. however, mm.h brings in static, but not inline, pmd_to_page(),
 * with direct references to global vmem variables.
 */
#undef USE_SPLIT_PMD_PTLOCKS
#define USE_SPLIT_PMD_PTLOCKS 0

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
-/* without this, static show_free_areas() mm.h brings in direct
- * reference to unexported __show_free_areas().
- */
-#define __show_free_areas my__show_free_areas
-void my__show_free_areas(
-    unsigned int flags,
-    nodemask_t *nodemask,
-    int max_zone_idx);
+#ifdef __PIE__
+#include <linux/mm_types.h>
+static __always_inline struct page *pmd_to_page(pmd_t *pmd);
+#endif
#endif

#if !defined(__PIE__) || (LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0))
#include <linux/mm.h>
#endif

#ifndef SINGLE_THREADED
#include <linux/kthread.h>
#endif
#ifndef __PIE__
#include <linux/kthread.h>
#include <linux/net.h>
#endif

#include <linux/slab.h>
#include <linux/sched.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)

@@ -424,16 +425,21 @@
#if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && \
    defined(CONFIG_X86)

+enum wc_svr_flags {
+    WC_SVR_FLAG_INHIBIT = 1,
+};
+
extern __must_check int allocate_wolfcrypt_linuxkm_fpu_states(void);
extern void free_wolfcrypt_linuxkm_fpu_states(void);
extern __must_check int can_save_vector_registers_x86(void);
-extern __must_check int save_vector_registers_x86(int inhibit_p);
+extern __must_check int save_vector_registers_x86(enum wc_svr_flags flags);
extern void restore_vector_registers_x86(void);

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
#include <asm/i387.h>
#else
#include <asm/simd.h>
+#include <crypto/internal/simd.h>
#endif
#ifndef CAN_SAVE_VECTOR_REGISTERS
#ifdef DEBUG_VECTOR_REGISTER_ACCESS_FUZZING

@@ -467,7 +473,7 @@
#endif

#ifndef DISABLE_VECTOR_REGISTERS
-#define DISABLE_VECTOR_REGISTERS() save_vector_registers_x86(1)
+#define DISABLE_VECTOR_REGISTERS() save_vector_registers_x86(WC_SVR_FLAG_INHIBIT)
#endif
#ifndef REENABLE_VECTOR_REGISTERS
#define REENABLE_VECTOR_REGISTERS() restore_vector_registers_x86()

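With the new enum, DISABLE_VECTOR_REGISTERS() reads as save-with-inhibit rather than a bare save_vector_registers_x86(1). A hedged usage sketch of the calling convention (illustrative, not code from the tree):

/* Sketch: bracket SIMD-using kernel code with a save/restore pair.
 * Passing no flags (0) requests a normal save; WC_SVR_FLAG_INHIBIT
 * instead forbids vector use until the matching restore. */
static int do_simd_work_sketch(void)
{
    int ret = save_vector_registers_x86(0);
    if (ret != 0)
        return ret; /* e.g. WC_ACCEL_INHIBIT_E in atomic contexts */

    /* ... AVX/SSE computation ... */

    restore_vector_registers_x86();
    return 0;
}
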
@@ -658,7 +664,9 @@
#endif
typeof(kstrtoll) *kstrtoll;

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) || \
+    (defined(RHEL_MAJOR) && \
+     ((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
typeof(_printk) *_printk;
#else
typeof(printk) *printk;

@@ -697,7 +705,9 @@
#ifdef HAVE_KVREALLOC
typeof(kvrealloc) *kvrealloc;
#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) || \
+    (defined(RHEL_MAJOR) && \
+     ((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
typeof(kmalloc_trace) *kmalloc_trace;
#else
typeof(kmem_cache_alloc_trace) *kmem_cache_alloc_trace;

@@ -849,6 +859,9 @@
typeof(_raw_spin_unlock_irqrestore) *_raw_spin_unlock_irqrestore;
#endif
typeof(_cond_resched) *_cond_resched;
+#ifndef WOLFSSL_LINUXKM_USE_MUTEXES
+typeof(wc_lkm_LockMutex) *wc_lkm_LockMutex;
+#endif

const void *_last_slot;
};

@@ -895,7 +908,9 @@
#endif
#define kstrtoll (wolfssl_linuxkm_get_pie_redirect_table()->kstrtoll)

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) || \
+    (defined(RHEL_MAJOR) && \
+     ((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
#define _printk (wolfssl_linuxkm_get_pie_redirect_table()->_printk)
#else
#define printk (wolfssl_linuxkm_get_pie_redirect_table()->printk)

@@ -935,7 +950,9 @@
#ifdef HAVE_KVREALLOC
#define kvrealloc (wolfssl_linuxkm_get_pie_redirect_table()->kvrealloc)
#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) || \
+    (defined(RHEL_MAJOR) && \
+     ((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
#define kmalloc_trace (wolfssl_linuxkm_get_pie_redirect_table()->kmalloc_trace)
#else
#define kmem_cache_alloc_trace (wolfssl_linuxkm_get_pie_redirect_table()->kmem_cache_alloc_trace)

@@ -1059,7 +1076,9 @@
 */
#define key_update wc_key_update

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) || \
+    (defined(RHEL_MAJOR) && \
+     ((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
#define lkm_printf(format, args...) _printk(KERN_INFO "wolfssl: %s(): " format, __func__, ## args)
#else
#define lkm_printf(format, args...) printk(KERN_INFO "wolfssl: %s(): " format, __func__, ## args)

@@ -1091,11 +1110,6 @@

#endif /* BUILDING_WOLFSSL */

-/* if BUILDING_WOLFSSL, mutex.h will have already been included recursively
- * above, with the bevy of warnings suppressed, and the below include will
- * be a redundant no-op.
- */
-
/* Copied from wc_port.h: For FIPS keep the function names the same */
#ifdef HAVE_FIPS
#define wc_InitMutex InitMutex

@@ -1113,6 +1127,10 @@
#error WOLFSSL_LINUXKM_USE_MUTEXES is incompatible with LINUXKM_LKCAPI_REGISTER.
#endif

+/* if BUILDING_WOLFSSL, mutex.h will have already been included
+ * recursively above, with the bevy of warnings suppressed, and the
+ * below include will be a redundant no-op.
+ */
#include <linux/mutex.h>
typedef struct mutex wolfSSL_Mutex;
#define WOLFSSL_MUTEX_INITIALIZER(lockname) __MUTEX_INITIALIZER(lockname)

@@ -1145,10 +1163,11 @@
    return 0;
}
#else
-typedef struct {
+typedef struct wolfSSL_Mutex {
    spinlock_t lock;
    unsigned long irq_flags;
} wolfSSL_Mutex;

+#define WOLFSSL_MUTEX_INITIALIZER(lockname) { .lock =__SPIN_LOCK_UNLOCKED(lockname), .irq_flags = 0 }

static __always_inline int wc_InitMutex(wolfSSL_Mutex* m)

@@ -1165,35 +1184,24 @@
    return 0;
}

+#ifdef __PIE__
+/* wc_lkm_LockMutex() can't be used inline in __PIE__ objects, due to
+ * direct access to pv_ops.
+ */
static __always_inline int wc_LockMutex(wolfSSL_Mutex *m)
{
-    unsigned long irq_flags;
-    /* first, try the cheap way. */
-    if (spin_trylock_irqsave(&m->lock, irq_flags)) {
-        m->irq_flags = irq_flags;
-        return 0;
+    return (wolfssl_linuxkm_get_pie_redirect_table()->wc_lkm_LockMutex)(m);
}
-    if (irq_count() != 0) {
-        /* Note, this catches calls while SAVE_VECTOR_REGISTERS()ed as
-         * required, because in_softirq() is always true while saved,
-         * even for WC_FPU_INHIBITED_FLAG contexts.
-         */
-        spin_lock_irqsave(&m->lock, irq_flags);
-        m->irq_flags = irq_flags;
-        return 0;
-    }
-    else {
-        for (;;) {
-            if (spin_trylock_irqsave(&m->lock, irq_flags)) {
-                m->irq_flags = irq_flags;
-                return 0;
-            }
-            cond_resched();
-        }
-    }
-    __builtin_unreachable();

+#else /* !__PIE__ */
+
+static __always_inline int wc_LockMutex(wolfSSL_Mutex *m)
+{
+    return wc_lkm_LockMutex(m);
+}
+
+#endif /* !__PIE__ */

static __always_inline int wc_UnLockMutex(wolfSSL_Mutex* m)
{
    spin_unlock_irqrestore(&m->lock, m->irq_flags);

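Taken together, the two wc_LockMutex() definitions keep the PIE build away from pv_ops while preserving a single API. A hedged usage sketch of that API (assumed typical call pattern, not code from the tree):

/* Sketch only: a statically initialized wolfSSL_Mutex guarding shared
 * state. With the spinlock-backed variant, IRQs are disabled on the
 * local CPU for the duration of the critical section. */
static wolfSSL_Mutex example_lock = WOLFSSL_MUTEX_INITIALIZER(example_lock);

static int update_shared_state_sketch(void)
{
    if (wc_LockMutex(&example_lock) != 0)
        return -1;
    /* ... touch shared state ... */
    wc_UnLockMutex(&example_lock);
    return 0;
}
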
@@ -2386,6 +2386,11 @@ static int linuxkm_test_pkcs1pad_driver(const char * driver, int nbits,
    int n_diff = 0;
    uint8_t skipped = 0;

+#ifdef LINUXKM_AKCIPHER_NO_SIGNVERIFY
+    (void)hash_oid;
+    (void)hash_len;
+#endif
+
#if !defined(LINUXKM_AKCIPHER_NO_SIGNVERIFY)
    hash = malloc(hash_len);
    if (! hash) {

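The (void) casts above are the stock idiom for parameters that are unused in one configuration; a self-contained illustration with a hypothetical config macro:

/* Hypothetical illustration, not wolfSSL code. */
static int checksum_maybe(const unsigned char *buf, int len, int seed)
{
    int sum = 0;
    int i;
#ifdef EXAMPLE_NO_SEED /* hypothetical config macro */
    (void)seed; /* parameter unused in this configuration */
#else
    sum = seed;
#endif
    for (i = 0; i < len; i++)
        sum += buf[i];
    return sum;
}
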
@@ -93,6 +93,30 @@
 */
#define WOLFKM_STDRNG_DRIVER ("sha2-256-drbg-nopr" WOLFKM_SHA_DRIVER_SUFFIX)

+#ifdef LINUXKM_LKCAPI_REGISTER_SHA_ALL
+#define LINUXKM_LKCAPI_REGISTER_SHA1
+#define LINUXKM_LKCAPI_REGISTER_SHA2
+#define LINUXKM_LKCAPI_REGISTER_SHA3
+#endif
+
+#ifdef LINUXKM_LKCAPI_DONT_REGISTER_SHA_ALL
+#define LINUXKM_LKCAPI_DONT_REGISTER_SHA1
+#define LINUXKM_LKCAPI_DONT_REGISTER_SHA2
+#define LINUXKM_LKCAPI_DONT_REGISTER_SHA3
+#endif
+
+#ifdef LINUXKM_LKCAPI_REGISTER_HMAC_ALL
+#define LINUXKM_LKCAPI_REGISTER_SHA1_HMAC
+#define LINUXKM_LKCAPI_REGISTER_SHA2_HMAC
+#define LINUXKM_LKCAPI_REGISTER_SHA3_HMAC
+#endif
+
+#ifdef LINUXKM_LKCAPI_DONT_REGISTER_HMAC_ALL
+#define LINUXKM_LKCAPI_DONT_REGISTER_SHA1_HMAC
+#define LINUXKM_LKCAPI_DONT_REGISTER_SHA2_HMAC
+#define LINUXKM_LKCAPI_DONT_REGISTER_SHA3_HMAC
+#endif
+
#ifdef LINUXKM_LKCAPI_REGISTER_SHA2
#define LINUXKM_LKCAPI_REGISTER_SHA2_224
#define LINUXKM_LKCAPI_REGISTER_SHA2_256

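These aggregate macros mirror the new configure aliases ('all-sha', '-all-hmac', etc.) earlier in this diff, so a single -D flag expands into the per-algorithm set. A hedged sketch of how registration code can then key off the individual macros (illustrative only; the real glue code is structured differently):

#if defined(LINUXKM_LKCAPI_REGISTER_SHA2_256) && \
    !defined(LINUXKM_LKCAPI_DONT_REGISTER_SHA2)
static int register_sha2_256_sketch(void)
{
    /* register the sha2-256 shash driver with the kernel crypto API */
    return 0;
}
#endif
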
@@ -132,6 +132,37 @@ static int updateFipsHash(void);
extern int wolfcrypt_benchmark_main(int argc, char** argv);
#endif /* WOLFSSL_LINUXKM_BENCHMARKS */

+#ifndef WOLFSSL_LINUXKM_USE_MUTEXES
+int wc_lkm_LockMutex(wolfSSL_Mutex* m)
+{
+    unsigned long irq_flags;
+    /* first, try the cheap way. */
+    if (spin_trylock_irqsave(&m->lock, irq_flags)) {
+        m->irq_flags = irq_flags;
+        return 0;
+    }
+    if (irq_count() != 0) {
+        /* Note, this catches calls while SAVE_VECTOR_REGISTERS()ed as
+         * required, because in_softirq() is always true while saved,
+         * even for WC_FPU_INHIBITED_FLAG contexts.
+         */
+        spin_lock_irqsave(&m->lock, irq_flags);
+        m->irq_flags = irq_flags;
+        return 0;
+    }
+    else {
+        for (;;) {
+            if (spin_trylock_irqsave(&m->lock, irq_flags)) {
+                m->irq_flags = irq_flags;
+                return 0;
+            }
+            cond_resched();
+        }
+    }
+    __builtin_unreachable();
+}
+#endif
+
WC_MAYBE_UNUSED static int linuxkm_lkcapi_sysfs_install_node(struct kobj_attribute *node, int *installed_flag)
{
    if ((installed_flag == NULL) || (! *installed_flag)) {

@@ -503,7 +534,9 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
#endif
    wolfssl_linuxkm_pie_redirect_table.kstrtoll = kstrtoll;

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) || \
+    (defined(RHEL_MAJOR) && \
+     ((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
    wolfssl_linuxkm_pie_redirect_table._printk = _printk;
#else
    wolfssl_linuxkm_pie_redirect_table.printk = printk;

@@ -540,7 +573,9 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
#ifdef HAVE_KVREALLOC
    wolfssl_linuxkm_pie_redirect_table.kvrealloc = kvrealloc;
#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0) || \
+    (defined(RHEL_MAJOR) && \
+     ((RHEL_MAJOR > 9) || ((RHEL_MAJOR == 9) && (RHEL_MINOR >= 5))))
    wolfssl_linuxkm_pie_redirect_table.kmalloc_trace =
        kmalloc_trace;
#else

@@ -697,6 +732,10 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
#endif
    wolfssl_linuxkm_pie_redirect_table._cond_resched = _cond_resched;

+#ifndef WOLFSSL_LINUXKM_USE_MUTEXES
+    wolfssl_linuxkm_pie_redirect_table.wc_lkm_LockMutex = wc_lkm_LockMutex;
+#endif
+
#ifdef CONFIG_ARM64
    wolfssl_linuxkm_pie_redirect_table.alt_cb_patch_nops = alt_cb_patch_nops;
    wolfssl_linuxkm_pie_redirect_table.queued_spin_lock_slowpath = queued_spin_lock_slowpath;

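The table assignments above follow the module's PIE indirection scheme: PIE-compiled wolfCrypt objects cannot carry direct relocations against kernel symbols, so non-PIE init code stores each symbol's address in the redirect table and PIE code calls through it (the #define blocks earlier in this diff). A simplified, hypothetical sketch of the pattern:

/* Hypothetical reduction of the redirect-table pattern; the real table
 * is struct wolfssl_linuxkm_pie_redirect_table, shown in the hunks
 * above. */
struct redirect_table_sketch {
    int (*kstrtoll_ptr)(const char *s, unsigned int base, long long *res);
};

static struct redirect_table_sketch sketch_table;

static int setup_sketch_table(void) /* non-PIE side, at module init */
{
    sketch_table.kstrtoll_ptr = kstrtoll;
    return 0;
}

/* The PIE side then routes calls through the pointer, e.g.:
 *   #define kstrtoll (sketch_table.kstrtoll_ptr)
 */
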
@@ -26,6 +26,14 @@
#error x86_vector_register_glue.c included in non-vectorized/non-x86 project.
#endif

+#ifdef WOLFSSL_LINUXKM_VERBOSE_DEBUG
+    #define VRG_PR_ERR_X pr_err
+    #define VRG_PR_WARN_X pr_warn
+#else
+    #define VRG_PR_ERR_X pr_err_once
+    #define VRG_PR_WARN_X pr_warn_once
+#endif
+
/* kernel 4.19 -- the most recent LTS before 5.4 -- lacks the necessary safety
 * checks in __kernel_fpu_begin(), and lacks TIF_NEED_FPU_LOAD.
 */

@@ -43,7 +51,6 @@ struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_states = NULL;

#define WC_FPU_COUNT_MASK 0x3fffffffU
#define WC_FPU_INHIBITED_FLAG 0x40000000U
#define WC_FPU_ALREADY_FLAG 0x80000000U

WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
{

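The three constants partition each per-CPU state word: the low 30 bits count recursive saves, and the top two bits flag inhibited and already-saved contexts. A small illustrative decode helper (not in the tree):

/* Illustrative only. */
static inline unsigned int wc_fpu_save_depth_sketch(unsigned int fpu_state)
{
    return fpu_state & WC_FPU_COUNT_MASK; /* 0x3fffffffU: recursion depth */
}

static inline int wc_fpu_is_inhibited_sketch(unsigned int fpu_state)
{
    return (fpu_state & WC_FPU_INHIBITED_FLAG) != 0;
}
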
@@ -55,7 +62,7 @@ WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
    static int warned_for_repeat_alloc = 0;
    if (! warned_for_repeat_alloc) {
        pr_err("BUG: attempt at repeat allocation"
-               " in allocate_wolfcrypt_linuxkm_fpu_states\n");
+               " in allocate_wolfcrypt_linuxkm_fpu_states.\n");
        warned_for_repeat_alloc = 1;
    }
    return BAD_STATE_E;

@@ -98,7 +105,7 @@ void free_wolfcrypt_linuxkm_fpu_states(void) {
            continue;
        if (i->fpu_state != 0) {
            pr_err("ERROR: free_wolfcrypt_linuxkm_fpu_states called"
-                   " with nonzero state 0x%x for pid %d.\n", i->fpu_state, i_pid);
+                   " with nonzero state 0x%x for PID %d.\n", i->fpu_state, i_pid);
            i->fpu_state = 0;
        }
    }

@@ -110,8 +117,8 @@ void free_wolfcrypt_linuxkm_fpu_states(void) {
/* lock-free O(1)-lookup CPU-local storage facility for tracking recursive fpu
 * pushing/popping.
 *
- * caller must have already called kernel_fpu_begin() or preempt_disable()
- * before entering this or the streamlined inline version of it below.
+ * caller must have already locked itself on its CPU before entering this, or
+ * entering the streamlined inline version of it below.
 */
static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc_unlikely(int create_p) {
    int my_cpu = raw_smp_processor_id();

@@ -133,7 +140,7 @@ static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc_unlikely(int c
#endif
    {
        if (_warned_on_null == 0) {
-            pr_err("BUG: wc_linuxkm_fpu_state_assoc called by pid %d"
+            pr_err("BUG: wc_linuxkm_fpu_state_assoc called by PID %d"
                   " before allocate_wolfcrypt_linuxkm_fpu_states.\n", my_pid);
            _warned_on_null = 1;
        }

@@ -149,8 +156,8 @@ static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc_unlikely(int c
        static int _warned_on_redundant_create_p = 0;
        if (_warned_on_redundant_create_p < 10) {
            pr_err("BUG: wc_linuxkm_fpu_state_assoc called with create_p=1 by"
-                   " pid %d on cpu %d with cpu slot already reserved by"
-                   " said pid.\n", my_pid, my_cpu);
+                   " PID %d on CPU %d with CPU slot already reserved by"
+                   " said PID.\n", my_pid, my_cpu);
            ++_warned_on_redundant_create_p;
        }
    }

@@ -168,7 +175,7 @@ static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc_unlikely(int c
     */
    if (find_get_pid(slot_pid) == NULL) {
        if (__atomic_compare_exchange_n(&slot->pid, &slot_pid, my_pid, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)) {
-            pr_warn("WARNING: wc_linuxkm_fpu_state_assoc_unlikely fixed up orphaned slot owned by dead PID %d.", slot_pid);
+            pr_warn("WARNING: wc_linuxkm_fpu_state_assoc_unlikely fixed up orphaned slot on CPU %d owned by dead PID %d.\n", my_cpu, slot_pid);
            return slot;
        }
    }

@@ -176,8 +183,8 @@ static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc_unlikely(int c
    {
        static int _warned_on_mismatched_pid = 0;
        if (_warned_on_mismatched_pid < 10) {
-            pr_warn("WARNING: wc_linuxkm_fpu_state_assoc called by pid %d on cpu %d"
-                    " but cpu slot already reserved by pid %d.\n",
+            pr_warn("WARNING: wc_linuxkm_fpu_state_assoc called by pid %d on CPU %d"
+                    " but CPU slot already reserved by pid %d.\n",
                    my_pid, my_cpu, slot_pid);
            ++_warned_on_mismatched_pid;
        }

@@ -272,8 +279,8 @@ static void wc_linuxkm_fpu_state_release_unlikely(
    if (ent->fpu_state != 0) {
        static int warned_nonzero_fpu_state = 0;
        if (! warned_nonzero_fpu_state) {
-            pr_err("wc_linuxkm_fpu_state_free for pid %d"
-                   " with nonzero fpu_state 0x%x.\n", ent->pid, ent->fpu_state);
+            VRG_PR_ERR_X("ERROR: wc_linuxkm_fpu_state_free for pid %d on CPU %d"
+                         " with nonzero fpu_state 0x%x.\n", ent->pid, raw_smp_processor_id(), ent->fpu_state);
            warned_nonzero_fpu_state = 1;
        }
        ent->fpu_state = 0;

@@ -295,17 +302,12 @@ WARN_UNUSED_RESULT int can_save_vector_registers_x86(void)

    /* check for hard interrupt context (unusable current->pid) preemptively.
     * if we're in a softirq context we'll catch that below with
-     * irq_fpu_usable().
+     * a second preempt_count() check.
     */
    if (((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) != 0) || (task_pid_nr(current) == 0))
        return 0;

-    /* Check if we're already saved, per wc_linuxkm_fpu_states.
-     *
-     * On kernel >= 6.15, irq_fpu_usable() dumps a backtrace to the kernel log
-     * if called while already saved, so it's crucial to preempt that call by
-     * checking wc_linuxkm_fpu_states.
-     */
+    /* Check if we're already saved, per wc_linuxkm_fpu_states. */
    pstate = wc_linuxkm_fpu_state_assoc(0, 0);

    if ((pstate != NULL) && (pstate->fpu_state != 0U)) {

@@ -321,52 +323,31 @@ WARN_UNUSED_RESULT int can_save_vector_registers_x86(void)
        }
    }

-#if defined(TIF_NEED_FPU_LOAD) && \
-    (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)) && \
-    ! ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 180)) && \
-       (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))) && \
-    ! ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 39)) && \
-       (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)))
-    /* Work around a kernel bug -- see linux commit 59f5ede3bc0f0.
-     * irq_fpu_usable() on these older kernels can incorrectly return true,
-     * leading to an impermissible recursive kernel_fpu_begin() that
-     * corrupts the register state. What we really want here is
-     * this_cpu_read(in_kernel_fpu), but in_kernel_fpu is an unexported
-     * static array.
-     */
-    if (irq_fpu_usable() && !test_thread_flag(TIF_NEED_FPU_LOAD))
-        return 1;
-    else if (in_nmi() || (hardirq_count() > 0) || (softirq_count() > 0))
-        return 0;
-    else if (test_thread_flag(TIF_NEED_FPU_LOAD))
+    if ((preempt_count() == 0) || may_use_simd())
        return 1;
    else
        return 0;
-#else
-    if (irq_fpu_usable())
-        return 1;
-    else
-        return 0;
-#endif
}

-WARN_UNUSED_RESULT int save_vector_registers_x86(int inhibit_p)
+WARN_UNUSED_RESULT int save_vector_registers_x86(enum wc_svr_flags flags)
{
    struct wc_thread_fpu_count_ent *pstate;

    /* check for hard interrupt context (unusable current->pid) preemptively.
     * if we're in a softirq context we'll catch that below with
-     * irq_fpu_usable().
+     * a second look at preempt_count().
     */
-    if (((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) != 0) || (task_pid_nr(current) == 0))
+    if (((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) != 0) || (task_pid_nr(current) == 0)) {
+        VRG_PR_WARN_X("WARNING: save_vector_registers_x86 called with preempt_count 0x%x and pid %d on CPU %d.\n", preempt_count(), task_pid_nr(current), raw_smp_processor_id());
        return WC_ACCEL_INHIBIT_E;
+    }

    pstate = wc_linuxkm_fpu_state_assoc(0, 0);

    /* allow for nested calls */
    if (pstate && (pstate->fpu_state != 0U)) {
        if (unlikely(pstate->fpu_state & WC_FPU_INHIBITED_FLAG)) {
-            if (inhibit_p) {
+            if (flags & WC_SVR_FLAG_INHIBIT) {
                /* allow recursive inhibit calls as long as the whole stack of
                 * them is inhibiting.
                 */

@@ -376,35 +357,47 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(int inhibit_p)
            else
                return WC_ACCEL_INHIBIT_E;
        }
-        if (unlikely(inhibit_p))
+        if (unlikely(flags & WC_SVR_FLAG_INHIBIT))
            return BAD_STATE_E;
        if (unlikely((pstate->fpu_state & WC_FPU_COUNT_MASK)
                     == WC_FPU_COUNT_MASK))
        {
-            pr_err("save_vector_registers_x86 recursion register overflow for "
-                   "pid %d.\n", pstate->pid);
+            pr_err("ERROR: save_vector_registers_x86 recursion register overflow for "
+                   "pid %d on CPU %d.\n", pstate->pid, raw_smp_processor_id());
            return BAD_STATE_E;
        } else {
            ++pstate->fpu_state;
            return 0;
        }
+        __builtin_unreachable();
    }

-    if (inhibit_p) {
-        if (in_softirq())
-            return WC_ACCEL_INHIBIT_E;
+    if (flags & WC_SVR_FLAG_INHIBIT) {
+        if ((preempt_count() != 0) && !may_use_simd())
+            return WC_ACCEL_INHIBIT_E; /* not an error here, just a
+                                        * short-circuit result.
+                                        */
+        /* we need to inhibit migration and softirqs here to assure that we can
+         * support recursive calls safely, i.e. without mistaking a softirq
+         * context for a recursion.
+         */
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
        /* inhibit migration, which gums up the algorithm in
         * kernel_fpu_{begin,end}().
         */
        migrate_disable();
#endif
-        /* we need to inhibit softirqs to assure that we can support recursive
-         * calls safely, i.e. without mistaking a softirq context for a
-         * recursion.
-         */
        local_bh_disable();

+        if (preempt_count() == 0) {
+            VRG_PR_ERR_X("BUG: save_vector_registers_x86(): zero preempt_count after local_bh_disable() on CPU %d.\n",
+                         raw_smp_processor_id());
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
            migrate_enable();
#endif
            local_bh_enable();
            return WC_ACCEL_INHIBIT_E;
+        }

        pstate = wc_linuxkm_fpu_state_assoc(1, 1);
        if (pstate == NULL) {
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \

@@ -421,41 +414,32 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(int inhibit_p)
        return 0;
    }

-    if (irq_fpu_usable()
-#if defined(TIF_NEED_FPU_LOAD) && \
-    (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)) && \
-    ! ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 180)) && \
-       (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))) && \
-    ! ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 39)) && \
-       (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)))
-    /* Work around a kernel bug -- see linux commit 59f5ede3bc0f0.
-     * irq_fpu_usable() on these older kernels can incorrectly return true,
-     * leading to an impermissible recursive kernel_fpu_begin() that
-     * corrupts the register state. What we really want here is
-     * this_cpu_read(in_kernel_fpu), but in_kernel_fpu is an unexported
-     * static array.
+    if ((preempt_count() == 0) || may_use_simd()) {
+        /* fpregs_lock() calls either local_bh_disable() or preempt_disable()
+         * depending on CONFIG_PREEMPT_RT -- we call both, explicitly.
+         *
+         * empirically, on some kernels, kernel_fpu_begin() doesn't reliably
+         * disable softirqs, indeed doesn't make preempt_count() nonzero, which
+         * breaks our locking algorithm. we sidestep this completely by
+         * explicitly disabling softirq's, preemption, and migration.
+         * helpfully, the calls to do that are all guaranteed recursion-safe.
         */
-        && !test_thread_flag(TIF_NEED_FPU_LOAD)
-#endif
-        )
-    {
+        /* note there is a bug in kernel <5.17.0 and <5.10.180 -- see linux
+         * commit 59f5ede3bc0f0 -- such that irq_fpu_usable() can incorrectly
+         * return true, leading to an impermissible recursive kernel_fpu_begin()
+         * that corrupts the register state.
+         */

#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
        /* inhibit migration, which gums up the algorithm in
         * kernel_fpu_{begin,end}().
         */
        migrate_disable();
#endif
        local_bh_disable();
+#if IS_ENABLED(CONFIG_PREEMPT_RT)
+        preempt_disable();
+#endif
        kernel_fpu_begin();
        pstate = wc_linuxkm_fpu_state_assoc(1, 1);
        if (pstate == NULL) {
            kernel_fpu_end();
+#if IS_ENABLED(CONFIG_PREEMPT_RT)
+            preempt_enable();
+#endif
            local_bh_enable();
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
            migrate_enable();

@@ -465,68 +449,37 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(int inhibit_p)

        /* set msb to 0 to trigger kernel_fpu_end() at cleanup. */
        pstate->fpu_state = 1U;
-    } else if (in_nmi() || (hardirq_count() > 0) || (softirq_count() > 0)) {
-        static int warned_fpu_forbidden = 0;
-        if (! warned_fpu_forbidden)
-            pr_err("save_vector_registers_x86 called from IRQ handler.\n");
-        return WC_ACCEL_INHIBIT_E;
-    }
-#if defined(TIF_NEED_FPU_LOAD) && \
-    (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)) && \
-    ! ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 180)) && \
-       (LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0))) && \
-    ! ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 39)) && \
-       (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)))
-    else if (test_thread_flag(TIF_NEED_FPU_LOAD)) {
-        /* assume already safely in_kernel_fpu from caller, but recursively
-         * preempt_disable() to be extra-safe.
-         */
-        preempt_disable();
-#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
-    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
-        migrate_disable();
-#endif
-        pstate = wc_linuxkm_fpu_state_assoc(1, 1);
-        if (pstate == NULL) {
-#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
-    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
-            migrate_enable();
-#endif
-            preempt_enable();
-            return BAD_STATE_E;
-        }

-        /* set _ALREADY_ flag to 1 to inhibit kernel_fpu_end() at cleanup. */
-        pstate->fpu_state =
-            WC_FPU_ALREADY_FLAG + 1U;
-    }
-#endif /* TIF_NEED_FPU_LOAD && <5.17.0 && !5.10.180+ */
-    else {
-        static int warned_fpu_forbidden = 0;
-        if (! warned_fpu_forbidden) {
-            pr_err("save_vector_registers_x86 called with !irq_fpu_usable from"
-                   " thread without previous FPU save.\n");
-            warned_fpu_forbidden = 1;
-        }
-        return WC_ACCEL_INHIBIT_E;
+        if (preempt_count() == 0) {
+            VRG_PR_ERR_X("BUG: save_vector_registers_x86(): zero preempt_count after kernel_fpu_begin() on CPU %d.\n",
+                         raw_smp_processor_id());
        }

        return 0;
+    } else {
+        VRG_PR_WARN_X("WARNING: save_vector_registers_x86 called with no saved state and nonzero preempt_count 0x%x on CPU %d.\n", preempt_count(), raw_smp_processor_id());
+#ifdef WOLFSSL_LINUXKM_VERBOSE_DEBUG
+        dump_stack();
+#endif
+        return WC_ACCEL_INHIBIT_E;
    }

+    __builtin_unreachable();
}

void restore_vector_registers_x86(void)
{
    struct wc_thread_fpu_count_ent *pstate;

-    if (in_nmi() || hardirq_count() || (task_pid_nr(current) == 0)) {
-        pr_warn("BUG: restore_vector_registers_x86() called from interrupt handler on CPU %d.",
+    if (((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) != 0) || (task_pid_nr(current) == 0)) {
+        VRG_PR_WARN_X("BUG: restore_vector_registers_x86() called from interrupt handler on CPU %d.\n",
                raw_smp_processor_id());
        return;
    }

    pstate = wc_linuxkm_fpu_state_assoc(0, 1);
    if (unlikely(pstate == NULL)) {
-        pr_warn("BUG: restore_vector_registers_x86() called by pid %d on CPU %d "
+        VRG_PR_WARN_X("BUG: restore_vector_registers_x86() called by pid %d on CPU %d "
                "with no saved state.\n", task_pid_nr(current),
                raw_smp_processor_id());
        return;

@@ -539,15 +492,16 @@ void restore_vector_registers_x86(void)
    if (pstate->fpu_state == 0U) {
        wc_linuxkm_fpu_state_release(pstate);
        kernel_fpu_end();
+#if IS_ENABLED(CONFIG_PREEMPT_RT)
+        preempt_enable();
+#endif
        local_bh_enable();
    } else if (unlikely(pstate->fpu_state & WC_FPU_INHIBITED_FLAG)) {
        pstate->fpu_state = 0U;
        wc_linuxkm_fpu_state_release(pstate);
        local_bh_enable();
    } else if (unlikely(pstate->fpu_state & WC_FPU_ALREADY_FLAG)) {
        pstate->fpu_state = 0U;
        wc_linuxkm_fpu_state_release(pstate);
        preempt_enable();
    }

#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
    migrate_enable();