Mirror of https://github.com/wolfSSL/wolfssl.git (synced 2025-07-29 18:27:29 +02:00)
Merge pull request #6418 from douzzer/20230517-linuxkm-benchmarks
20230517-linuxkm-benchmarks
@ -218,7 +218,7 @@ if BUILD_LINUXKM
         CFLAGS_FPU_DISABLE CFLAGS_FPU_ENABLE CFLAGS_SIMD_DISABLE CFLAGS_SIMD_ENABLE \
         CFLAGS_AUTO_VECTORIZE_DISABLE CFLAGS_AUTO_VECTORIZE_ENABLE \
         ASFLAGS_FPU_DISABLE_SIMD_ENABLE ASFLAGS_FPU_ENABLE_SIMD_DISABLE \
-        ASFLAGS_FPUSIMD_DISABLE ASFLAGS_FPUSIMD_ENABLE
+        ASFLAGS_FPUSIMD_DISABLE ASFLAGS_FPUSIMD_ENABLE ENABLED_LINUXKM_BENCHMARKS

 module:
         +$(MAKE) -C linuxkm libwolfssl.ko
configure.ac (20 changed lines)
@ -495,6 +495,16 @@ then
 fi
 AC_SUBST([ENABLED_LINUXKM_PIE])

+AC_ARG_ENABLE([linuxkm-benchmarks],
+    [AS_HELP_STRING([--enable-linuxkm-benchmarks],[Enable crypto benchmarking autorun at module load time for Linux kernel module (default: disabled)])],
+    [ENABLED_LINUXKM_BENCHMARKS=$enableval],
+    [ENABLED_LINUXKM_BENCHMARKS=no]
+    )
+if test "$ENABLED_LINUXKM_BENCHMARKS" = "yes"
+then
+    AM_CFLAGS="$AM_CFLAGS -DWOLFSSL_LINUXKM_BENCHMARKS"
+fi
+AC_SUBST([ENABLED_LINUXKM_BENCHMARKS])

 if test "$ENABLED_LINUXKM_DEFAULTS" = "yes"
 then
@ -7369,6 +7379,11 @@ fi

 if test "$ENABLED_ASYNCCRYPT" = "yes"
 then
+    if ! test -f ${srcdir}/wolfcrypt/src/async.c || ! test -f ${srcdir}/wolfssl/wolfcrypt/async.h
+    then
+        AC_MSG_ERROR([--enable-asynccrypt requested, but WOLFSSL_ASYNC_CRYPT source files are missing.])
+    fi
+
     AM_CFLAGS="$AM_CFLAGS -DWOLFSSL_ASYNC_CRYPT -DHAVE_WOLF_EVENT -DHAVE_WOLF_BIGINT -DWOLFSSL_NO_HASH_RAW"

     # If no async backend (hardware or software) has been explicitly enabled,
@ -8810,9 +8825,9 @@ then
     for header in $openssl_headers
     do
         AC_CHECK_HEADER([$header], [], [
-            AC_MSG_ERROR([Error including $header. Possible circular dependency introduced or missing include.])
+            AC_MSG_ERROR([Header file inconsistency detected -- error including ${header}.])
         ], [
             #include <wolfssl/options.h>
             #include <${OPTION_FILE}>
             extern int dummy_int_to_make_compiler_happy;
         ])
     done
@ -8852,6 +8867,7 @@ echo "   * FPU enable as flags:        $ASFLAGS_FPU_ENABLE_SIMD_DISABLE" && \
 echo "   * SIMD+FPU disable as flags:  $ASFLAGS_FPUSIMD_DISABLE" && \
 echo "   * SIMD+FPU enable as flags:   $ASFLAGS_FPUSIMD_ENABLE" && \
 echo "   * Linux kernel module PIE:    $ENABLED_LINUXKM_PIE"
+echo "   * Linux kernel module bench:  $ENABLED_LINUXKM_BENCHMARKS"

 echo "   * Debug enabled:              $ax_enable_debug"
 echo "   * Coverage enabled:           $ax_enable_coverage"
@ -90,6 +90,11 @@ ifeq "$(ENABLED_LINUXKM_PIE)" "yes"
     $(obj)/linuxkm/module_hooks.o: ccflags-y += $(PIE_SUPPORT_FLAGS)
 endif

+ifeq "$(ENABLED_LINUXKM_BENCHMARKS)" "yes"
+    $(obj)/linuxkm/module_hooks.o: ccflags-y = $(WOLFSSL_CFLAGS) $(CFLAGS_FPU_ENABLE) $(CFLAGS_SIMD_ENABLE) $(PIE_SUPPORT_FLAGS)
+    $(obj)/linuxkm/module_hooks.o: asflags-y = $(WOLFSSL_ASFLAGS) $(ASFLAGS_FPU_ENABLE_SIMD_DISABLE)
+endif
+
 asflags-y := $(WOLFSSL_ASFLAGS) $(ASFLAGS_FPUSIMD_DISABLE)

 # vectorized implementations that are kernel-safe are listed here.
@ -21,320 +21,262 @@
|
||||
|
||||
/* included by wolfcrypt/src/memory.c */
|
||||
|
||||
#if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && defined(CONFIG_X86)
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
|
||||
static union fpregs_state **wolfcrypt_linuxkm_fpu_states = NULL;
|
||||
#else
|
||||
static struct fpstate **wolfcrypt_linuxkm_fpu_states = NULL;
|
||||
#endif
|
||||
#else
|
||||
static unsigned int *wolfcrypt_linuxkm_fpu_states = NULL;
|
||||
#define WC_FPU_COUNT_MASK 0x7fffffffU
|
||||
#define WC_FPU_SAVED_MASK 0x80000000U
|
||||
#endif
|
||||
#ifdef HAVE_KVMALLOC
|
||||
/* adapted from kvrealloc() draft by Changli Gao, 2010-05-13 */
|
||||
void *lkm_realloc(void *ptr, size_t newsize) {
|
||||
void *nptr;
|
||||
size_t oldsize;
|
||||
|
||||
static WARN_UNUSED_RESULT inline int am_in_hard_interrupt_handler(void)
|
||||
{
|
||||
return (preempt_count() & (NMI_MASK | HARDIRQ_MASK)) != 0;
|
||||
if (unlikely(newsize == 0)) {
|
||||
kvfree(ptr);
|
||||
return ZERO_SIZE_PTR;
|
||||
}
|
||||
|
||||
WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
|
||||
{
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
|
||||
wolfcrypt_linuxkm_fpu_states =
|
||||
(union fpregs_state **)kzalloc(nr_cpu_ids
|
||||
* sizeof(struct fpu_state *),
|
||||
GFP_KERNEL);
|
||||
#else
|
||||
wolfcrypt_linuxkm_fpu_states =
|
||||
(struct fpstate **)kzalloc(nr_cpu_ids
|
||||
* sizeof(struct fpstate *),
|
||||
GFP_KERNEL);
|
||||
#endif
|
||||
#else
|
||||
wolfcrypt_linuxkm_fpu_states =
|
||||
(unsigned int *)kzalloc(nr_cpu_ids * sizeof(unsigned int),
|
||||
GFP_KERNEL);
|
||||
#endif
|
||||
if (unlikely(ptr == NULL))
|
||||
return kvmalloc_node(newsize, GFP_KERNEL, NUMA_NO_NODE);
|
||||
|
||||
if (! wolfcrypt_linuxkm_fpu_states) {
|
||||
pr_err("warning, allocation of %lu bytes for "
|
||||
"wolfcrypt_linuxkm_fpu_states failed.\n",
|
||||
nr_cpu_ids * sizeof(struct fpu_state *));
|
||||
return MEMORY_E;
|
||||
}
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
{
|
||||
typeof(nr_cpu_ids) i;
|
||||
for (i=0; i<nr_cpu_ids; ++i) {
|
||||
_Static_assert(sizeof(union fpregs_state) <= PAGE_SIZE,
|
||||
"union fpregs_state is larger than expected.");
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
|
||||
wolfcrypt_linuxkm_fpu_states[i] =
|
||||
(union fpregs_state *)kzalloc(PAGE_SIZE
|
||||
/* sizeof(union fpregs_state) */,
|
||||
GFP_KERNEL);
|
||||
#else
|
||||
wolfcrypt_linuxkm_fpu_states[i] =
|
||||
(struct fpstate *)kzalloc(PAGE_SIZE
|
||||
/* sizeof(struct fpstate) */,
|
||||
GFP_KERNEL);
|
||||
#endif
|
||||
if (! wolfcrypt_linuxkm_fpu_states[i])
|
||||
break;
|
||||
/* double-check that the allocation is 64-byte-aligned as needed
|
||||
* for xsave.
|
||||
*/
|
||||
if ((unsigned long)wolfcrypt_linuxkm_fpu_states[i] & 63UL) {
|
||||
pr_err("warning, allocation for wolfcrypt_linuxkm_fpu_states "
|
||||
"was not properly aligned (%px).\n",
|
||||
wolfcrypt_linuxkm_fpu_states[i]);
|
||||
kfree(wolfcrypt_linuxkm_fpu_states[i]);
|
||||
wolfcrypt_linuxkm_fpu_states[i] = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (i < nr_cpu_ids) {
|
||||
pr_err("warning, only %u/%u allocations succeeded for "
|
||||
"wolfcrypt_linuxkm_fpu_states.\n",
|
||||
i, nr_cpu_ids);
|
||||
return MEMORY_E;
|
||||
}
|
||||
}
|
||||
#endif /* LINUXKM_SIMD_IRQ */
|
||||
return 0;
|
||||
}
|
||||
if (is_vmalloc_addr(ptr)) {
|
||||
/* no way to discern the size of the old allocation,
|
||||
* because the kernel doesn't export find_vm_area(). if
|
||||
* it did, we could then call get_vm_area_size() on the
|
||||
* returned struct vm_struct.
|
||||
*/
|
||||
return NULL;
|
||||
} else {
|
||||
#ifndef __PIE__
|
||||
struct page *page;
|
||||
|
||||
void free_wolfcrypt_linuxkm_fpu_states(void)
|
||||
{
|
||||
if (wolfcrypt_linuxkm_fpu_states) {
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
typeof(nr_cpu_ids) i;
|
||||
for (i=0; i<nr_cpu_ids; ++i) {
|
||||
if (wolfcrypt_linuxkm_fpu_states[i])
|
||||
kfree(wolfcrypt_linuxkm_fpu_states[i]);
|
||||
}
|
||||
#endif /* LINUXKM_SIMD_IRQ */
|
||||
kfree(wolfcrypt_linuxkm_fpu_states);
|
||||
wolfcrypt_linuxkm_fpu_states = 0;
|
||||
}
|
||||
}
|
||||
|
||||
WARN_UNUSED_RESULT int save_vector_registers_x86(void)
|
||||
{
|
||||
int processor_id;
|
||||
|
||||
preempt_disable();
|
||||
|
||||
processor_id = smp_processor_id();
|
||||
|
||||
{
|
||||
static int _warned_on_null = -1;
|
||||
if ((wolfcrypt_linuxkm_fpu_states == NULL)
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
|| (wolfcrypt_linuxkm_fpu_states[processor_id] == NULL)
|
||||
#endif
|
||||
)
|
||||
{
|
||||
preempt_enable();
|
||||
if (_warned_on_null < processor_id) {
|
||||
_warned_on_null = processor_id;
|
||||
pr_err("save_vector_registers_x86 called for cpu id %d "
|
||||
"with null context buffer.\n", processor_id);
|
||||
}
|
||||
return BAD_STATE_E;
|
||||
}
|
||||
}
|
||||
|
||||
if (! irq_fpu_usable()) {
|
||||
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
if (am_in_hard_interrupt_handler()) {
|
||||
|
||||
/* allow for nested calls */
|
||||
if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] != 0) {
|
||||
if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] == 255) {
|
||||
preempt_enable();
|
||||
pr_err("save_vector_registers_x86 recursion register overflow for "
|
||||
"cpu id %d.\n", processor_id);
|
||||
return BAD_STATE_E;
|
||||
} else {
|
||||
++((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1];
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
/* note, fpregs_lock() is not needed here, because
|
||||
* interrupts/preemptions are already disabled here.
|
||||
*/
|
||||
{
|
||||
/* save_fpregs_to_fpstate() only accesses fpu->state, which
|
||||
* has stringent alignment requirements (64 byte cache
|
||||
* line), but takes a pointer to the parent struct. work
|
||||
* around this.
|
||||
*/
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
|
||||
struct fpu *fake_fpu_pointer =
|
||||
(struct fpu *)(((char *)wolfcrypt_linuxkm_fpu_states[processor_id])
|
||||
- offsetof(struct fpu, state));
|
||||
copy_fpregs_to_fpstate(fake_fpu_pointer);
|
||||
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
|
||||
struct fpu *fake_fpu_pointer =
|
||||
(struct fpu *)(((char *)wolfcrypt_linuxkm_fpu_states[processor_id])
|
||||
- offsetof(struct fpu, state));
|
||||
save_fpregs_to_fpstate(fake_fpu_pointer);
|
||||
#else
|
||||
struct fpu *fake_fpu_pointer =
|
||||
(struct fpu *)(((char *)wolfcrypt_linuxkm_fpu_states[processor_id])
|
||||
- offsetof(struct fpu, fpstate));
|
||||
save_fpregs_to_fpstate(fake_fpu_pointer);
|
||||
#endif
|
||||
}
|
||||
/* mark the slot as used. */
|
||||
((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] = 1;
|
||||
/* note, not preempt_enable()ing, mirroring kernel_fpu_begin()
|
||||
* semantics, even though routine will have been entered already
|
||||
* non-preemptable.
|
||||
*/
|
||||
return 0;
|
||||
} else
|
||||
#endif /* LINUXKM_SIMD_IRQ */
|
||||
{
|
||||
preempt_enable();
|
||||
return BAD_STATE_E;
|
||||
}
|
||||
page = virt_to_head_page(ptr);
|
||||
if (PageSlab(page) || PageCompound(page)) {
|
||||
if (newsize < PAGE_SIZE)
|
||||
#endif /* ! __PIE__ */
|
||||
return krealloc(ptr, newsize, GFP_KERNEL);
|
||||
#ifndef __PIE__
|
||||
oldsize = ksize(ptr);
|
||||
} else {
|
||||
oldsize = page->private;
|
||||
if (newsize <= oldsize)
|
||||
return ptr;
|
||||
}
|
||||
#endif /* ! __PIE__ */
|
||||
}
|
||||
|
||||
/* allow for nested calls */
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] != 0) {
|
||||
if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] == 255) {
|
||||
preempt_enable();
|
||||
pr_err("save_vector_registers_x86 recursion register overflow for "
|
||||
"cpu id %d.\n", processor_id);
|
||||
return BAD_STATE_E;
|
||||
} else {
|
||||
++((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1];
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
kernel_fpu_begin();
|
||||
preempt_enable(); /* kernel_fpu_begin() does its own
|
||||
* preempt_disable(). decrement ours.
|
||||
*/
|
||||
((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] = 1;
|
||||
#else /* !LINUXKM_SIMD_IRQ */
|
||||
if (wolfcrypt_linuxkm_fpu_states[processor_id] != 0U) {
|
||||
if ((wolfcrypt_linuxkm_fpu_states[processor_id] & WC_FPU_COUNT_MASK)
|
||||
== WC_FPU_COUNT_MASK)
|
||||
{
|
||||
preempt_enable();
|
||||
pr_err("save_vector_registers_x86 recursion register overflow for "
|
||||
"cpu id %d.\n", processor_id);
|
||||
return BAD_STATE_E;
|
||||
} else {
|
||||
++wolfcrypt_linuxkm_fpu_states[processor_id];
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
nptr = kvmalloc_node(newsize, GFP_KERNEL, NUMA_NO_NODE);
|
||||
if (nptr != NULL) {
|
||||
memcpy(nptr, ptr, oldsize);
|
||||
kvfree(ptr);
|
||||
}
|
||||
|
||||
/* if kernel_fpu_begin() won't actually save the reg file (because
|
||||
* it was already saved and invalidated, or because we're in a
|
||||
* kernel thread), don't call kernel_fpu_begin() here, nor call
|
||||
* kernel_fpu_end() in cleanup. this avoids pointless overhead. in
|
||||
* kernels >=5.17.12 (from changes to irq_fpu_usable() in linux
|
||||
* commit 59f5ede3bc0f, backported somewhere >5.17.5), this also
|
||||
* fixes register corruption.
|
||||
*/
|
||||
if ((current->flags & PF_KTHREAD) ||
|
||||
test_thread_flag(TIF_NEED_FPU_LOAD))
|
||||
{
|
||||
wolfcrypt_linuxkm_fpu_states[processor_id] =
|
||||
WC_FPU_SAVED_MASK + 1U; /* set msb 1 to inhibit kernel_fpu_end() at cleanup. */
|
||||
/* keep preempt_disable()d from above. */
|
||||
} else {
|
||||
kernel_fpu_begin();
|
||||
preempt_enable(); /* kernel_fpu_begin() does its own
|
||||
* preempt_disable(). decrement ours.
|
||||
*/
|
||||
wolfcrypt_linuxkm_fpu_states[processor_id] = 1U; /* set msb 0 to trigger kernel_fpu_end() at cleanup. */
|
||||
}
|
||||
#endif /* !LINUXKM_SIMD_IRQ */
|
||||
return nptr;
|
||||
}
|
||||
#endif /* HAVE_KVMALLOC */
|
||||
|
||||
#if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && defined(CONFIG_X86)
|
||||
|
||||
static unsigned int wc_linuxkm_fpu_states_n_tracked = 0;
|
||||
|
||||
struct wc_thread_fpu_count_ent {
|
||||
volatile pid_t pid;
|
||||
unsigned int fpu_state;
|
||||
};
|
||||
struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_states = NULL;
|
||||
#define WC_FPU_COUNT_MASK 0x7fffffffU
|
||||
#define WC_FPU_SAVED_MASK 0x80000000U
|
||||
|
||||
WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
|
||||
{
|
||||
if (wc_linuxkm_fpu_states != NULL) {
|
||||
static int warned_for_repeat_alloc = 0;
|
||||
if (! warned_for_repeat_alloc) {
|
||||
pr_err("attempt at repeat allocation"
|
||||
" in allocate_wolfcrypt_linuxkm_fpu_states\n");
|
||||
warned_for_repeat_alloc = 1;
|
||||
}
|
||||
return BAD_STATE_E;
|
||||
}
|
||||
|
||||
if (nr_cpu_ids >= 16)
|
||||
wc_linuxkm_fpu_states_n_tracked = nr_cpu_ids * 2;
|
||||
else
|
||||
wc_linuxkm_fpu_states_n_tracked = 32;
|
||||
|
||||
wc_linuxkm_fpu_states =
|
||||
(struct wc_thread_fpu_count_ent *)malloc(
|
||||
wc_linuxkm_fpu_states_n_tracked * sizeof(wc_linuxkm_fpu_states[0]));
|
||||
|
||||
if (! wc_linuxkm_fpu_states) {
|
||||
pr_err("allocation of %lu bytes for "
|
||||
"wc_linuxkm_fpu_states failed.\n",
|
||||
nr_cpu_ids * sizeof(struct fpu_state *));
|
||||
return MEMORY_E;
|
||||
}
|
||||
|
||||
memset(wc_linuxkm_fpu_states, 0, wc_linuxkm_fpu_states_n_tracked * sizeof(wc_linuxkm_fpu_states[0]));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void free_wolfcrypt_linuxkm_fpu_states(void) {
|
||||
struct wc_thread_fpu_count_ent *i, *i_endptr;
|
||||
pid_t i_pid;
|
||||
|
||||
if (wc_linuxkm_fpu_states == NULL) {
|
||||
pr_err("free_wolfcrypt_linuxkm_fpu_states called"
|
||||
" before allocate_wolfcrypt_linuxkm_fpu_states.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = wc_linuxkm_fpu_states,
|
||||
i_endptr = &wc_linuxkm_fpu_states[wc_linuxkm_fpu_states_n_tracked];
|
||||
i < i_endptr;
|
||||
++i)
|
||||
{
|
||||
i_pid = __atomic_load_n(&i->pid, __ATOMIC_CONSUME);
|
||||
if (i_pid == 0)
|
||||
continue;
|
||||
if (i->fpu_state != 0) {
|
||||
pr_err("free_wolfcrypt_linuxkm_fpu_states called"
|
||||
" with nonzero state 0x%x for pid %d.\n", i->fpu_state, i_pid);
|
||||
i->fpu_state = 0;
|
||||
}
|
||||
}
|
||||
|
||||
free(wc_linuxkm_fpu_states);
|
||||
wc_linuxkm_fpu_states = NULL;
|
||||
}
|
||||
|
||||
/* lock-(mostly)-free thread-local storage facility for tracking recursive fpu pushing/popping */
|
||||
static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc(int create_p) {
|
||||
struct wc_thread_fpu_count_ent *i, *i_endptr, *i_empty;
|
||||
pid_t my_pid = task_pid_nr(current), i_pid;
|
||||
|
||||
{
|
||||
static int _warned_on_null = 0;
|
||||
if (wc_linuxkm_fpu_states == NULL)
|
||||
{
|
||||
if (_warned_on_null == 0) {
|
||||
pr_err("wc_linuxkm_fpu_state_assoc called by pid %d"
|
||||
" before allocate_wolfcrypt_linuxkm_fpu_states.\n", my_pid);
|
||||
_warned_on_null = 1;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
i_endptr = &wc_linuxkm_fpu_states[wc_linuxkm_fpu_states_n_tracked];
|
||||
|
||||
for (;;) {
|
||||
for (i = wc_linuxkm_fpu_states,
|
||||
i_empty = NULL;
|
||||
i < i_endptr;
|
||||
++i)
|
||||
{
|
||||
i_pid = __atomic_load_n(&i->pid, __ATOMIC_CONSUME);
|
||||
if (i_pid == my_pid)
|
||||
return i;
|
||||
if ((i_empty == NULL) && (i_pid == 0))
|
||||
i_empty = i;
|
||||
}
|
||||
if ((i_empty == NULL) || (! create_p))
|
||||
return NULL;
|
||||
|
||||
i_pid = 0;
|
||||
if (__atomic_compare_exchange_n(
|
||||
&(i_empty->pid),
|
||||
&i_pid,
|
||||
my_pid,
|
||||
0 /* weak */,
|
||||
__ATOMIC_SEQ_CST /* success_memmodel */,
|
||||
__ATOMIC_SEQ_CST /* failure_memmodel */))
|
||||
{
|
||||
return i_empty;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void wc_linuxkm_fpu_state_free(struct wc_thread_fpu_count_ent *ent) {
|
||||
if (ent->fpu_state != 0) {
|
||||
static int warned_nonzero_fpu_state = 0;
|
||||
if (! warned_nonzero_fpu_state) {
|
||||
pr_err("wc_linuxkm_fpu_state_free for pid %d"
|
||||
" with nonzero fpu_state 0x%x.\n", ent->pid, ent->fpu_state);
|
||||
warned_nonzero_fpu_state = 1;
|
||||
}
|
||||
ent->fpu_state = 0;
|
||||
}
|
||||
__atomic_store_n(&ent->pid, 0, __ATOMIC_RELEASE);
|
||||
}
|
||||
|
||||
WARN_UNUSED_RESULT int save_vector_registers_x86(void)
|
||||
{
|
||||
struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(1);
|
||||
if (pstate == NULL)
|
||||
return ENOMEM;
|
||||
|
||||
/* allow for nested calls */
|
||||
if (pstate->fpu_state != 0U) {
|
||||
if ((pstate->fpu_state & WC_FPU_COUNT_MASK)
|
||||
== WC_FPU_COUNT_MASK)
|
||||
{
|
||||
pr_err("save_vector_registers_x86 recursion register overflow for "
|
||||
"pid %d.\n", pstate->pid);
|
||||
return BAD_STATE_E;
|
||||
} else {
|
||||
++pstate->fpu_state;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
void restore_vector_registers_x86(void)
|
||||
{
|
||||
int processor_id = smp_processor_id();
|
||||
|
||||
if ((wolfcrypt_linuxkm_fpu_states == NULL)
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
|| (wolfcrypt_linuxkm_fpu_states[processor_id] == NULL)
|
||||
if (irq_fpu_usable()) {
|
||||
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
|
||||
/* inhibit migration, which gums up the algorithm in kernel_fpu_{begin,end}(). */
|
||||
migrate_disable();
|
||||
#endif
|
||||
)
|
||||
{
|
||||
pr_err("restore_vector_registers_x86 called for cpu id %d "
|
||||
"with null context buffer.\n", processor_id);
|
||||
return;
|
||||
}
|
||||
kernel_fpu_begin();
|
||||
pstate->fpu_state = 1U; /* set msb 0 to trigger kernel_fpu_end() at cleanup. */
|
||||
} else if (in_nmi() || (hardirq_count() > 0) || (softirq_count() > 0)) {
|
||||
static int warned_fpu_forbidden = 0;
|
||||
if (! warned_fpu_forbidden)
|
||||
pr_err("save_vector_registers_x86 called from IRQ handler.\n");
|
||||
wc_linuxkm_fpu_state_free(pstate);
|
||||
return EPERM;
|
||||
} else {
|
||||
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
|
||||
migrate_disable();
|
||||
#endif
|
||||
/* assume already safely in_kernel_fpu. */
|
||||
pstate->fpu_state =
|
||||
WC_FPU_SAVED_MASK + 1U; /* set msb 1 to inhibit kernel_fpu_end() at cleanup. */
|
||||
}
|
||||
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] == 0)
|
||||
{
|
||||
pr_err("restore_vector_registers_x86 called for cpu id %d "
|
||||
"without saved context.\n", processor_id);
|
||||
return;
|
||||
}
|
||||
|
||||
if (--((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] > 0) {
|
||||
preempt_enable(); /* preempt_disable count will still be nonzero after this decrement. */
|
||||
return;
|
||||
}
|
||||
|
||||
if (am_in_hard_interrupt_handler()) {
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
|
||||
copy_kernel_to_fpregs(wolfcrypt_linuxkm_fpu_states[processor_id]);
|
||||
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
|
||||
__restore_fpregs_from_fpstate(wolfcrypt_linuxkm_fpu_states[processor_id],
|
||||
xfeatures_mask_all);
|
||||
#else
|
||||
restore_fpregs_from_fpstate(wolfcrypt_linuxkm_fpu_states[processor_id],
|
||||
fpu_kernel_cfg.max_features);
|
||||
#endif
|
||||
preempt_enable();
|
||||
} else {
|
||||
kernel_fpu_end();
|
||||
}
|
||||
#else /* !LINUXKM_SIMD_IRQ */
|
||||
if ((wolfcrypt_linuxkm_fpu_states[processor_id] & WC_FPU_COUNT_MASK) == 0U)
|
||||
{
|
||||
pr_err("restore_vector_registers_x86 called for cpu id %d "
|
||||
"without saved context.\n", processor_id);
|
||||
return;
|
||||
}
|
||||
|
||||
if ((--wolfcrypt_linuxkm_fpu_states[processor_id] & WC_FPU_COUNT_MASK) > 0U) {
|
||||
preempt_enable(); /* preempt_disable count may still be nonzero
|
||||
* after this decrement, but any remaining
|
||||
* count(s) aren't ours.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
|
||||
if (wolfcrypt_linuxkm_fpu_states[processor_id] == 0U) {
|
||||
kernel_fpu_end();
|
||||
} else {
|
||||
preempt_enable(); /* preempt_disable count will still be nonzero
|
||||
* after this decrement.
|
||||
*/
|
||||
wolfcrypt_linuxkm_fpu_states[processor_id] = 0U;
|
||||
}
|
||||
#endif /* !LINUXKM_SIMD_IRQ */
|
||||
return 0;
|
||||
}
|
||||
|
||||
void restore_vector_registers_x86(void)
|
||||
{
|
||||
struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(0);
|
||||
if (pstate == NULL) {
|
||||
pr_err("restore_vector_registers_x86 called by pid %d "
|
||||
"with no saved state.\n", task_pid_nr(current));
|
||||
return;
|
||||
}
|
||||
|
||||
if ((--pstate->fpu_state & WC_FPU_COUNT_MASK) > 0U) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (pstate->fpu_state == 0U)
|
||||
kernel_fpu_end();
|
||||
else
|
||||
pstate->fpu_state = 0U;
|
||||
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
|
||||
migrate_enable();
|
||||
#endif
|
||||
|
||||
wc_linuxkm_fpu_state_free(pstate);
|
||||
|
||||
return;
|
||||
}
|
||||
#endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS && CONFIG_X86 */
|
||||
|
||||
#if defined(__PIE__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
|
||||
|
@ -24,6 +24,12 @@
 #ifndef LINUXKM_WC_PORT_H
 #define LINUXKM_WC_PORT_H

+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+    #error Unsupported kernel.
+#endif
+
 #ifdef HAVE_CONFIG_H
     #ifndef PACKAGE_NAME
         #error wc_port.h included before config.h
@ -59,6 +65,23 @@
         (int)_xatoi_res;                      \
     })

+/* Kbuild+gcc on x86 doesn't consistently honor the default ALIGN16 on stack objects,
+ * but gives adequate alignment with "32".
+ */
+#if defined(CONFIG_X86) && !defined(ALIGN16)
+    #define ALIGN16 __attribute__ ( (aligned (32)))
+#endif
+
+/* kvmalloc()/kvfree() and friends added in linux commit a7c3e901 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
+    #define HAVE_KVMALLOC
+#endif
+
+/* kernel printf doesn't implement fp. */
+#ifndef WOLFSSL_NO_FLOAT_FMT
+    #define WOLFSSL_NO_FLOAT_FMT
+#endif
+
 #ifdef BUILDING_WOLFSSL

 #if defined(CONFIG_MIPS) && defined(HAVE_LINUXKM_PIE_SUPPORT)
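The ALIGN16-as-32 override above is easiest to see in isolation. The following is a minimal sketch, not wolfSSL code: over-aligning an object guarantees that the 16-byte-aligned accesses expected by SIMD code (and the runtime AES key alignment check added later in this PR) hold even when the compiler only promises a smaller stack alignment. The struct and function names are illustrative.

    #include <stdint.h>

    /* Hypothetical scratch type; aligned(32) mirrors the ALIGN16 override above. */
    struct simd_scratch {
        uint8_t bytes[64];
    } __attribute__((aligned(32)));

    static int scratch_is_16_byte_aligned(const struct simd_scratch *s)
    {
        /* same low-4-bits test the runtime alignment checks in this PR perform */
        return ((uintptr_t)s->bytes & (uintptr_t)0xf) == 0;
    }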
@ -95,7 +118,6 @@
|
||||
|
||||
#include <linux/kconfig.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/version.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
@ -124,6 +146,8 @@
|
||||
#ifndef CONFIG_X86
|
||||
#error X86 SIMD extensions requested, but CONFIG_X86 is not set.
|
||||
#endif
|
||||
#define WOLFSSL_LINUXKM_SIMD
|
||||
#define WOLFSSL_LINUXKM_SIMD_X86
|
||||
#ifndef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
|
||||
#define WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
|
||||
#endif
|
||||
@ -133,6 +157,8 @@
|
||||
#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
|
||||
#error ARM SIMD extensions requested, but CONFIG_ARM* is not set.
|
||||
#endif
|
||||
#define WOLFSSL_LINUXKM_SIMD
|
||||
#define WOLFSSL_LINUXKM_SIMD_ARM
|
||||
#ifndef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
|
||||
#define WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
|
||||
#endif
|
||||
@ -142,26 +168,17 @@
|
||||
#endif
|
||||
#endif
|
||||
|
||||
/* benchmarks.c uses floating point math, so needs a working SAVE_VECTOR_REGISTERS(). */
|
||||
#if defined(WOLFSSL_LINUXKM_BENCHMARKS) && !defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS)
|
||||
#define WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
|
||||
#endif
|
||||
|
||||
#if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && defined(CONFIG_X86)
|
||||
#define WOLFSSL_LINUXKM_SIMD
|
||||
#define WOLFSSL_LINUXKM_SIMD_X86
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
|
||||
#include <asm/i387.h>
|
||||
#else
|
||||
#include <asm/simd.h>
|
||||
#endif
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
|
||||
#include <asm/fpu/internal.h>
|
||||
#endif
|
||||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)
|
||||
#error LINUXKM_SIMD_IRQ is unavailable on linux >= 5.16 (missing exports around fpregs)
|
||||
/*
|
||||
* #include <asm/fpu/sched.h>
|
||||
* #include <asm/fpu/signal.h>
|
||||
*/
|
||||
#endif
|
||||
#endif
|
||||
#ifndef SAVE_VECTOR_REGISTERS
|
||||
#define SAVE_VECTOR_REGISTERS(fail_clause) { int _svr_ret = save_vector_registers_x86(); if (_svr_ret != 0) { fail_clause } }
|
||||
#endif
|
||||
@ -169,12 +186,7 @@
|
||||
#define RESTORE_VECTOR_REGISTERS() restore_vector_registers_x86()
|
||||
#endif
|
||||
#elif defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
|
||||
#define WOLFSSL_LINUXKM_SIMD
|
||||
#define WOLFSSL_LINUXKM_SIMD_ARM
|
||||
#include <asm/fpsimd.h>
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
#error LINUXKM_SIMD_IRQ is unavailable on ARM (not implemented)
|
||||
#endif
|
||||
#ifndef SAVE_VECTOR_REGISTERS
|
||||
#define SAVE_VECTOR_REGISTERS(fail_clause) { int _svr_ret = save_vector_registers_arm(); if (_svr_ret != 0) { fail_clause } }
|
||||
#endif
|
||||
@ -195,11 +207,6 @@
|
||||
#define NO_THREAD_LS
|
||||
#define NO_ATTRIBUTE_CONSTRUCTOR
|
||||
|
||||
/* kvmalloc()/kvfree() and friends added in linux commit a7c3e901 */
|
||||
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
|
||||
#define HAVE_KVMALLOC
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_FIPS
|
||||
extern int wolfCrypt_FIPS_first(void);
|
||||
extern int wolfCrypt_FIPS_last(void);
|
||||
@ -215,7 +222,7 @@
|
||||
#endif
|
||||
|
||||
#if defined(__PIE__) && !defined(USE_WOLFSSL_LINUXKM_PIE_REDIRECT_TABLE)
|
||||
#error "compiling -fPIE without PIE support."
|
||||
#error "compiling -fPIE requires PIE redirect table."
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_FIPS) && !defined(HAVE_LINUXKM_PIE_SUPPORT)
|
||||
@ -307,42 +314,37 @@
|
||||
struct task_struct *(*get_current)(void);
|
||||
int (*preempt_count)(void);
|
||||
|
||||
#ifdef WOLFSSL_LINUXKM_SIMD_X86
|
||||
typeof(irq_fpu_usable) *irq_fpu_usable;
|
||||
/* kernel_fpu_begin() replaced by kernel_fpu_begin_mask() in commit e4512289,
|
||||
* released in kernel 5.11, backported to 5.4.93
|
||||
*/
|
||||
#ifdef kernel_fpu_begin
|
||||
typeof(kernel_fpu_begin_mask) *kernel_fpu_begin_mask;
|
||||
#else
|
||||
typeof(kernel_fpu_begin) *kernel_fpu_begin;
|
||||
#endif
|
||||
typeof(kernel_fpu_end) *kernel_fpu_end;
|
||||
#ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
|
||||
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
|
||||
typeof(copy_fpregs_to_fpstate) *copy_fpregs_to_fpstate;
|
||||
typeof(copy_kernel_to_fpregs) *copy_kernel_to_fpregs;
|
||||
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
|
||||
typeof(save_fpregs_to_fpstate) *save_fpregs_to_fpstate;
|
||||
typeof(__restore_fpregs_from_fpstate) *__restore_fpregs_from_fpstate;
|
||||
typeof(xfeatures_mask_all) *xfeatures_mask_all;
|
||||
/*
|
||||
* #else
|
||||
* typeof(save_fpregs_to_fpstate) *save_fpregs_to_fpstate;
|
||||
* typeof(restore_fpregs_from_fpstate) *restore_fpregs_from_fpstate;
|
||||
* typeof(fpu_kernel_cfg) *fpu_kernel_cfg;
|
||||
*/
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
|
||||
typeof(cpu_number) *cpu_number;
|
||||
#else
|
||||
typeof(pcpu_hot) *pcpu_hot;
|
||||
#endif
|
||||
#endif
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
|
||||
typeof(cpu_number) *cpu_number;
|
||||
#else
|
||||
typeof(pcpu_hot) *pcpu_hot;
|
||||
#endif
|
||||
typeof(nr_cpu_ids) *nr_cpu_ids;
|
||||
typeof(nr_cpu_ids) *nr_cpu_ids;
|
||||
|
||||
#endif /* WOLFSSL_LINUXKM_SIMD_X86 */
|
||||
#if defined(CONFIG_SMP) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
|
||||
/* note the current and needed version of these were added in af449901b8 (2020-Sep-17) */
|
||||
typeof(migrate_disable) *migrate_disable;
|
||||
typeof(migrate_enable) *migrate_enable;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86
|
||||
typeof(irq_fpu_usable) *irq_fpu_usable;
|
||||
/* kernel_fpu_begin() replaced by kernel_fpu_begin_mask() in commit e4512289,
|
||||
* released in kernel 5.11, backported to 5.4.93
|
||||
*/
|
||||
#ifdef kernel_fpu_begin
|
||||
typeof(kernel_fpu_begin_mask) *kernel_fpu_begin_mask;
|
||||
#else
|
||||
typeof(kernel_fpu_begin) *kernel_fpu_begin;
|
||||
#endif
|
||||
typeof(kernel_fpu_end) *kernel_fpu_end;
|
||||
#else /* !CONFIG_X86 */
|
||||
#error WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS is set for an unsupported architecture.
|
||||
#endif /* arch */
|
||||
|
||||
#endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS */
|
||||
|
||||
typeof(__mutex_init) *__mutex_init;
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
|
||||
@ -452,37 +454,31 @@
|
||||
#undef preempt_count
|
||||
#define preempt_count (wolfssl_linuxkm_get_pie_redirect_table()->preempt_count)
|
||||
|
||||
#ifdef WOLFSSL_LINUXKM_SIMD_X86
|
||||
#define irq_fpu_usable (wolfssl_linuxkm_get_pie_redirect_table()->irq_fpu_usable)
|
||||
#ifdef kernel_fpu_begin
|
||||
#define kernel_fpu_begin_mask (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_begin_mask)
|
||||
#else
|
||||
#define kernel_fpu_begin (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_begin)
|
||||
#endif
|
||||
#define kernel_fpu_end (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_end)
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
|
||||
#define copy_fpregs_to_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->copy_fpregs_to_fpstate)
|
||||
#define copy_kernel_to_fpregs (wolfssl_linuxkm_get_pie_redirect_table()->copy_kernel_to_fpregs)
|
||||
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
|
||||
#define save_fpregs_to_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->save_fpregs_to_fpstate)
|
||||
#define __restore_fpregs_from_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->__restore_fpregs_from_fpstate)
|
||||
#define xfeatures_mask_all (*(wolfssl_linuxkm_get_pie_redirect_table()->xfeatures_mask_all))
|
||||
/*
|
||||
* #else
|
||||
* #define save_fpregs_to_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->save_fpregs_to_fpstate)
|
||||
* #define restore_fpregs_from_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->restore_fpregs_from_fpstate)
|
||||
* #define fpu_kernel_cfg (*(wolfssl_linuxkm_get_pie_redirect_table()->fpu_kernel_cfg))
|
||||
*/
|
||||
#endif
|
||||
#endif
|
||||
#ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
|
||||
#define cpu_number (*(wolfssl_linuxkm_get_pie_redirect_table()->cpu_number))
|
||||
#else
|
||||
#define pcpu_hot (*(wolfssl_linuxkm_get_pie_redirect_table()->pcpu_hot))
|
||||
#endif
|
||||
#define nr_cpu_ids (*(wolfssl_linuxkm_get_pie_redirect_table()->nr_cpu_ids))
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_SMP) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
|
||||
#define migrate_disable (*(wolfssl_linuxkm_get_pie_redirect_table()->migrate_disable))
|
||||
#define migrate_enable (*(wolfssl_linuxkm_get_pie_redirect_table()->migrate_enable))
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86
|
||||
#define irq_fpu_usable (wolfssl_linuxkm_get_pie_redirect_table()->irq_fpu_usable)
|
||||
#ifdef kernel_fpu_begin
|
||||
#define kernel_fpu_begin_mask (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_begin_mask)
|
||||
#else
|
||||
#define kernel_fpu_begin (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_begin)
|
||||
#endif
|
||||
#define kernel_fpu_end (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_end)
|
||||
#else /* !CONFIG_X86 */
|
||||
#error WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS is set for an unsupported architecture.
|
||||
#endif /* archs */
|
||||
#endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS */
|
||||
|
||||
#define __mutex_init (wolfssl_linuxkm_get_pie_redirect_table()->__mutex_init)
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
|
||||
@ -515,9 +511,9 @@
|
||||
|
||||
#endif /* USE_WOLFSSL_LINUXKM_PIE_REDIRECT_TABLE */
|
||||
|
||||
#ifdef WOLFSSL_LINUXKM_SIMD
|
||||
#ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
|
||||
|
||||
#ifdef WOLFSSL_LINUXKM_SIMD_X86
|
||||
#ifdef CONFIG_X86
|
||||
|
||||
extern __must_check int allocate_wolfcrypt_linuxkm_fpu_states(void);
|
||||
extern void free_wolfcrypt_linuxkm_fpu_states(void);
|
||||
@ -547,7 +543,7 @@
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* WOLFSSL_LINUXKM_SIMD */
|
||||
#endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS */
|
||||
|
||||
/* remove this multifariously conflicting macro, picked up from
|
||||
* Linux arch/<arch>/include/asm/current.h.
|
||||
@ -556,22 +552,6 @@
|
||||
#undef current
|
||||
#endif
|
||||
|
||||
/* prevent gcc's mm_malloc.h from being included, since it unconditionally
|
||||
* includes stdlib.h, which is kernel-incompatible.
|
||||
*/
|
||||
#define _MM_MALLOC_H_INCLUDED
|
||||
|
||||
#ifdef HAVE_KVMALLOC
|
||||
#define malloc(x) kvmalloc_node(x, GFP_KERNEL, NUMA_NO_NODE)
|
||||
#define free(x) kvfree(x)
|
||||
void *lkm_realloc(void *ptr, size_t newsize);
|
||||
#define realloc(x, y) lkm_realloc(x, y)
|
||||
#else
|
||||
#define malloc(x) kmalloc(x, GFP_KERNEL)
|
||||
#define free(x) kfree(x)
|
||||
#define realloc(x,y) krealloc(x, y, GFP_KERNEL)
|
||||
#endif
|
||||
|
||||
/* min() and max() in linux/kernel.h over-aggressively type-check, producing
|
||||
* myriad spurious -Werrors throughout the codebase.
|
||||
*/
|
||||
@ -618,9 +598,41 @@
|
||||
#include <linux/mutex.h>
|
||||
typedef struct mutex wolfSSL_Mutex;
|
||||
|
||||
#define XMALLOC(s, h, t) ({(void)(h); (void)(t); kmalloc(s, GFP_KERNEL);})
|
||||
#define XFREE(p, h, t) ({void* _xp; (void)(h); _xp = (p); if(_xp) kfree(_xp);})
|
||||
#define XREALLOC(p, n, h, t) ({(void)(h); (void)(t); krealloc((p), (n), GFP_KERNEL);})
|
||||
/* prevent gcc's mm_malloc.h from being included, since it unconditionally
|
||||
* includes stdlib.h, which is kernel-incompatible.
|
||||
*/
|
||||
#define _MM_MALLOC_H_INCLUDED
|
||||
|
||||
/* fun fact: since linux commit 59bb47985c, kmalloc with power-of-2 size is
|
||||
* aligned to the size.
|
||||
*/
|
||||
#define WC_LINUXKM_ROUND_UP_P_OF_2(x) ( \
|
||||
{ \
|
||||
size_t _alloc_sz = (x); \
|
||||
_alloc_sz = 1UL << ((sizeof(_alloc_sz) * 8UL) - __builtin_clzl(_alloc_sz)); \
|
||||
_alloc_sz; \
|
||||
})
|
||||
#ifdef HAVE_KVMALLOC
|
||||
#define malloc(size) kvmalloc_node(WC_LINUXKM_ROUND_UP_P_OF_2(size), GFP_KERNEL, NUMA_NO_NODE)
|
||||
#define free(ptr) kvfree(ptr)
|
||||
void *lkm_realloc(void *ptr, size_t newsize);
|
||||
#define realloc(ptr, newsize) lkm_realloc(ptr, WC_LINUXKM_ROUND_UP_P_OF_2(newsize))
|
||||
#else
|
||||
#define malloc(size) kmalloc(WC_LINUXKM_ROUND_UP_P_OF_2(size), GFP_KERNEL)
|
||||
#define free(ptr) kfree(ptr)
|
||||
#define realloc(ptr, newsize) krealloc(ptr, WC_LINUXKM_ROUND_UP_P_OF_2(newsize), GFP_KERNEL)
|
||||
#endif
|
||||
|
||||
#ifdef WOLFSSL_TRACK_MEMORY
|
||||
#include <wolfssl/wolfcrypt/memory.h>
|
||||
#define XMALLOC(s, h, t) ({(void)(h); (void)(t); wolfSSL_Malloc(s);})
|
||||
#define XFREE(p, h, t) ({void* _xp; (void)(h); _xp = (p); if(_xp) wolfSSL_Free(_xp);})
|
||||
#define XREALLOC(p, n, h, t) ({(void)(h); (void)(t); wolfSSL_Realloc(p, n);})
|
||||
#else
|
||||
#define XMALLOC(s, h, t) ({(void)(h); (void)(t); malloc(s);})
|
||||
#define XFREE(p, h, t) ({void* _xp; (void)(h); _xp = (p); if(_xp) free(_xp);})
|
||||
#define XREALLOC(p, n, h, t) ({(void)(h); (void)(t); realloc(p, n);})
|
||||
#endif
|
||||
|
||||
#include <linux/limits.h>
|
||||
|
||||
|
@ -113,6 +113,15 @@ static void lkmFipsCb(int ok, int err, const char* hash)
 static int updateFipsHash(void);
 #endif

+#ifdef WOLFSSL_LINUXKM_BENCHMARKS
+#undef HAVE_PTHREAD
+#define STRING_USER
+#define NO_MAIN_FUNCTION
+#define current_time benchmark_current_time
+#define WOLFSSL_NO_FLOAT_FMT
+#include "wolfcrypt/benchmark/benchmark.c"
+#endif /* WOLFSSL_LINUXKM_BENCHMARKS */
+
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
 static int __init wolfssl_init(void)
 #else
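The hunk above compiles the userspace benchmark harness straight into module_hooks.c by predefining a few feature macros and then #include-ing the .c file. A reduced, self-contained sketch of how a source file is typically written so it can be embedded that way; the names are illustrative, not the actual contents of benchmark.c:

    /* Callable entry point, always built, so an including translation unit can invoke it. */
    int benchmark_entry(int argc, char **argv);

    int benchmark_entry(int argc, char **argv)
    {
        (void)argc; (void)argv;
        /* ... run the individual benchmarks ... */
        return 0;
    }

    #ifndef NO_MAIN_FUNCTION
    int main(int argc, char **argv)   /* only emitted for the standalone userspace build */
    {
        return benchmark_entry(argc, argv);
    }
    #endif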
@ -202,10 +211,9 @@ static int wolfssl_init(void)
      * the true module start address, which is potentially useful to an
      * attacker.
      */
-    pr_info("wolfCrypt container hashes (spans): %x (%lu) %x (%lu), text base %pK, ro base %pK\n",
+    pr_info("wolfCrypt container hashes (spans): text 0x%x (%lu), rodata 0x%x (%lu)\n",
             text_hash, pie_text_end-pie_text_start,
-            rodata_hash, pie_rodata_end-pie_rodata_start,
-            THIS_MODULE_TEXT_BASE, THIS_MODULE_RO_BASE);
+            rodata_hash, pie_rodata_end-pie_rodata_start);
     }
 #endif /* HAVE_LINUXKM_PIE_SUPPORT */

@ -277,6 +285,10 @@ static int wolfssl_init(void)
     pr_info("wolfCrypt self-test passed.\n");
 #endif

+#ifdef WOLFSSL_LINUXKM_BENCHMARKS
+    wolfcrypt_benchmark_main(0, (char**)NULL);
+#endif
+
 #ifdef WOLFCRYPT_ONLY
     pr_info("wolfCrypt " LIBWOLFSSL_VERSION_STRING " loaded%s"
             ".\nSee https://www.wolfssl.com/ for more information.\n"
@ -334,15 +346,6 @@ static int my_preempt_count(void) {
|
||||
return preempt_count();
|
||||
}
|
||||
|
||||
#if defined(WOLFSSL_LINUXKM_SIMD_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0))
|
||||
static int my_copy_fpregs_to_fpstate(struct fpu *fpu) {
|
||||
return copy_fpregs_to_fpstate(fpu);
|
||||
}
|
||||
static void my_copy_kernel_to_fpregs(union fpregs_state *fpstate) {
|
||||
copy_kernel_to_fpregs(fpstate);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
|
||||
memset(
|
||||
&wolfssl_linuxkm_pie_redirect_table,
|
||||
@ -430,6 +433,20 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
|
||||
wolfssl_linuxkm_pie_redirect_table.get_current = my_get_current_thread;
|
||||
wolfssl_linuxkm_pie_redirect_table.preempt_count = my_preempt_count;
|
||||
|
||||
#ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
|
||||
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
|
||||
wolfssl_linuxkm_pie_redirect_table.cpu_number = &cpu_number;
|
||||
#else
|
||||
wolfssl_linuxkm_pie_redirect_table.pcpu_hot = &pcpu_hot;
|
||||
#endif
|
||||
wolfssl_linuxkm_pie_redirect_table.nr_cpu_ids = &nr_cpu_ids;
|
||||
|
||||
#if defined(CONFIG_SMP) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
|
||||
wolfssl_linuxkm_pie_redirect_table.migrate_disable = &migrate_disable;
|
||||
wolfssl_linuxkm_pie_redirect_table.migrate_enable = &migrate_enable;
|
||||
#endif
|
||||
|
||||
#ifdef WOLFSSL_LINUXKM_SIMD_X86
|
||||
wolfssl_linuxkm_pie_redirect_table.irq_fpu_usable = irq_fpu_usable;
|
||||
#ifdef kernel_fpu_begin
|
||||
@ -440,29 +457,9 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
|
||||
kernel_fpu_begin;
|
||||
#endif
|
||||
wolfssl_linuxkm_pie_redirect_table.kernel_fpu_end = kernel_fpu_end;
|
||||
#ifdef LINUXKM_SIMD_IRQ
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
|
||||
wolfssl_linuxkm_pie_redirect_table.copy_fpregs_to_fpstate = my_copy_fpregs_to_fpstate;
|
||||
wolfssl_linuxkm_pie_redirect_table.copy_kernel_to_fpregs = my_copy_kernel_to_fpregs;
|
||||
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
|
||||
wolfssl_linuxkm_pie_redirect_table.save_fpregs_to_fpstate = save_fpregs_to_fpstate;
|
||||
wolfssl_linuxkm_pie_redirect_table.__restore_fpregs_from_fpstate = __restore_fpregs_from_fpstate;
|
||||
wolfssl_linuxkm_pie_redirect_table.xfeatures_mask_all = &xfeatures_mask_all;
|
||||
/*
|
||||
* #else
|
||||
* wolfssl_linuxkm_pie_redirect_table.save_fpregs_to_fpstate = save_fpregs_to_fpstate;
|
||||
* wolfssl_linuxkm_pie_redirect_table.restore_fpregs_from_fpstate = restore_fpregs_from_fpstate;
|
||||
* wolfssl_linuxkm_pie_redirect_table.fpu_kernel_cfg = &fpu_kernel_cfg;
|
||||
*/
|
||||
#endif
|
||||
#endif
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
|
||||
wolfssl_linuxkm_pie_redirect_table.cpu_number = &cpu_number;
|
||||
#else
|
||||
wolfssl_linuxkm_pie_redirect_table.pcpu_hot = &pcpu_hot;
|
||||
#endif
|
||||
wolfssl_linuxkm_pie_redirect_table.nr_cpu_ids = &nr_cpu_ids;
|
||||
#endif
|
||||
#endif /* WOLFSSL_LINUXKM_SIMD_X86 */
|
||||
|
||||
#endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS */
|
||||
|
||||
wolfssl_linuxkm_pie_redirect_table.__mutex_init = __mutex_init;
|
||||
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
|
||||
|
@ -33329,7 +33329,6 @@ static int DoSessionTicket(WOLFSSL* ssl, const byte* input, word32* inOutIdx,
         ret = DoClientHelloStateless(ssl, input, inOutIdx, helloSz);
         if (ret != 0 || !ssl->options.dtlsStateful) {
             int alertType = TranslateErrorToAlert(ret);
-            if (alertType != invalid_alert)
+            if (alertType != invalid_alert) {
                 int err;

@ -3653,7 +3653,9 @@ static int wolfssl_asn1_time_to_tm(const WOLFSSL_ASN1_TIME* asnTime,
     int asn1TimeBufLen;
     int i = 0;
 #ifdef XMKTIME
-    struct tm localTm = {0};
+    struct tm localTm;
+
+    XMEMSET(&localTm, 0, sizeof localTm);
 #endif

     /* Get the string buffer - fixed array, can't fail. */
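Both forms zero the struct; the likely motivation for the switch (an inference, not stated in the diff) is that some toolchains warn about "= {0}" on structs whose first member is itself an aggregate, while an explicit memset is warning-free everywhere. A small standalone comparison, assuming XMEMSET maps to memset as it does in most wolfSSL ports:

    #include <string.h>
    #include <time.h>

    static void zero_tm_examples(void)
    {
        struct tm a = {0};           /* pre-change style */
        struct tm b;
        memset(&b, 0, sizeof b);     /* post-change style, as XMEMSET typically expands to */
        (void)a; (void)b;
    }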
File diff suppressed because it is too large
@ -81,7 +81,7 @@ void bench_sha3_384(int useDeviceID);
 void bench_sha3_512(int useDeviceID);
 void bench_shake128(int useDeviceID);
 void bench_shake256(int useDeviceID);
-int bench_ripemd(void);
+void bench_ripemd(void);
 void bench_cmac(int useDeviceID);
 void bench_scrypt(void);
 void bench_hmac_md5(int useDeviceID);
@ -3129,6 +3129,12 @@ static WARN_UNUSED_RESULT int wc_AesDecrypt(
         checkAESNI = 1;
     }
     if (haveAESNI) {
+    #ifdef WOLFSSL_LINUXKM
+        /* runtime alignment check */
+        if ((wc_ptr_t)&aes->key & (wc_ptr_t)0xf) {
+            return BAD_ALIGN_E;
+        }
+    #endif
         aes->use_aesni = 1;
         if (iv)
             XMEMCPY(aes->reg, iv, AES_BLOCK_SIZE);
@ -1864,7 +1864,7 @@ int wc_RNG_GenerateBlock(WC_RNG* rng, byte* output, word32 sz)

 #ifdef CUSTOM_RAND_GENERATE_BLOCK
     XMEMSET(output, 0, sz);
-    ret = CUSTOM_RAND_GENERATE_BLOCK(output, sz);
+    ret = (int)CUSTOM_RAND_GENERATE_BLOCK(output, sz);
 #else

     #ifdef HAVE_HASHDRBG
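The added (int) cast matters when the user-supplied CUSTOM_RAND_GENERATE_BLOCK macro expands to a call whose return type is not int, which would otherwise trigger implicit-conversion warnings in strict builds. A hypothetical sketch of such a user definition (the helper, its byte fill, and the type alias are illustrative, not from wolfSSL):

    typedef unsigned int word32_like;   /* stand-in for wolfSSL's word32 */

    static word32_like my_rand_block(unsigned char *out, word32_like sz)
    {
        while (sz--) *out++ = 0x5a;     /* placeholder bytes, not real entropy */
        return 0;
    }

    #define CUSTOM_RAND_GENERATE_BLOCK(out, sz) my_rand_block((out), (sz))

    static int fill(unsigned char *buf, word32_like len)
    {
        return (int)CUSTOM_RAND_GENERATE_BLOCK(buf, len);   /* explicit narrowing, as in the diff */
    }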
@ -207,7 +207,7 @@ int wolfCrypt_Init(void)
         }
     #endif

-    #if defined(WOLFSSL_LINUXKM_SIMD_X86)
+    #ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
         ret = allocate_wolfcrypt_linuxkm_fpu_states();
         if (ret != 0) {
             WOLFSSL_MSG("allocate_wolfcrypt_linuxkm_fpu_states failed");
@ -466,7 +466,7 @@ int wolfCrypt_Cleanup(void)
         rpcmem_deinit();
         wolfSSL_CleanupHandle();
     #endif
-    #if defined(WOLFSSL_LINUXKM_SIMD_X86)
+    #ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
         free_wolfcrypt_linuxkm_fpu_states();
     #endif
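The state table allocated in wolfCrypt_Init above is what backs the SAVE_VECTOR_REGISTERS()/RESTORE_VECTOR_REGISTERS() macros declared in linuxkm_wc_port.h. A minimal usage sketch, assuming those macro definitions shown earlier in this PR; do_vector_work() itself is hypothetical:

    static int do_vector_work(void)
    {
        SAVE_VECTOR_REGISTERS(return BAD_STATE_E;);  /* fail_clause runs if FPU/SIMD state can't be saved */

        /* ... SIMD or floating-point work, e.g. the benchmark autorun ... */

        RESTORE_VECTOR_REGISTERS();
        return 0;
    }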
@ -3207,56 +3207,6 @@ char* mystrnstr(const char* s1, const char* s2, unsigned int n)

 #endif /* WOLFSSL_NUCLEUS_1_2 */

-#if defined(WOLFSSL_LINUXKM) && defined(HAVE_KVMALLOC)
-/* adapted from kvrealloc() draft by Changli Gao, 2010-05-13 */
-void *lkm_realloc(void *ptr, size_t newsize) {
-    void *nptr;
-    size_t oldsize;
-
-    if (unlikely(newsize == 0)) {
-        kvfree(ptr);
-        return ZERO_SIZE_PTR;
-    }
-
-    if (unlikely(ptr == NULL))
-        return kvmalloc_node(newsize, GFP_KERNEL, NUMA_NO_NODE);
-
-    if (is_vmalloc_addr(ptr)) {
-        /* no way to discern the size of the old allocation,
-         * because the kernel doesn't export find_vm_area(). if
-         * it did, we could then call get_vm_area_size() on the
-         * returned struct vm_struct.
-         */
-        return NULL;
-    } else {
-#ifndef __PIE__
-        struct page *page;
-
-        page = virt_to_head_page(ptr);
-        if (PageSlab(page) || PageCompound(page)) {
-            if (newsize < PAGE_SIZE)
-#endif /* ! __PIE__ */
-                return krealloc(ptr, newsize, GFP_KERNEL);
-#ifndef __PIE__
-            oldsize = ksize(ptr);
-        } else {
-            oldsize = page->private;
-            if (newsize <= oldsize)
-                return ptr;
-        }
-#endif /* ! __PIE__ */
-    }
-
-    nptr = kvmalloc_node(newsize, GFP_KERNEL, NUMA_NO_NODE);
-    if (nptr != NULL) {
-        memcpy(nptr, ptr, oldsize);
-        kvfree(ptr);
-    }
-
-    return nptr;
-}
-#endif /* WOLFSSL_LINUXKM && HAVE_KVMALLOC */
-
 #if defined(WOLFSSL_TI_CRYPT) || defined(WOLFSSL_TI_HASH)
     #include <wolfcrypt/src/port/ti/ti-ccm.c> /* initialize and Mutex for TI Crypt Engine */
     #include <wolfcrypt/src/port/ti/ti-hash.c> /* md5, sha1, sha224, sha256 */
@ -62,15 +62,24 @@

 #include "wolfssl/wolfcrypt/settings.h"
 #include "wolfssl/wolfcrypt/logging.h"
+#include "wolfssl/wolfcrypt/memory.h"

 #if defined(WOLFSSL_TRACK_MEMORY) || defined(HAVE_STACK_SIZE) || \
     defined(HAVE_STACK_SIZE_VERBOSE)
-    #include <stdio.h>
+    #ifdef NO_STDIO_FILESYSTEM
+        /* if wc_port.h/linuxkm_wc_port.h doesn't define printf, then the user
+         * needs to define it.
+         */
+        #define wc_mem_printf(...) printf(__VA_ARGS__)
+    #else
+        #include <stdio.h>
+        #define wc_mem_printf(...) fprintf(stderr, __VA_ARGS__)
+    #endif
 #endif

 #if defined(WOLFSSL_TRACK_MEMORY)
     #define DO_MEM_STATS
-    #if defined(__linux__) || defined(__MACH__)
+    #if (defined(__linux__) && !defined(WOLFSSL_LINUXKM)) || defined(__MACH__)
         #define DO_MEM_LIST
     #endif
 #endif
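Routing all of the memory-tracking and stack-checking diagnostics through one variadic macro is what lets the later hunks in this file replace every fprintf(stderr, ...) mechanically: a hosted build keeps stderr, while a build with NO_STDIO_FILESYSTEM (such as the kernel module, where linuxkm_wc_port.h supplies printf) goes through whatever printf the port provides. A reduced sketch of the indirection and a caller, mirroring the hunk above rather than quoting it:

    #ifdef NO_STDIO_FILESYSTEM
        #define wc_mem_printf(...) printf(__VA_ARGS__)            /* port header must supply printf */
    #else
        #include <stdio.h>
        #define wc_mem_printf(...) fprintf(stderr, __VA_ARGS__)   /* normal hosted build */
    #endif

    static void report_leak(void *p, unsigned int sz)
    {
        wc_mem_printf("Leak: Ptr %p, Size %u\n", p, sz);
    }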
@ -160,7 +169,7 @@ static WC_INLINE void* TrackMalloc(size_t sz)
|
||||
|
||||
#ifdef WOLFSSL_DEBUG_MEMORY
|
||||
#ifdef WOLFSSL_DEBUG_MEMORY_PRINT
|
||||
fprintf(stderr, "Alloc: %p -> %u at %s:%d\n", header->thisMemory, (word32)sz, func, line);
|
||||
wc_mem_printf("Alloc: %p -> %u at %s:%d\n", header->thisMemory, (word32)sz, func, line);
|
||||
#else
|
||||
(void)func;
|
||||
(void)line;
|
||||
@ -268,7 +277,7 @@ static WC_INLINE void TrackFree(void* ptr)
|
||||
|
||||
#ifdef WOLFSSL_DEBUG_MEMORY
|
||||
#ifdef WOLFSSL_DEBUG_MEMORY_PRINT
|
||||
fprintf(stderr, "Free: %p -> %u at %s:%d\n", ptr, (word32)sz, func, line);
|
||||
wc_mem_printf("Free: %p -> %u at %s:%d\n", ptr, (word32)sz, func, line);
|
||||
#else
|
||||
(void)func;
|
||||
(void)line;
|
||||
@ -334,11 +343,11 @@ static WC_INLINE int InitMemoryTracker(void)
|
||||
|
||||
ret = wolfSSL_GetAllocators(&mfDefault, &ffDefault, &rfDefault);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "wolfSSL GetAllocators failed to get the defaults\n");
|
||||
wc_mem_printf("wolfSSL GetAllocators failed to get the defaults\n");
|
||||
}
|
||||
ret = wolfSSL_SetAllocators(TrackMalloc, TrackFree, TrackRealloc);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "wolfSSL SetAllocators failed for track memory\n");
|
||||
wc_mem_printf("wolfSSL SetAllocators failed for track memory\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -377,11 +386,11 @@ static WC_INLINE void ShowMemoryTracker(void)
|
||||
#endif
|
||||
|
||||
#ifdef DO_MEM_STATS
|
||||
fprintf(stderr, "total Allocs = %9ld\n", ourMemStats.totalAllocs);
|
||||
fprintf(stderr, "total Deallocs = %9ld\n", ourMemStats.totalDeallocs);
|
||||
fprintf(stderr, "total Bytes = %9ld\n", ourMemStats.totalBytes);
|
||||
fprintf(stderr, "peak Bytes = %9ld\n", ourMemStats.peakBytes);
|
||||
fprintf(stderr, "current Bytes = %9ld\n", ourMemStats.currentBytes);
|
||||
wc_mem_printf("total Allocs = %9ld\n", ourMemStats.totalAllocs);
|
||||
wc_mem_printf("total Deallocs = %9ld\n", ourMemStats.totalDeallocs);
|
||||
wc_mem_printf("total Bytes = %9ld\n", ourMemStats.totalBytes);
|
||||
wc_mem_printf("peak Bytes = %9ld\n", ourMemStats.peakBytes);
|
||||
wc_mem_printf("current Bytes = %9ld\n", ourMemStats.currentBytes);
|
||||
#endif
|
||||
|
||||
#ifdef DO_MEM_LIST
|
||||
@ -389,16 +398,14 @@ static WC_INLINE void ShowMemoryTracker(void)
|
||||
/* print list of allocations */
|
||||
memHint* header;
|
||||
for (header = ourMemList.head; header != NULL; header = header->next) {
|
||||
fprintf(stderr, "Leak: Ptr %p, Size %u"
|
||||
#ifdef WOLFSSL_DEBUG_MEMORY
|
||||
", Func %s, Line %d"
|
||||
#endif
|
||||
"\n",
|
||||
(byte*)header + sizeof(memHint), (unsigned int)header->thisSize
|
||||
#ifdef WOLFSSL_DEBUG_MEMORY
|
||||
, header->func, header->line
|
||||
#endif
|
||||
);
|
||||
wc_mem_printf("Leak: Ptr %p, Size %u, Func %s, Line %d\n",
|
||||
(byte*)header + sizeof(memHint), (unsigned int)header->thisSize,
|
||||
header->func, header->line);
|
||||
#else
|
||||
wc_mem_printf("Leak: Ptr %p, Size %u\n",
|
||||
(byte*)header + sizeof(memHint), (unsigned int)header->thisSize);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
@ -538,7 +545,7 @@ int StackSizeHWMReset(void)
|
||||
|
||||
#define STACK_SIZE_CHECKPOINT_MSG(msg) ({ \
|
||||
ssize_t HWM = StackSizeHWM_OffsetCorrected(); \
|
||||
fprintf(stderr, "%ld\t%s\n", (long int)HWM, msg); \
|
||||
wc_mem_printf("%ld\t%s\n", (long int)HWM, msg); \
|
||||
StackSizeHWMReset(); \
|
||||
})
|
||||
|
||||
@ -549,7 +556,7 @@ int StackSizeHWMReset(void)
|
||||
printf(" relative stack peak usage = %ld bytes\n", (long int)HWM); \
|
||||
_ret = StackSizeHWMReset(); \
|
||||
if ((max >= 0) && (HWM > (ssize_t)(max))) { \
|
||||
fprintf(stderr, \
|
||||
wc_mem_printf( \
|
||||
" relative stack usage at %s L%d exceeds designated max %ld bytes.\n", \
|
||||
__FILE__, __LINE__, (long int)(max)); \
|
||||
_ret = -1; \
|
||||
@ -585,7 +592,7 @@ static WC_INLINE int StackSizeCheck(struct func_args* args, thread_func tf)
|
||||
|
||||
ret = posix_memalign((void**)&myStack, sysconf(_SC_PAGESIZE), stackSize);
|
||||
if (ret != 0 || myStack == NULL) {
|
||||
fprintf(stderr, "posix_memalign failed\n");
|
||||
wc_mem_printf("posix_memalign failed\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -593,13 +600,13 @@ static WC_INLINE int StackSizeCheck(struct func_args* args, thread_func tf)
|
||||
|
||||
ret = pthread_attr_init(&myAttr);
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "attr_init failed\n");
|
||||
wc_mem_printf("attr_init failed\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = pthread_attr_setstack(&myAttr, myStack, stackSize);
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "attr_setstackaddr failed\n");
|
||||
wc_mem_printf("attr_setstackaddr failed\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -623,7 +630,7 @@ static WC_INLINE int StackSizeCheck(struct func_args* args, thread_func tf)
|
||||
|
||||
ret = pthread_join(threadId, &status);
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "pthread_join failed\n");
|
||||
wc_mem_printf("pthread_join failed\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -672,7 +679,7 @@ static WC_INLINE int StackSizeCheck_launch(struct func_args* args,
|
||||
|
||||
ret = posix_memalign((void**)&myStack, sysconf(_SC_PAGESIZE), stackSize);
|
||||
if (ret != 0 || myStack == NULL) {
|
||||
fprintf(stderr, "posix_memalign failed\n");
|
||||
wc_mem_printf("posix_memalign failed\n");
|
||||
free(shim_args);
|
||||
return -1;
|
||||
}
|
||||
@ -681,7 +688,7 @@ static WC_INLINE int StackSizeCheck_launch(struct func_args* args,
|
||||
|
||||
ret = pthread_attr_init(&myAttr);
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "attr_init failed\n");
|
||||
wc_mem_printf("attr_init failed\n");
|
||||
free(shim_args);
|
||||
free(myStack);
|
||||
return ret;
|
||||
@ -689,7 +696,7 @@ static WC_INLINE int StackSizeCheck_launch(struct func_args* args,
|
||||
|
||||
ret = pthread_attr_setstack(&myAttr, myStack, stackSize);
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "attr_setstackaddr failed\n");
|
||||
wc_mem_printf("attr_setstackaddr failed\n");
|
||||
}
|
||||
|
||||
shim_args->myStack = myStack;
|
||||
@ -721,7 +728,7 @@ static WC_INLINE int StackSizeCheck_reap(pthread_t threadId, void *stack_context
|
||||
void *status;
|
||||
int ret = pthread_join(threadId, &status);
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "pthread_join failed\n");
|
||||
wc_mem_printf("pthread_join failed\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -770,12 +777,12 @@ static WC_INLINE void StackTrap(void)
|
||||
{
|
||||
struct rlimit rl;
|
||||
if (getrlimit(RLIMIT_STACK, &rl) != 0) {
|
||||
fprintf(stderr, "getrlimit failed\n");
|
||||
wc_mem_printf("getrlimit failed\n");
|
||||
}
|
||||
printf("rlim_cur = %llu\n", rl.rlim_cur);
|
||||
rl.rlim_cur = 1024*21; /* adjust trap size here */
|
||||
if (setrlimit(RLIMIT_STACK, &rl) != 0) {
|
||||
fprintf(stderr, "setrlimit failed\n");
|
||||
wc_mem_printf("setrlimit failed\n");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -598,8 +598,8 @@ typedef struct w64wrapper {
     #define WC_DECLARE_ARRAY(VAR_NAME, VAR_TYPE, VAR_ITEMS, VAR_SIZE, HEAP) \
         VAR_TYPE VAR_NAME[VAR_ITEMS][VAR_SIZE]
     #define WC_INIT_ARRAY(VAR_NAME, VAR_TYPE, VAR_ITEMS, VAR_SIZE, HEAP) do {} while(0)
-    #define WC_FREE_VAR(VAR_NAME, HEAP) /* nothing to free, its stack */
-    #define WC_FREE_ARRAY(VAR_NAME, VAR_ITEMS, HEAP) /* nothing to free, its stack */
+    #define WC_FREE_VAR(VAR_NAME, HEAP) do {} while(0) /* nothing to free, its stack */
+    #define WC_FREE_ARRAY(VAR_NAME, VAR_ITEMS, HEAP) do {} while(0) /* nothing to free, its stack */

     #define WC_DECLARE_ARRAY_DYNAMIC_DEC(VAR_NAME, VAR_TYPE, VAR_ITEMS, VAR_SIZE, HEAP) \
         VAR_TYPE* VAR_NAME[VAR_ITEMS]; \
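Defining the deliberately empty cleanup macros as do {} while(0) instead of expanding to nothing keeps each invocation a single ordinary statement, so callers that use them inside an if/else neither warn about an empty body nor change meaning. A small illustrative sketch (the macro and function names below are hypothetical, not the wolfSSL ones):

    #define FREE_NOTHING(p)  do {} while (0)

    static void release(int have_heap_copy, void *p)
    {
        if (have_heap_copy)
            FREE_NOTHING(p);   /* expands to a real (empty) statement, no -Wempty-body warning */
        else
            (void)p;           /* some other action */
    }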