Mirror of https://github.com/wolfSSL/wolfssl.git (synced 2025-07-30 18:57:27 +02:00)
linuxkm/linuxkm_memory.c: refactor SAVE/RESTORE_VECTOR_REGISTERS() to be per-process rather than per-CPU, and add migrate_disable/enable() to kernel_fpu_begin/end() because preempt_disable() is just a barrier on _PREEMPT_VOLUNTARY kernels;
linuxkm/linuxkm_wc_port.h: activate SAVE/RESTORE_VECTOR_REGISTERS() whenever defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) for benchmark.c support, independent of vector crypto features; fix and optimize various alignment issues with stack and heap allocations; fix macro definitions for XMALLOC/XREALLOC/XFREE to correctly use kvmalloc and friends when defined(HAVE_KVMALLOC), and to use wolfSSL_Malloc() and friends when defined(WOLFSSL_TRACK_MEMORY); purge stale LINUXKM_SIMD_IRQ code.
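Note: in outline, the kernel_fpu_begin()/kernel_fpu_end() pairing that this refactor settles on is the following — a simplified sketch of the logic added in linuxkm/linuxkm_memory.c below, not a drop-in implementation (the real code also tracks per-process nesting and the PF_KTHREAD/TIF_NEED_FPU_LOAD cases):

    /* Simplified sketch: on CONFIG_PREEMPT_VOLUNTARY kernels preempt_disable()
     * compiles down to a mere barrier, so a task could migrate between
     * kernel_fpu_begin() and kernel_fpu_end(); pinning the task with
     * migrate_disable()/migrate_enable() (exported since kernel 5.7, per the
     * guards in the diff) keeps the begin/end pair on one CPU.
     */
    static void fpu_begin_sketch(void)
    {
    #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT)
        migrate_disable();
    #endif
        kernel_fpu_begin();
    }

    static void fpu_end_sketch(void)
    {
        kernel_fpu_end();
    #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT)
        migrate_enable();
    #endif
    }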
@@ -90,6 +90,11 @@ ifeq "$(ENABLED_LINUXKM_PIE)" "yes"
 $(obj)/linuxkm/module_hooks.o: ccflags-y += $(PIE_SUPPORT_FLAGS)
 endif
 
+ifeq "$(ENABLED_LINUXKM_BENCHMARKS)" "yes"
+$(obj)/linuxkm/module_hooks.o: ccflags-y = $(WOLFSSL_CFLAGS) $(CFLAGS_FPU_ENABLE) $(CFLAGS_SIMD_ENABLE) $(PIE_SUPPORT_FLAGS)
+$(obj)/linuxkm/module_hooks.o: asflags-y = $(WOLFSSL_ASFLAGS) $(ASFLAGS_FPU_ENABLE_SIMD_DISABLE)
+endif
+
 asflags-y := $(WOLFSSL_ASFLAGS) $(ASFLAGS_FPUSIMD_DISABLE)
 
 # vectorized implementations that are kernel-safe are listed here.
@@ -21,320 +21,262 @@
 
 /* included by wolfcrypt/src/memory.c */
 
-#if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && defined(CONFIG_X86)
-#ifdef LINUXKM_SIMD_IRQ
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
-    static union fpregs_state **wolfcrypt_linuxkm_fpu_states = NULL;
-#else
-    static struct fpstate **wolfcrypt_linuxkm_fpu_states = NULL;
-#endif
-#else
-    static unsigned int *wolfcrypt_linuxkm_fpu_states = NULL;
-    #define WC_FPU_COUNT_MASK 0x7fffffffU
-    #define WC_FPU_SAVED_MASK 0x80000000U
-#endif
-
-    static WARN_UNUSED_RESULT inline int am_in_hard_interrupt_handler(void)
-    {
-        return (preempt_count() & (NMI_MASK | HARDIRQ_MASK)) != 0;
-    }
-
-    WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
-    {
-#ifdef LINUXKM_SIMD_IRQ
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
-        wolfcrypt_linuxkm_fpu_states =
-            (union fpregs_state **)kzalloc(nr_cpu_ids
-                                           * sizeof(struct fpu_state *),
-                                           GFP_KERNEL);
-#else
-        wolfcrypt_linuxkm_fpu_states =
-            (struct fpstate **)kzalloc(nr_cpu_ids
-                                       * sizeof(struct fpstate *),
-                                       GFP_KERNEL);
-#endif
-#else
-        wolfcrypt_linuxkm_fpu_states =
-            (unsigned int *)kzalloc(nr_cpu_ids * sizeof(unsigned int),
-                                    GFP_KERNEL);
-#endif
-
-        if (! wolfcrypt_linuxkm_fpu_states) {
-            pr_err("warning, allocation of %lu bytes for "
-                   "wolfcrypt_linuxkm_fpu_states failed.\n",
-                   nr_cpu_ids * sizeof(struct fpu_state *));
-            return MEMORY_E;
-        }
-#ifdef LINUXKM_SIMD_IRQ
-        {
-            typeof(nr_cpu_ids) i;
-            for (i=0; i<nr_cpu_ids; ++i) {
-                _Static_assert(sizeof(union fpregs_state) <= PAGE_SIZE,
-                               "union fpregs_state is larger than expected.");
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
-                wolfcrypt_linuxkm_fpu_states[i] =
-                    (union fpregs_state *)kzalloc(PAGE_SIZE
-                                                  /* sizeof(union fpregs_state) */,
-                                                  GFP_KERNEL);
-#else
-                wolfcrypt_linuxkm_fpu_states[i] =
-                    (struct fpstate *)kzalloc(PAGE_SIZE
-                                              /* sizeof(struct fpstate) */,
-                                              GFP_KERNEL);
-#endif
-                if (! wolfcrypt_linuxkm_fpu_states[i])
-                    break;
-                /* double-check that the allocation is 64-byte-aligned as needed
-                 * for xsave.
-                 */
-                if ((unsigned long)wolfcrypt_linuxkm_fpu_states[i] & 63UL) {
-                    pr_err("warning, allocation for wolfcrypt_linuxkm_fpu_states "
-                           "was not properly aligned (%px).\n",
-                           wolfcrypt_linuxkm_fpu_states[i]);
-                    kfree(wolfcrypt_linuxkm_fpu_states[i]);
-                    wolfcrypt_linuxkm_fpu_states[i] = 0;
-                    break;
-                }
-            }
-            if (i < nr_cpu_ids) {
-                pr_err("warning, only %u/%u allocations succeeded for "
-                       "wolfcrypt_linuxkm_fpu_states.\n",
-                       i, nr_cpu_ids);
-                return MEMORY_E;
-            }
-        }
-#endif /* LINUXKM_SIMD_IRQ */
-        return 0;
-    }
-
-    void free_wolfcrypt_linuxkm_fpu_states(void)
-    {
-        if (wolfcrypt_linuxkm_fpu_states) {
-#ifdef LINUXKM_SIMD_IRQ
-            typeof(nr_cpu_ids) i;
-            for (i=0; i<nr_cpu_ids; ++i) {
-                if (wolfcrypt_linuxkm_fpu_states[i])
-                    kfree(wolfcrypt_linuxkm_fpu_states[i]);
-            }
-#endif /* LINUXKM_SIMD_IRQ */
-            kfree(wolfcrypt_linuxkm_fpu_states);
-            wolfcrypt_linuxkm_fpu_states = 0;
-        }
-    }
-
-    WARN_UNUSED_RESULT int save_vector_registers_x86(void)
-    {
-        int processor_id;
-
-        preempt_disable();
-
-        processor_id = smp_processor_id();
-
-        {
-            static int _warned_on_null = -1;
-            if ((wolfcrypt_linuxkm_fpu_states == NULL)
-#ifdef LINUXKM_SIMD_IRQ
-                || (wolfcrypt_linuxkm_fpu_states[processor_id] == NULL)
-#endif
-               )
-            {
-                preempt_enable();
-                if (_warned_on_null < processor_id) {
-                    _warned_on_null = processor_id;
-                    pr_err("save_vector_registers_x86 called for cpu id %d "
-                           "with null context buffer.\n", processor_id);
-                }
-                return BAD_STATE_E;
-            }
-        }
-
-        if (! irq_fpu_usable()) {
-
-#ifdef LINUXKM_SIMD_IRQ
-            if (am_in_hard_interrupt_handler()) {
-
-                /* allow for nested calls */
-                if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] != 0) {
-                    if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] == 255) {
-                        preempt_enable();
-                        pr_err("save_vector_registers_x86 recursion register overflow for "
-                               "cpu id %d.\n", processor_id);
-                        return BAD_STATE_E;
-                    } else {
-                        ++((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1];
-                        return 0;
-                    }
-                }
-                /* note, fpregs_lock() is not needed here, because
-                 * interrupts/preemptions are already disabled here.
-                 */
-                {
-                    /* save_fpregs_to_fpstate() only accesses fpu->state, which
-                     * has stringent alignment requirements (64 byte cache
-                     * line), but takes a pointer to the parent struct. work
-                     * around this.
-                     */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
-                    struct fpu *fake_fpu_pointer =
-                        (struct fpu *)(((char *)wolfcrypt_linuxkm_fpu_states[processor_id])
-                                       - offsetof(struct fpu, state));
-                    copy_fpregs_to_fpstate(fake_fpu_pointer);
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
-                    struct fpu *fake_fpu_pointer =
-                        (struct fpu *)(((char *)wolfcrypt_linuxkm_fpu_states[processor_id])
-                                       - offsetof(struct fpu, state));
-                    save_fpregs_to_fpstate(fake_fpu_pointer);
-#else
-                    struct fpu *fake_fpu_pointer =
-                        (struct fpu *)(((char *)wolfcrypt_linuxkm_fpu_states[processor_id])
-                                       - offsetof(struct fpu, fpstate));
-                    save_fpregs_to_fpstate(fake_fpu_pointer);
-#endif
-                }
-                /* mark the slot as used. */
-                ((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] = 1;
-                /* note, not preempt_enable()ing, mirroring kernel_fpu_begin()
-                 * semantics, even though routine will have been entered already
-                 * non-preemptable.
-                 */
-                return 0;
-            } else
-#endif /* LINUXKM_SIMD_IRQ */
-            {
-                preempt_enable();
-                return BAD_STATE_E;
-            }
-        } else {
-
-            /* allow for nested calls */
-#ifdef LINUXKM_SIMD_IRQ
-            if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] != 0) {
-                if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] == 255) {
-                    preempt_enable();
-                    pr_err("save_vector_registers_x86 recursion register overflow for "
-                           "cpu id %d.\n", processor_id);
-                    return BAD_STATE_E;
-                } else {
-                    ++((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1];
-                    return 0;
-                }
-            }
-            kernel_fpu_begin();
-            preempt_enable(); /* kernel_fpu_begin() does its own
-                               * preempt_disable(). decrement ours.
-                               */
-            ((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] = 1;
-#else /* !LINUXKM_SIMD_IRQ */
-            if (wolfcrypt_linuxkm_fpu_states[processor_id] != 0U) {
-                if ((wolfcrypt_linuxkm_fpu_states[processor_id] & WC_FPU_COUNT_MASK)
-                    == WC_FPU_COUNT_MASK)
-                {
-                    preempt_enable();
-                    pr_err("save_vector_registers_x86 recursion register overflow for "
-                           "cpu id %d.\n", processor_id);
-                    return BAD_STATE_E;
-                } else {
-                    ++wolfcrypt_linuxkm_fpu_states[processor_id];
-                    return 0;
-                }
-            }
-
-            /* if kernel_fpu_begin() won't actually save the reg file (because
-             * it was already saved and invalidated, or because we're in a
-             * kernel thread), don't call kernel_fpu_begin() here, nor call
-             * kernel_fpu_end() in cleanup. this avoids pointless overhead. in
-             * kernels >=5.17.12 (from changes to irq_fpu_usable() in linux
-             * commit 59f5ede3bc0f, backported somewhere >5.17.5), this also
-             * fixes register corruption.
-             */
-            if ((current->flags & PF_KTHREAD) ||
-                test_thread_flag(TIF_NEED_FPU_LOAD))
-            {
-                wolfcrypt_linuxkm_fpu_states[processor_id] =
-                    WC_FPU_SAVED_MASK + 1U; /* set msb 1 to inhibit kernel_fpu_end() at cleanup. */
-                /* keep preempt_disable()d from above. */
-            } else {
-                kernel_fpu_begin();
-                preempt_enable(); /* kernel_fpu_begin() does its own
-                                   * preempt_disable(). decrement ours.
-                                   */
-                wolfcrypt_linuxkm_fpu_states[processor_id] = 1U; /* set msb 0 to trigger kernel_fpu_end() at cleanup. */
-            }
-#endif /* !LINUXKM_SIMD_IRQ */
-
-            return 0;
-        }
-    }
-
-    void restore_vector_registers_x86(void)
-    {
-        int processor_id = smp_processor_id();
-
-        if ((wolfcrypt_linuxkm_fpu_states == NULL)
-#ifdef LINUXKM_SIMD_IRQ
-            || (wolfcrypt_linuxkm_fpu_states[processor_id] == NULL)
-#endif
-           )
-        {
-            pr_err("restore_vector_registers_x86 called for cpu id %d "
-                   "with null context buffer.\n", processor_id);
-            return;
-        }
-
-#ifdef LINUXKM_SIMD_IRQ
-        if (((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] == 0)
-        {
-            pr_err("restore_vector_registers_x86 called for cpu id %d "
-                   "without saved context.\n", processor_id);
-            return;
-        }
-
-        if (--((unsigned char *)wolfcrypt_linuxkm_fpu_states[processor_id])[PAGE_SIZE-1] > 0) {
-            preempt_enable(); /* preempt_disable count will still be nonzero after this decrement. */
-            return;
-        }
-
-        if (am_in_hard_interrupt_handler()) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
-            copy_kernel_to_fpregs(wolfcrypt_linuxkm_fpu_states[processor_id]);
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
-            __restore_fpregs_from_fpstate(wolfcrypt_linuxkm_fpu_states[processor_id],
-                                          xfeatures_mask_all);
-#else
-            restore_fpregs_from_fpstate(wolfcrypt_linuxkm_fpu_states[processor_id],
-                                        fpu_kernel_cfg.max_features);
-#endif
-            preempt_enable();
-        } else {
-            kernel_fpu_end();
-        }
-#else /* !LINUXKM_SIMD_IRQ */
-        if ((wolfcrypt_linuxkm_fpu_states[processor_id] & WC_FPU_COUNT_MASK) == 0U)
-        {
-            pr_err("restore_vector_registers_x86 called for cpu id %d "
-                   "without saved context.\n", processor_id);
-            return;
-        }
-
-        if ((--wolfcrypt_linuxkm_fpu_states[processor_id] & WC_FPU_COUNT_MASK) > 0U) {
-            preempt_enable(); /* preempt_disable count may still be nonzero
-                               * after this decrement, but any remaining
-                               * count(s) aren't ours.
-                               */
-            return;
-        }
-
-        if (wolfcrypt_linuxkm_fpu_states[processor_id] == 0U) {
-            kernel_fpu_end();
-        } else {
-            preempt_enable(); /* preempt_disable count will still be nonzero
-                               * after this decrement.
-                               */
-            wolfcrypt_linuxkm_fpu_states[processor_id] = 0U;
-        }
-#endif /* !LINUXKM_SIMD_IRQ */
-
-        return;
-    }
+#ifdef HAVE_KVMALLOC
+    /* adapted from kvrealloc() draft by Changli Gao, 2010-05-13 */
+    void *lkm_realloc(void *ptr, size_t newsize) {
+        void *nptr;
+        size_t oldsize;
+
+        if (unlikely(newsize == 0)) {
+            kvfree(ptr);
+            return ZERO_SIZE_PTR;
+        }
+
+        if (unlikely(ptr == NULL))
+            return kvmalloc_node(newsize, GFP_KERNEL, NUMA_NO_NODE);
+
+        if (is_vmalloc_addr(ptr)) {
+            /* no way to discern the size of the old allocation,
+             * because the kernel doesn't export find_vm_area().  if
+             * it did, we could then call get_vm_area_size() on the
+             * returned struct vm_struct.
+             */
+            return NULL;
+        } else {
+#ifndef __PIE__
+            struct page *page;
+
+            page = virt_to_head_page(ptr);
+            if (PageSlab(page) || PageCompound(page)) {
+                if (newsize < PAGE_SIZE)
+#endif /* ! __PIE__ */
+                    return krealloc(ptr, newsize, GFP_KERNEL);
+#ifndef __PIE__
+                oldsize = ksize(ptr);
+            } else {
+                oldsize = page->private;
+                if (newsize <= oldsize)
+                    return ptr;
+            }
+#endif /* ! __PIE__ */
+        }
+
+        nptr = kvmalloc_node(newsize, GFP_KERNEL, NUMA_NO_NODE);
+        if (nptr != NULL) {
+            memcpy(nptr, ptr, oldsize);
+            kvfree(ptr);
+        }
+
+        return nptr;
+    }
+#endif /* HAVE_KVMALLOC */
+
+#if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && defined(CONFIG_X86)
+
+    static unsigned int wc_linuxkm_fpu_states_n_tracked = 0;
+
+    struct wc_thread_fpu_count_ent {
+        volatile pid_t pid;
+        unsigned int fpu_state;
+    };
+    struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_states = NULL;
+    #define WC_FPU_COUNT_MASK 0x7fffffffU
+    #define WC_FPU_SAVED_MASK 0x80000000U
+
+    WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
+    {
+        if (wc_linuxkm_fpu_states != NULL) {
+            static int warned_for_repeat_alloc = 0;
+            if (! warned_for_repeat_alloc) {
+                pr_err("attempt at repeat allocation"
+                       " in allocate_wolfcrypt_linuxkm_fpu_states\n");
+                warned_for_repeat_alloc = 1;
+            }
+            return BAD_STATE_E;
+        }
+
+        if (nr_cpu_ids >= 16)
+            wc_linuxkm_fpu_states_n_tracked = nr_cpu_ids * 2;
+        else
+            wc_linuxkm_fpu_states_n_tracked = 32;
+
+        wc_linuxkm_fpu_states =
+            (struct wc_thread_fpu_count_ent *)malloc(
+                wc_linuxkm_fpu_states_n_tracked * sizeof(wc_linuxkm_fpu_states[0]));
+
+        if (! wc_linuxkm_fpu_states) {
+            pr_err("allocation of %lu bytes for "
+                   "wc_linuxkm_fpu_states failed.\n",
+                   nr_cpu_ids * sizeof(struct fpu_state *));
+            return MEMORY_E;
+        }
+
+        memset(wc_linuxkm_fpu_states, 0, wc_linuxkm_fpu_states_n_tracked * sizeof(wc_linuxkm_fpu_states[0]));
+
+        return 0;
+    }
+
+    void free_wolfcrypt_linuxkm_fpu_states(void) {
+        struct wc_thread_fpu_count_ent *i, *i_endptr;
+        pid_t i_pid;
+
+        if (wc_linuxkm_fpu_states == NULL) {
+            pr_err("free_wolfcrypt_linuxkm_fpu_states called"
+                   " before allocate_wolfcrypt_linuxkm_fpu_states.\n");
+            return;
+        }
+
+        for (i = wc_linuxkm_fpu_states,
+                 i_endptr = &wc_linuxkm_fpu_states[wc_linuxkm_fpu_states_n_tracked];
+             i < i_endptr;
+             ++i)
+        {
+            i_pid = __atomic_load_n(&i->pid, __ATOMIC_CONSUME);
+            if (i_pid == 0)
+                continue;
+            if (i->fpu_state != 0) {
+                pr_err("free_wolfcrypt_linuxkm_fpu_states called"
+                       " with nonzero state 0x%x for pid %d.\n", i->fpu_state, i_pid);
+                i->fpu_state = 0;
+            }
+        }
+
+        free(wc_linuxkm_fpu_states);
+        wc_linuxkm_fpu_states = NULL;
+    }
+
+    /* lock-(mostly)-free thread-local storage facility for tracking recursive fpu pushing/popping */
+    static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc(int create_p) {
+        struct wc_thread_fpu_count_ent *i, *i_endptr, *i_empty;
+        pid_t my_pid = task_pid_nr(current), i_pid;
+
+        {
+            static int _warned_on_null = 0;
+            if (wc_linuxkm_fpu_states == NULL)
+            {
+                if (_warned_on_null == 0) {
+                    pr_err("wc_linuxkm_fpu_state_assoc called by pid %d"
+                           " before allocate_wolfcrypt_linuxkm_fpu_states.\n", my_pid);
+                    _warned_on_null = 1;
+                }
+                return NULL;
+            }
+        }
+
+        i_endptr = &wc_linuxkm_fpu_states[wc_linuxkm_fpu_states_n_tracked];
+
+        for (;;) {
+            for (i = wc_linuxkm_fpu_states,
+                     i_empty = NULL;
+                 i < i_endptr;
+                 ++i)
+            {
+                i_pid = __atomic_load_n(&i->pid, __ATOMIC_CONSUME);
+                if (i_pid == my_pid)
+                    return i;
+                if ((i_empty == NULL) && (i_pid == 0))
+                    i_empty = i;
+            }
+            if ((i_empty == NULL) || (! create_p))
+                return NULL;
+
+            i_pid = 0;
+            if (__atomic_compare_exchange_n(
+                    &(i_empty->pid),
+                    &i_pid,
+                    my_pid,
+                    0 /* weak */,
+                    __ATOMIC_SEQ_CST /* success_memmodel */,
+                    __ATOMIC_SEQ_CST /* failure_memmodel */))
+            {
+                return i_empty;
+            }
+        }
+    }
+
+    static void wc_linuxkm_fpu_state_free(struct wc_thread_fpu_count_ent *ent) {
+        if (ent->fpu_state != 0) {
+            static int warned_nonzero_fpu_state = 0;
+            if (! warned_nonzero_fpu_state) {
+                pr_err("wc_linuxkm_fpu_state_free for pid %d"
+                       " with nonzero fpu_state 0x%x.\n", ent->pid, ent->fpu_state);
+                warned_nonzero_fpu_state = 1;
+            }
+            ent->fpu_state = 0;
+        }
+        __atomic_store_n(&ent->pid, 0, __ATOMIC_RELEASE);
+    }
+
+    WARN_UNUSED_RESULT int save_vector_registers_x86(void)
+    {
+        struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(1);
+        if (pstate == NULL)
+            return ENOMEM;
+
+        /* allow for nested calls */
+        if (pstate->fpu_state != 0U) {
+            if ((pstate->fpu_state & WC_FPU_COUNT_MASK)
+                == WC_FPU_COUNT_MASK)
+            {
+                pr_err("save_vector_registers_x86 recursion register overflow for "
+                       "pid %d.\n", pstate->pid);
+                return BAD_STATE_E;
+            } else {
+                ++pstate->fpu_state;
+                return 0;
+            }
+        }
+
+        if (irq_fpu_usable()) {
+#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+            /* inhibit migration, which gums up the algorithm in kernel_fpu_{begin,end}(). */
+            migrate_disable();
+#endif
+            kernel_fpu_begin();
+            pstate->fpu_state = 1U; /* set msb 0 to trigger kernel_fpu_end() at cleanup. */
+        } else if (in_nmi() || (hardirq_count() > 0) || (softirq_count() > 0)) {
+            static int warned_fpu_forbidden = 0;
+            if (! warned_fpu_forbidden)
+                pr_err("save_vector_registers_x86 called from IRQ handler.\n");
+            wc_linuxkm_fpu_state_free(pstate);
+            return EPERM;
+        } else {
+#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+            migrate_disable();
+#endif
+            /* assume already safely in_kernel_fpu. */
+            pstate->fpu_state =
+                WC_FPU_SAVED_MASK + 1U; /* set msb 1 to inhibit kernel_fpu_end() at cleanup. */
+        }
+
+        return 0;
+    }
+
+    void restore_vector_registers_x86(void)
+    {
+        struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(0);
+        if (pstate == NULL) {
+            pr_err("restore_vector_registers_x86 called by pid %d "
+                   "with no saved state.\n", task_pid_nr(current));
+            return;
+        }
+
+        if ((--pstate->fpu_state & WC_FPU_COUNT_MASK) > 0U) {
+            return;
+        }
+
+        if (pstate->fpu_state == 0U)
+            kernel_fpu_end();
+        else
+            pstate->fpu_state = 0U;
+#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+        migrate_enable();
+#endif
+
+        wc_linuxkm_fpu_state_free(pstate);
+
+        return;
+    }
 
 #endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS && CONFIG_X86 */
 
 #if defined(__PIE__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0))
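Note: the core of the new per-process tracking above is a pid-keyed slot table claimed with a compare-and-swap. Stripped of the wolfSSL error paths, the claim/release pattern reduces to roughly the following sketch (names are illustrative only, not part of the wolfSSL API):

    /* Sketch of the lock-free claim pattern: each thread finds the slot it
     * already owns, or claims an empty slot with a CAS on the pid field.
     */
    struct fpu_slot { volatile pid_t pid; unsigned int depth; };

    static struct fpu_slot slots[32];

    static struct fpu_slot *slot_for(pid_t me, int create_p)
    {
        for (;;) {
            struct fpu_slot *empty = NULL;
            unsigned int i;
            pid_t expected;
            for (i = 0; i < sizeof slots / sizeof slots[0]; ++i) {
                pid_t owner = __atomic_load_n(&slots[i].pid, __ATOMIC_CONSUME);
                if (owner == me)
                    return &slots[i];            /* already ours */
                if ((empty == NULL) && (owner == 0))
                    empty = &slots[i];           /* remember first free slot */
            }
            if ((empty == NULL) || (! create_p))
                return NULL;
            expected = 0;
            if (__atomic_compare_exchange_n(&empty->pid, &expected, me,
                                            0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                return empty;                    /* won the race; otherwise rescan */
        }
    }

Release is the mirror image: zero the depth counter, then store 0 to pid with release semantics, exactly as wc_linuxkm_fpu_state_free() does above.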
@@ -24,6 +24,12 @@
 #ifndef LINUXKM_WC_PORT_H
 #define LINUXKM_WC_PORT_H
 
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+    #error Unsupported kernel.
+#endif
+
 #ifdef HAVE_CONFIG_H
 #ifndef PACKAGE_NAME
 #error wc_port.h included before config.h
@@ -59,6 +65,23 @@
          (int)_xatoi_res; \
     })
 
+/* Kbuild+gcc on x86 doesn't consistently honor the default ALIGN16 on stack objects,
+ * but gives adequate alignment with "32".
+ */
+#if defined(CONFIG_X86) && !defined(ALIGN16)
+    #define ALIGN16 __attribute__ ( (aligned (32)))
+#endif
+
+/* kvmalloc()/kvfree() and friends added in linux commit a7c3e901 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
+    #define HAVE_KVMALLOC
+#endif
+
+/* kernel printf doesn't implement fp. */
+#ifndef WOLFSSL_NO_FLOAT_FMT
+    #define WOLFSSL_NO_FLOAT_FMT
+#endif
+
 #ifdef BUILDING_WOLFSSL
 
 #if defined(CONFIG_MIPS) && defined(HAVE_LINUXKM_PIE_SUPPORT)
@@ -95,7 +118,6 @@
 
 #include <linux/kconfig.h>
 #include <linux/kernel.h>
-#include <linux/version.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -124,6 +146,8 @@
     #ifndef CONFIG_X86
         #error X86 SIMD extensions requested, but CONFIG_X86 is not set.
     #endif
+    #define WOLFSSL_LINUXKM_SIMD
+    #define WOLFSSL_LINUXKM_SIMD_X86
     #ifndef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
         #define WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
     #endif
@@ -133,6 +157,8 @@
     #if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
         #error ARM SIMD extensions requested, but CONFIG_ARM* is not set.
     #endif
+    #define WOLFSSL_LINUXKM_SIMD
+    #define WOLFSSL_LINUXKM_SIMD_ARM
     #ifndef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
         #define WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
     #endif
@@ -142,26 +168,17 @@
     #endif
 #endif
 
+/* benchmarks.c uses floating point math, so needs a working SAVE_VECTOR_REGISTERS(). */
+#if defined(WOLFSSL_LINUXKM_BENCHMARKS) && !defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS)
+    #define WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
+#endif
+
 #if defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && defined(CONFIG_X86)
-    #define WOLFSSL_LINUXKM_SIMD
-    #define WOLFSSL_LINUXKM_SIMD_X86
     #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
         #include <asm/i387.h>
     #else
         #include <asm/simd.h>
     #endif
-    #ifdef LINUXKM_SIMD_IRQ
-        #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
-            #include <asm/fpu/internal.h>
-        #endif
-        #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)
-            #error LINUXKM_SIMD_IRQ is unavailable on linux >= 5.16 (missing exports around fpregs)
-            /*
-             * #include <asm/fpu/sched.h>
-             * #include <asm/fpu/signal.h>
-             */
-        #endif
-    #endif
     #ifndef SAVE_VECTOR_REGISTERS
         #define SAVE_VECTOR_REGISTERS(fail_clause) { int _svr_ret = save_vector_registers_x86(); if (_svr_ret != 0) { fail_clause } }
     #endif
@@ -169,12 +186,7 @@
     #define RESTORE_VECTOR_REGISTERS() restore_vector_registers_x86()
     #endif
 #elif defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
-    #define WOLFSSL_LINUXKM_SIMD
-    #define WOLFSSL_LINUXKM_SIMD_ARM
     #include <asm/fpsimd.h>
-    #ifdef LINUXKM_SIMD_IRQ
-        #error LINUXKM_SIMD_IRQ is unavailable on ARM (not implemented)
-    #endif
     #ifndef SAVE_VECTOR_REGISTERS
         #define SAVE_VECTOR_REGISTERS(fail_clause) { int _svr_ret = save_vector_registers_arm(); if (_svr_ret != 0) { fail_clause } }
     #endif
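Note: a typical call site for these macros looks like the sketch below (the worker function is hypothetical); the fail_clause argument is the statement list to execute when the registers cannot be saved:

    /* Illustrative caller only: bracket any code that touches FPU/SIMD
     * registers with the save/restore pair; on failure the fail_clause runs.
     */
    static int example_simd_worker(void)
    {
        SAVE_VECTOR_REGISTERS(return BAD_STATE_E;);  /* expands to save_vector_registers_x86()/_arm() */

        /* ... code that uses x87/SSE/AVX or NEON registers ... */

        RESTORE_VECTOR_REGISTERS();
        return 0;
    }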
@@ -195,11 +207,6 @@
 #define NO_THREAD_LS
 #define NO_ATTRIBUTE_CONSTRUCTOR
 
-/* kvmalloc()/kvfree() and friends added in linux commit a7c3e901 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
-    #define HAVE_KVMALLOC
-#endif
-
 #ifdef HAVE_FIPS
     extern int wolfCrypt_FIPS_first(void);
     extern int wolfCrypt_FIPS_last(void);
@@ -215,7 +222,7 @@
 #endif
 
 #if defined(__PIE__) && !defined(USE_WOLFSSL_LINUXKM_PIE_REDIRECT_TABLE)
-    #error "compiling -fPIE without PIE support."
+    #error "compiling -fPIE requires PIE redirect table."
 #endif
 
 #if defined(HAVE_FIPS) && !defined(HAVE_LINUXKM_PIE_SUPPORT)
@@ -307,42 +314,37 @@
     struct task_struct *(*get_current)(void);
     int (*preempt_count)(void);
 
-#ifdef WOLFSSL_LINUXKM_SIMD_X86
-    typeof(irq_fpu_usable) *irq_fpu_usable;
-    /* kernel_fpu_begin() replaced by kernel_fpu_begin_mask() in commit e4512289,
-     * released in kernel 5.11, backported to 5.4.93
-     */
-    #ifdef kernel_fpu_begin
-        typeof(kernel_fpu_begin_mask) *kernel_fpu_begin_mask;
-    #else
-        typeof(kernel_fpu_begin) *kernel_fpu_begin;
-    #endif
-    typeof(kernel_fpu_end) *kernel_fpu_end;
-
-    #ifdef LINUXKM_SIMD_IRQ
-        #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
-            typeof(copy_fpregs_to_fpstate) *copy_fpregs_to_fpstate;
-            typeof(copy_kernel_to_fpregs) *copy_kernel_to_fpregs;
-        #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
-            typeof(save_fpregs_to_fpstate) *save_fpregs_to_fpstate;
-            typeof(__restore_fpregs_from_fpstate) *__restore_fpregs_from_fpstate;
-            typeof(xfeatures_mask_all) *xfeatures_mask_all;
-        /*
-         * #else
-         *     typeof(save_fpregs_to_fpstate) *save_fpregs_to_fpstate;
-         *     typeof(restore_fpregs_from_fpstate) *restore_fpregs_from_fpstate;
-         *     typeof(fpu_kernel_cfg) *fpu_kernel_cfg;
-         */
-        #endif
-    #endif
-    #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
-        typeof(cpu_number) *cpu_number;
-    #else
-        typeof(pcpu_hot) *pcpu_hot;
-    #endif
-    typeof(nr_cpu_ids) *nr_cpu_ids;
-
-#endif /* WOLFSSL_LINUXKM_SIMD_X86 */
+#ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
+    #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
+        typeof(cpu_number) *cpu_number;
+    #else
+        typeof(pcpu_hot) *pcpu_hot;
+    #endif
+    typeof(nr_cpu_ids) *nr_cpu_ids;
+
+    #if defined(CONFIG_SMP) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+        /* note the current and needed version of these were added in af449901b8 (2020-Sep-17) */
+        typeof(migrate_disable) *migrate_disable;
+        typeof(migrate_enable) *migrate_enable;
+    #endif
+
+    #ifdef CONFIG_X86
+        typeof(irq_fpu_usable) *irq_fpu_usable;
+        /* kernel_fpu_begin() replaced by kernel_fpu_begin_mask() in commit e4512289,
+         * released in kernel 5.11, backported to 5.4.93
+         */
+        #ifdef kernel_fpu_begin
+            typeof(kernel_fpu_begin_mask) *kernel_fpu_begin_mask;
+        #else
+            typeof(kernel_fpu_begin) *kernel_fpu_begin;
+        #endif
+        typeof(kernel_fpu_end) *kernel_fpu_end;
+    #else /* !CONFIG_X86 */
+        #error WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS is set for an unsupported architecture.
+    #endif /* arch */
+
+#endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS */
 
     typeof(__mutex_init) *__mutex_init;
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
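Note: the members added above follow the module's existing PIE redirect-table pattern — the position-independent wolfCrypt container cannot relocate against kernel symbols, so each symbol is reached through a pointer captured at module load. A minimal sketch of that mechanism, with illustrative names (not the actual wolfSSL table layout):

    /* Loader-visible code fills in real kernel addresses; the PIE container
     * calls through the table instead of taking direct relocations.
     */
    struct pie_redirect_sketch {
        bool (*irq_fpu_usable)(void);
        void (*kernel_fpu_end)(void);
    };

    static struct pie_redirect_sketch redirect_sketch;

    static void fill_redirect_sketch(void)      /* runs outside the PIE container */
    {
        redirect_sketch.irq_fpu_usable = irq_fpu_usable;
        redirect_sketch.kernel_fpu_end = kernel_fpu_end;
    }

    /* inside the PIE container, a macro hides the indirection: */
    #define irq_fpu_usable_sketch() (redirect_sketch.irq_fpu_usable())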
@@ -452,37 +454,31 @@
     #undef preempt_count
     #define preempt_count (wolfssl_linuxkm_get_pie_redirect_table()->preempt_count)
 
-    #ifdef WOLFSSL_LINUXKM_SIMD_X86
-        #define irq_fpu_usable (wolfssl_linuxkm_get_pie_redirect_table()->irq_fpu_usable)
-        #ifdef kernel_fpu_begin
-            #define kernel_fpu_begin_mask (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_begin_mask)
-        #else
-            #define kernel_fpu_begin (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_begin)
-        #endif
-        #define kernel_fpu_end (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_end)
-        #ifdef LINUXKM_SIMD_IRQ
-            #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
-                #define copy_fpregs_to_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->copy_fpregs_to_fpstate)
-                #define copy_kernel_to_fpregs (wolfssl_linuxkm_get_pie_redirect_table()->copy_kernel_to_fpregs)
-            #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
-                #define save_fpregs_to_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->save_fpregs_to_fpstate)
-                #define __restore_fpregs_from_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->__restore_fpregs_from_fpstate)
-                #define xfeatures_mask_all (*(wolfssl_linuxkm_get_pie_redirect_table()->xfeatures_mask_all))
-            /*
-             * #else
-             *     #define save_fpregs_to_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->save_fpregs_to_fpstate)
-             *     #define restore_fpregs_from_fpstate (wolfssl_linuxkm_get_pie_redirect_table()->restore_fpregs_from_fpstate)
-             *     #define fpu_kernel_cfg (*(wolfssl_linuxkm_get_pie_redirect_table()->fpu_kernel_cfg))
-             */
-            #endif
-        #endif
+    #ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
         #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
             #define cpu_number (*(wolfssl_linuxkm_get_pie_redirect_table()->cpu_number))
         #else
             #define pcpu_hot (*(wolfssl_linuxkm_get_pie_redirect_table()->pcpu_hot))
         #endif
         #define nr_cpu_ids (*(wolfssl_linuxkm_get_pie_redirect_table()->nr_cpu_ids))
-    #endif
+
+        #if defined(CONFIG_SMP) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+            #define migrate_disable (*(wolfssl_linuxkm_get_pie_redirect_table()->migrate_disable))
+            #define migrate_enable (*(wolfssl_linuxkm_get_pie_redirect_table()->migrate_enable))
+        #endif
+
+        #ifdef CONFIG_X86
+            #define irq_fpu_usable (wolfssl_linuxkm_get_pie_redirect_table()->irq_fpu_usable)
+            #ifdef kernel_fpu_begin
+                #define kernel_fpu_begin_mask (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_begin_mask)
+            #else
+                #define kernel_fpu_begin (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_begin)
+            #endif
+            #define kernel_fpu_end (wolfssl_linuxkm_get_pie_redirect_table()->kernel_fpu_end)
+        #else /* !CONFIG_X86 */
+            #error WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS is set for an unsupported architecture.
+        #endif /* archs */
+    #endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS */
 
     #define __mutex_init (wolfssl_linuxkm_get_pie_redirect_table()->__mutex_init)
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
@@ -515,9 +511,9 @@
 
 #endif /* USE_WOLFSSL_LINUXKM_PIE_REDIRECT_TABLE */
 
-#ifdef WOLFSSL_LINUXKM_SIMD
+#ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
 
-#ifdef WOLFSSL_LINUXKM_SIMD_X86
+#ifdef CONFIG_X86
 
     extern __must_check int allocate_wolfcrypt_linuxkm_fpu_states(void);
     extern void free_wolfcrypt_linuxkm_fpu_states(void);
@@ -547,7 +543,7 @@
 
 #endif
 
-#endif /* WOLFSSL_LINUXKM_SIMD */
+#endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS */
 
 /* remove this multifariously conflicting macro, picked up from
  * Linux arch/<arch>/include/asm/current.h.
@@ -556,22 +552,6 @@
 #undef current
 #endif
 
-/* prevent gcc's mm_malloc.h from being included, since it unconditionally
- * includes stdlib.h, which is kernel-incompatible.
- */
-#define _MM_MALLOC_H_INCLUDED
-
-#ifdef HAVE_KVMALLOC
-    #define malloc(x) kvmalloc_node(x, GFP_KERNEL, NUMA_NO_NODE)
-    #define free(x) kvfree(x)
-    void *lkm_realloc(void *ptr, size_t newsize);
-    #define realloc(x, y) lkm_realloc(x, y)
-#else
-    #define malloc(x) kmalloc(x, GFP_KERNEL)
-    #define free(x) kfree(x)
-    #define realloc(x,y) krealloc(x, y, GFP_KERNEL)
-#endif
-
 /* min() and max() in linux/kernel.h over-aggressively type-check, producing
  * myriad spurious -Werrors throughout the codebase.
  */
@@ -618,9 +598,41 @@
 #include <linux/mutex.h>
 typedef struct mutex wolfSSL_Mutex;
 
-#define XMALLOC(s, h, t) ({(void)(h); (void)(t); kmalloc(s, GFP_KERNEL);})
-#define XFREE(p, h, t) ({void* _xp; (void)(h); _xp = (p); if(_xp) kfree(_xp);})
-#define XREALLOC(p, n, h, t) ({(void)(h); (void)(t); krealloc((p), (n), GFP_KERNEL);})
+/* prevent gcc's mm_malloc.h from being included, since it unconditionally
+ * includes stdlib.h, which is kernel-incompatible.
+ */
+#define _MM_MALLOC_H_INCLUDED
+
+/* fun fact: since linux commit 59bb47985c, kmalloc with power-of-2 size is
+ * aligned to the size.
+ */
+#define WC_LINUXKM_ROUND_UP_P_OF_2(x) (                                         \
+{                                                                               \
+    size_t _alloc_sz = (x);                                                     \
+    _alloc_sz = 1UL << ((sizeof(_alloc_sz) * 8UL) - __builtin_clzl(_alloc_sz)); \
+    _alloc_sz;                                                                  \
+})
+#ifdef HAVE_KVMALLOC
+    #define malloc(size) kvmalloc_node(WC_LINUXKM_ROUND_UP_P_OF_2(size), GFP_KERNEL, NUMA_NO_NODE)
+    #define free(ptr) kvfree(ptr)
+    void *lkm_realloc(void *ptr, size_t newsize);
+    #define realloc(ptr, newsize) lkm_realloc(ptr, WC_LINUXKM_ROUND_UP_P_OF_2(newsize))
+#else
+    #define malloc(size) kmalloc(WC_LINUXKM_ROUND_UP_P_OF_2(size), GFP_KERNEL)
+    #define free(ptr) kfree(ptr)
+    #define realloc(ptr, newsize) krealloc(ptr, WC_LINUXKM_ROUND_UP_P_OF_2(newsize), GFP_KERNEL)
+#endif
+
+#ifdef WOLFSSL_TRACK_MEMORY
+    #include <wolfssl/wolfcrypt/memory.h>
+    #define XMALLOC(s, h, t) ({(void)(h); (void)(t); wolfSSL_Malloc(s);})
+    #define XFREE(p, h, t) ({void* _xp; (void)(h); _xp = (p); if(_xp) wolfSSL_Free(_xp);})
+    #define XREALLOC(p, n, h, t) ({(void)(h); (void)(t); wolfSSL_Realloc(p, n);})
+#else
+    #define XMALLOC(s, h, t) ({(void)(h); (void)(t); malloc(s);})
+    #define XFREE(p, h, t) ({void* _xp; (void)(h); _xp = (p); if(_xp) free(_xp);})
+    #define XREALLOC(p, n, h, t) ({(void)(h); (void)(t); realloc(p, n);})
+#endif
 
 #include <linux/limits.h>
 
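Note: the rounding macro above always moves to the next power of two strictly greater than its argument, because __builtin_clzl() is applied to the value itself rather than to value-1; the resulting kmalloc()/kvmalloc() size is therefore always a power of two and, per the cited kernel commit, size-aligned. A worked illustration (not part of the header; values assume 64-bit size_t):

    /*   x = 100 : clzl(100) = 57, 64 - 57 = 7,  result = 1UL << 7 = 128
     *   x = 128 : clzl(128) = 56, 64 - 56 = 8,  result = 1UL << 8 = 256
     */
    static size_t example_rounding(void)
    {
        return WC_LINUXKM_ROUND_UP_P_OF_2(100);   /* yields 128 on LP64 */
    }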
@@ -113,6 +113,15 @@ static void lkmFipsCb(int ok, int err, const char* hash)
 static int updateFipsHash(void);
 #endif
 
+#ifdef WOLFSSL_LINUXKM_BENCHMARKS
+#undef HAVE_PTHREAD
+#define STRING_USER
+#define NO_MAIN_FUNCTION
+#define current_time benchmark_current_time
+#define WOLFSSL_NO_FLOAT_FMT
+#include "/home/douzzer/com/wolfssl/src/wolfssl/wolfcrypt/benchmark/benchmark.c"
+#endif /* WOLFSSL_LINUXKM_BENCHMARKS */
+
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
 static int __init wolfssl_init(void)
 #else
@@ -202,10 +211,9 @@ static int wolfssl_init(void)
          * the true module start address, which is potentially useful to an
          * attacker.
          */
-        pr_info("wolfCrypt container hashes (spans): %x (%lu) %x (%lu), text base %pK, ro base %pK\n",
+        pr_info("wolfCrypt container hashes (spans): text 0x%x (%lu), rodata 0x%x (%lu)\n",
                 text_hash, pie_text_end-pie_text_start,
-                rodata_hash, pie_rodata_end-pie_rodata_start,
-                THIS_MODULE_TEXT_BASE, THIS_MODULE_RO_BASE);
+                rodata_hash, pie_rodata_end-pie_rodata_start);
     }
 #endif /* HAVE_LINUXKM_PIE_SUPPORT */
 
@@ -277,6 +285,10 @@ static int wolfssl_init(void)
     pr_info("wolfCrypt self-test passed.\n");
 #endif
 
+#ifdef WOLFSSL_LINUXKM_BENCHMARKS
+    wolfcrypt_benchmark_main(0, (char**)NULL);
+#endif
+
 #ifdef WOLFCRYPT_ONLY
     pr_info("wolfCrypt " LIBWOLFSSL_VERSION_STRING " loaded%s"
             ".\nSee https://www.wolfssl.com/ for more information.\n"
@@ -334,15 +346,6 @@ static int my_preempt_count(void) {
     return preempt_count();
 }
 
-#if defined(WOLFSSL_LINUXKM_SIMD_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0))
-static int my_copy_fpregs_to_fpstate(struct fpu *fpu) {
-    return copy_fpregs_to_fpstate(fpu);
-}
-static void my_copy_kernel_to_fpregs(union fpregs_state *fpstate) {
-    copy_kernel_to_fpregs(fpstate);
-}
-#endif
-
 static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
     memset(
         &wolfssl_linuxkm_pie_redirect_table,
@@ -430,6 +433,20 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
     wolfssl_linuxkm_pie_redirect_table.get_current = my_get_current_thread;
     wolfssl_linuxkm_pie_redirect_table.preempt_count = my_preempt_count;
 
+#ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
+    wolfssl_linuxkm_pie_redirect_table.cpu_number = &cpu_number;
+#else
+    wolfssl_linuxkm_pie_redirect_table.pcpu_hot = &pcpu_hot;
+#endif
+    wolfssl_linuxkm_pie_redirect_table.nr_cpu_ids = &nr_cpu_ids;
+
+#if defined(CONFIG_SMP) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+    wolfssl_linuxkm_pie_redirect_table.migrate_disable = &migrate_disable;
+    wolfssl_linuxkm_pie_redirect_table.migrate_enable = &migrate_enable;
+#endif
+
 #ifdef WOLFSSL_LINUXKM_SIMD_X86
     wolfssl_linuxkm_pie_redirect_table.irq_fpu_usable = irq_fpu_usable;
     #ifdef kernel_fpu_begin
@@ -440,29 +457,9 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
             kernel_fpu_begin;
     #endif
     wolfssl_linuxkm_pie_redirect_table.kernel_fpu_end = kernel_fpu_end;
-    #ifdef LINUXKM_SIMD_IRQ
-        #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
-            wolfssl_linuxkm_pie_redirect_table.copy_fpregs_to_fpstate = my_copy_fpregs_to_fpstate;
-            wolfssl_linuxkm_pie_redirect_table.copy_kernel_to_fpregs = my_copy_kernel_to_fpregs;
-        #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
-            wolfssl_linuxkm_pie_redirect_table.save_fpregs_to_fpstate = save_fpregs_to_fpstate;
-            wolfssl_linuxkm_pie_redirect_table.__restore_fpregs_from_fpstate = __restore_fpregs_from_fpstate;
-            wolfssl_linuxkm_pie_redirect_table.xfeatures_mask_all = &xfeatures_mask_all;
-        /*
-         * #else
-         *     wolfssl_linuxkm_pie_redirect_table.save_fpregs_to_fpstate = save_fpregs_to_fpstate;
-         *     wolfssl_linuxkm_pie_redirect_table.restore_fpregs_from_fpstate = restore_fpregs_from_fpstate;
-         *     wolfssl_linuxkm_pie_redirect_table.fpu_kernel_cfg = &fpu_kernel_cfg;
-         */
-        #endif
-    #endif
-    #if LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0)
-        wolfssl_linuxkm_pie_redirect_table.cpu_number = &cpu_number;
-    #else
-        wolfssl_linuxkm_pie_redirect_table.pcpu_hot = &pcpu_hot;
-    #endif
-    wolfssl_linuxkm_pie_redirect_table.nr_cpu_ids = &nr_cpu_ids;
-#endif
+#endif /* WOLFSSL_LINUXKM_SIMD_X86 */
+
+#endif /* WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS */
 
     wolfssl_linuxkm_pie_redirect_table.__mutex_init = __mutex_init;
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
@@ -207,7 +207,7 @@ int wolfCrypt_Init(void)
     }
     #endif
 
-    #if defined(WOLFSSL_LINUXKM_SIMD_X86)
+    #ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
         ret = allocate_wolfcrypt_linuxkm_fpu_states();
         if (ret != 0) {
             WOLFSSL_MSG("allocate_wolfcrypt_linuxkm_fpu_states failed");
@@ -466,7 +466,7 @@ int wolfCrypt_Cleanup(void)
         rpcmem_deinit();
         wolfSSL_CleanupHandle();
 #endif
-#if defined(WOLFSSL_LINUXKM_SIMD_X86)
+#ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
     free_wolfcrypt_linuxkm_fpu_states();
 #endif
 
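Note: the two hunks above give the per-process state table a simple lifecycle — allocated once from wolfCrypt_Init() and torn down from wolfCrypt_Cleanup(). Schematically (illustrative module hooks only, error handling elided):

    static int example_module_init(void)
    {
        int ret = allocate_wolfcrypt_linuxkm_fpu_states();  /* via wolfCrypt_Init() */
        if (ret != 0)
            return -ENOMEM;
        return 0;
    }

    static void example_module_exit(void)
    {
        free_wolfcrypt_linuxkm_fpu_states();                /* via wolfCrypt_Cleanup() */
    }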
@@ -3207,56 +3207,6 @@ char* mystrnstr(const char* s1, const char* s2, unsigned int n)
 
 #endif /* WOLFSSL_NUCLEUS_1_2 */
 
-#if defined(WOLFSSL_LINUXKM) && defined(HAVE_KVMALLOC)
-/* adapted from kvrealloc() draft by Changli Gao, 2010-05-13 */
-void *lkm_realloc(void *ptr, size_t newsize) {
-    void *nptr;
-    size_t oldsize;
-
-    if (unlikely(newsize == 0)) {
-        kvfree(ptr);
-        return ZERO_SIZE_PTR;
-    }
-
-    if (unlikely(ptr == NULL))
-        return kvmalloc_node(newsize, GFP_KERNEL, NUMA_NO_NODE);
-
-    if (is_vmalloc_addr(ptr)) {
-        /* no way to discern the size of the old allocation,
-         * because the kernel doesn't export find_vm_area().  if
-         * it did, we could then call get_vm_area_size() on the
-         * returned struct vm_struct.
-         */
-        return NULL;
-    } else {
-#ifndef __PIE__
-        struct page *page;
-
-        page = virt_to_head_page(ptr);
-        if (PageSlab(page) || PageCompound(page)) {
-            if (newsize < PAGE_SIZE)
-#endif /* ! __PIE__ */
-                return krealloc(ptr, newsize, GFP_KERNEL);
-#ifndef __PIE__
-            oldsize = ksize(ptr);
-        } else {
-            oldsize = page->private;
-            if (newsize <= oldsize)
-                return ptr;
-        }
-#endif /* ! __PIE__ */
-    }
-
-    nptr = kvmalloc_node(newsize, GFP_KERNEL, NUMA_NO_NODE);
-    if (nptr != NULL) {
-        memcpy(nptr, ptr, oldsize);
-        kvfree(ptr);
-    }
-
-    return nptr;
-}
-#endif /* WOLFSSL_LINUXKM && HAVE_KVMALLOC */
-
 #if defined(WOLFSSL_TI_CRYPT) || defined(WOLFSSL_TI_HASH)
 #include <wolfcrypt/src/port/ti/ti-ccm.c> /* initialize and Mutex for TI Crypt Engine */
 #include <wolfcrypt/src/port/ti/ti-hash.c> /* md5, sha1, sha224, sha256 */