linuxkm/linuxkm_wc_port.h, linuxkm/module_hooks.c, and wolfcrypt/src/wc_port.c: fixes for spinlocks on CONFIG_ARM64;
wolfcrypt/src/wc_port.c: include random.h, for Entropy_Init().
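Background for the hunks below (which span the three files named above): when wolfCrypt is built as PIE for the Linux kernel module, its calls into kernel functions are routed through a function-pointer redirect table that module_hooks.c populates at init time. This commit adds queued_spin_lock_slowpath to that scheme on CONFIG_ARM64 and wraps the _raw_spin_* table slots and macro overrides in #ifndef guards so configurations where those names are already macros are left alone. The following is a minimal, userspace-compilable sketch of the redirect-table pattern only, using made-up names (example_redirect_table, native_slowpath, my_slowpath); it is not the wolfSSL implementation.

#include <stdio.h>

/* the "native" function that PIE-built code must not reference directly */
static void native_slowpath(int val)
{
    printf("native_slowpath(%d)\n", val);
}

/* one function pointer per shimmed symbol */
struct example_redirect_table {
    void (*slowpath)(int val);
};

static struct example_redirect_table the_table;

static const struct example_redirect_table *get_redirect_table(void)
{
    return &the_table;
}

/* in the PIE-built objects a macro such as
 *   #define native_slowpath my_slowpath
 * is seen before any call site, so every call lands in this shim,
 * which forwards through the table */
static void my_slowpath(int val)
{
    get_redirect_table()->slowpath(val);
}

int main(void)
{
    the_table.slowpath = native_slowpath; /* done once, at "module init" time */
    my_slowpath(42);                      /* PIE-side call goes via the table */
    return 0;
}

In the real port the macro override is only wanted while system headers are being shimmed, which is why the second hunk #undefs the names again where the native declarations are needed for the table's typeof() slots.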
@@ -126,6 +126,7 @@
 
 #if defined(__PIE__) && defined(CONFIG_ARM64)
 #define alt_cb_patch_nops my__alt_cb_patch_nops
+#define queued_spin_lock_slowpath my__queued_spin_lock_slowpath
 #endif
 
 #include <linux/kernel.h>
@@ -705,20 +706,30 @@
 
 #ifdef CONFIG_ARM64
 #ifdef __PIE__
-/* alt_cb_patch_nops defined early to allow shimming in system
- * headers, but now we need the native one.
+/* alt_cb_patch_nops and queued_spin_lock_slowpath are defined early
+ * to allow shimming in system headers, but now we need the native
+ * ones.
  */
 #undef alt_cb_patch_nops
 typeof(my__alt_cb_patch_nops) *alt_cb_patch_nops;
+#undef queued_spin_lock_slowpath
+typeof(my__queued_spin_lock_slowpath) *queued_spin_lock_slowpath;
 #else
 typeof(alt_cb_patch_nops) *alt_cb_patch_nops;
+typeof(queued_spin_lock_slowpath) *queued_spin_lock_slowpath;
 #endif
 #endif
 
 typeof(preempt_count) *preempt_count;
-typeof(_raw_spin_lock_irqsave) *_raw_spin_lock_irqsave;
-typeof(_raw_spin_trylock) *_raw_spin_trylock;
-typeof(_raw_spin_unlock_irqrestore) *_raw_spin_unlock_irqrestore;
+#ifndef _raw_spin_lock_irqsave
+typeof(_raw_spin_lock_irqsave) *_raw_spin_lock_irqsave;
+#endif
+#ifndef _raw_spin_trylock
+typeof(_raw_spin_trylock) *_raw_spin_trylock;
+#endif
+#ifndef _raw_spin_unlock_irqrestore
+typeof(_raw_spin_unlock_irqrestore) *_raw_spin_unlock_irqrestore;
+#endif
 typeof(_cond_resched) *_cond_resched;
 
 const void *_last_slot;
@@ -885,9 +896,19 @@
 
 #undef preempt_count /* just in case -- not a macro on x86. */
 #define preempt_count (wolfssl_linuxkm_get_pie_redirect_table()->preempt_count)
-#define _raw_spin_lock_irqsave (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_lock_irqsave)
-#define _raw_spin_trylock (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_trylock)
-#define _raw_spin_unlock_irqrestore (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_unlock_irqrestore)
+#ifndef WOLFSSL_LINUXKM_USE_MUTEXES
+#ifndef _raw_spin_lock_irqsave
+#define _raw_spin_lock_irqsave (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_lock_irqsave)
+#endif
+#ifndef _raw_spin_trylock
+#define _raw_spin_trylock (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_trylock)
+#endif
+#ifndef _raw_spin_unlock_irqrestore
+#define _raw_spin_unlock_irqrestore (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_unlock_irqrestore)
+#endif
+#endif
 #define _cond_resched (wolfssl_linuxkm_get_pie_redirect_table()->_cond_resched)
 
 /* this is defined in linux/spinlock.h as an inline that calls the unshimmed
@@ -991,8 +1012,8 @@
 
 static inline int wc_LockMutex(wolfSSL_Mutex* m)
 {
-    if (in_nmi() || in_hardirq() || in_softirq())
-        return BAD_STATE_E;
+    if (in_nmi() || hardirq_count() || in_softirq())
+        return -1;
     mutex_lock(m);
     return 0;
 }
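The wc_LockMutex() change above replaces in_hardirq() with hardirq_count(), presumably for portability to kernels that predate in_hardirq(), and returns a plain -1 rather than BAD_STATE_E. For context, a minimal kernel-module-style sketch of the same atomic-context guard, using a hypothetical lock (example_lock) rather than wolfSSL_Mutex:

#include <linux/hardirq.h> /* in_nmi(), hardirq_count(), in_softirq() */
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock); /* hypothetical; stands in for wolfSSL_Mutex */

static int example_lock_mutex(void)
{
    /* mutexes sleep, so refuse to lock from NMI, hardirq, or softirq context;
     * hardirq_count() is nonzero whenever a hard interrupt is being handled. */
    if (in_nmi() || hardirq_count() || in_softirq())
        return -1;
    mutex_lock(&example_lock);
    return 0;
}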
@@ -658,13 +658,20 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
 #endif
 
     wolfssl_linuxkm_pie_redirect_table.preempt_count = my_preempt_count;
+#ifndef _raw_spin_lock_irqsave
     wolfssl_linuxkm_pie_redirect_table._raw_spin_lock_irqsave = _raw_spin_lock_irqsave;
+#endif
+#ifndef _raw_spin_trylock
     wolfssl_linuxkm_pie_redirect_table._raw_spin_trylock = _raw_spin_trylock;
+#endif
+#ifndef _raw_spin_unlock_irqrestore
     wolfssl_linuxkm_pie_redirect_table._raw_spin_unlock_irqrestore = _raw_spin_unlock_irqrestore;
+#endif
     wolfssl_linuxkm_pie_redirect_table._cond_resched = _cond_resched;
 
 #ifdef CONFIG_ARM64
     wolfssl_linuxkm_pie_redirect_table.alt_cb_patch_nops = alt_cb_patch_nops;
+    wolfssl_linuxkm_pie_redirect_table.queued_spin_lock_slowpath = queued_spin_lock_slowpath;
 #endif
 
     /* runtime assert that the table has no null slots after initialization. */
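The hunk above ends just before the runtime check that the redirect table has no NULL slots after initialization. One way such a check can be written is sketched below with a hypothetical table layout (pointer-sized slots followed by a _last_slot marker, as suggested by the header hunk earlier); this is an illustration, not the wolfSSL code, and it assumes function pointers and data pointers have the same size, as on Linux.

#include <stdio.h>
#include <string.h>

/* hypothetical layout: pointer-sized slots, then a _last_slot marker */
struct example_redirect_table {
    void (*fn_a)(int);
    void (*fn_b)(int);
    const void *_last_slot;
};

/* return 0 if every slot before _last_slot is populated, -1 otherwise */
static int example_table_fully_populated(const struct example_redirect_table *t)
{
    const unsigned char *p = (const unsigned char *)t;
    const unsigned char *end = (const unsigned char *)&t->_last_slot;
    for (; p < end; p += sizeof(void *)) {
        void *slot;
        memcpy(&slot, p, sizeof slot); /* byte copy sidesteps aliasing rules */
        if (slot == NULL)
            return -1;
    }
    return 0;
}

static void fn_impl(int x) { (void)x; }

int main(void)
{
    struct example_redirect_table t = { fn_impl, NULL, NULL };
    printf("%d\n", example_table_fully_populated(&t)); /* -1: fn_b still NULL */
    t.fn_b = fn_impl;
    printf("%d\n", example_table_fully_populated(&t)); /* 0: fully populated  */
    return 0;
}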
@@ -26,6 +26,9 @@
 #endif
 
 #include <wolfssl/wolfcrypt/cpuid.h>
+#ifdef HAVE_ENTROPY_MEMUSE
+#include <wolfssl/wolfcrypt/random.h>
+#endif
 #ifdef HAVE_ECC
 #include <wolfssl/wolfcrypt/ecc.h>
 #endif
@@ -4628,4 +4631,10 @@ noinstr void my__alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
     return (wolfssl_linuxkm_get_pie_redirect_table()->
             alt_cb_patch_nops)(alt, origptr, updptr, nr_inst);
 }
+
+void my__queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+    return (wolfssl_linuxkm_get_pie_redirect_table()->
+            queued_spin_lock_slowpath)(lock, val);
+}
 #endif