Mirror of https://github.com/wolfSSL/wolfssl.git, synced 2025-08-01 19:54:40 +02:00
Merge pull request #8964 from douzzer/20250703-linuxkm-fixes
20250703-linuxkm-fixes

Note: the final commit was reviewed by @SparkiDev, and an earlier commit was reviewed by @philljj.
@@ -126,6 +126,7 @@
 
 #if defined(__PIE__) && defined(CONFIG_ARM64)
     #define alt_cb_patch_nops my__alt_cb_patch_nops
+    #define queued_spin_lock_slowpath my__queued_spin_lock_slowpath
 #endif
 
 #include <linux/kernel.h>
@@ -705,20 +706,30 @@
 
 #ifdef CONFIG_ARM64
     #ifdef __PIE__
-        /* alt_cb_patch_nops defined early to allow shimming in system
-         * headers, but now we need the native one.
+        /* alt_cb_patch_nops and queued_spin_lock_slowpath are defined early
+         * to allow shimming in system headers, but now we need the native
+         * ones.
          */
         #undef alt_cb_patch_nops
         typeof(my__alt_cb_patch_nops) *alt_cb_patch_nops;
+        #undef queued_spin_lock_slowpath
+        typeof(my__queued_spin_lock_slowpath) *queued_spin_lock_slowpath;
    #else
        typeof(alt_cb_patch_nops) *alt_cb_patch_nops;
+       typeof(queued_spin_lock_slowpath) *queued_spin_lock_slowpath;
    #endif
 #endif
 
    typeof(preempt_count) *preempt_count;
-   typeof(_raw_spin_lock_irqsave) *_raw_spin_lock_irqsave;
-   typeof(_raw_spin_trylock) *_raw_spin_trylock;
-   typeof(_raw_spin_unlock_irqrestore) *_raw_spin_unlock_irqrestore;
+   #ifndef _raw_spin_lock_irqsave
+   typeof(_raw_spin_lock_irqsave) *_raw_spin_lock_irqsave;
+   #endif
+   #ifndef _raw_spin_trylock
+   typeof(_raw_spin_trylock) *_raw_spin_trylock;
+   #endif
+   #ifndef _raw_spin_unlock_irqrestore
+   typeof(_raw_spin_unlock_irqrestore) *_raw_spin_unlock_irqrestore;
+   #endif
    typeof(_cond_resched) *_cond_resched;
 
    const void *_last_slot;
@@ -885,9 +896,19 @@
 
    #undef preempt_count /* just in case -- not a macro on x86. */
    #define preempt_count (wolfssl_linuxkm_get_pie_redirect_table()->preempt_count)
-   #define _raw_spin_lock_irqsave (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_lock_irqsave)
-   #define _raw_spin_trylock (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_trylock)
-   #define _raw_spin_unlock_irqrestore (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_unlock_irqrestore)
+   #ifndef WOLFSSL_LINUXKM_USE_MUTEXES
+   #ifndef _raw_spin_lock_irqsave
+   #define _raw_spin_lock_irqsave (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_lock_irqsave)
+   #endif
+   #ifndef _raw_spin_trylock
+   #define _raw_spin_trylock (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_trylock)
+   #endif
+   #ifndef _raw_spin_unlock_irqrestore
+   #define _raw_spin_unlock_irqrestore (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_unlock_irqrestore)
+   #endif
+   #endif
    #define _cond_resched (wolfssl_linuxkm_get_pie_redirect_table()->_cond_resched)
 
 /* this is defined in linux/spinlock.h as an inline that calls the unshimmed
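
Background on the pattern in the two hunks above: the linuxkm PIE (position-independent) build of wolfCrypt cannot relocate directly against kernel symbols, so calls are routed through a redirect table populated at module init. A minimal sketch of the three-step pattern, using a hypothetical symbol name "ksym" in place of alt_cb_patch_nops, queued_spin_lock_slowpath, and the _raw_spin_* functions:

/* (1) Before the system headers: rename the symbol so any calls the headers
 *     emit resolve to a local trampoline instead. */
#define ksym my__ksym
/* ... #include system headers ... */
/* (2) After the headers: restore the native name, and declare a table slot
 *     to hold the real symbol's address (filled in at module init). */
#undef ksym
struct pie_redirect_table {
    void (*ksym)(void);
};
/* (3) In PIE-compiled objects: route calls through the table.
 *     (get_pie_redirect_table() is a hypothetical accessor.) */
#define ksym (get_pie_redirect_table()->ksym)
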
@@ -991,8 +1012,8 @@
 
 static inline int wc_LockMutex(wolfSSL_Mutex* m)
 {
-    if (in_nmi() || in_hardirq() || in_softirq())
-        return BAD_STATE_E;
+    if (in_nmi() || hardirq_count() || in_softirq())
+        return -1;
     mutex_lock(m);
     return 0;
 }
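
Context for the wc_LockMutex() change: mutex_lock() can sleep, so it must be refused in any atomic context. Replacing in_hardirq() with hardirq_count() presumably keeps the check buildable on older kernels that predate the in_hardirq() alias; that motivation is an assumption, not stated in the commit. The guard's logic, as a sketch in kernel context:

/* Sketch: true when sleeping is forbidden because we are in NMI, hard-IRQ,
 * or soft-IRQ context.  hardirq_count() is nonzero while any hard interrupt
 * handler is executing. */
static inline int sleeping_forbidden_by_irq_context(void)
{
    return in_nmi() || hardirq_count() || in_softirq();
}
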
@@ -940,6 +940,7 @@ struct wc_swallow_the_semicolon
 #include <wolfssl/wolfcrypt/random.h>
 
 struct wc_linuxkm_drbg_ctx {
+    size_t n_rngs;
     struct wc_rng_inst {
         wolfSSL_Atomic_Int lock;
         WC_RNG rng;
@@ -951,7 +952,7 @@ static inline void wc_linuxkm_drbg_ctx_clear(struct wc_linuxkm_drbg_ctx * ctx)
     unsigned int i;
 
     if (ctx->rngs) {
-        for (i = 0; i < nr_cpu_ids; ++i) {
+        for (i = 0; i < ctx->n_rngs; ++i) {
             if (ctx->rngs[i].lock != 0) {
                 /* better to leak than to crash. */
                 pr_err("BUG: wc_linuxkm_drbg_ctx_clear called with DRBG #%d still locked.", i);
@@ -961,6 +962,7 @@ static inline void wc_linuxkm_drbg_ctx_clear(struct wc_linuxkm_drbg_ctx * ctx)
         }
         free(ctx->rngs);
         ctx->rngs = NULL;
+        ctx->n_rngs = 0;
     }
 
     return;
@@ -976,12 +978,15 @@ static int wc_linuxkm_drbg_init_tfm(struct crypto_tfm *tfm)
     int need_reenable_vec = 0;
     int can_sleep = (preempt_count() == 0);
 
-    ctx->rngs = (struct wc_rng_inst *)malloc(sizeof(*ctx->rngs) * nr_cpu_ids);
-    if (! ctx->rngs)
+    ctx->n_rngs = max(4, nr_cpu_ids);
+    ctx->rngs = (struct wc_rng_inst *)malloc(sizeof(*ctx->rngs) * ctx->n_rngs);
+    if (! ctx->rngs) {
+        ctx->n_rngs = 0;
         return -ENOMEM;
-    XMEMSET(ctx->rngs, 0, sizeof(*ctx->rngs) * nr_cpu_ids);
+    }
+    XMEMSET(ctx->rngs, 0, sizeof(*ctx->rngs) * ctx->n_rngs);
 
-    for (i = 0; i < nr_cpu_ids; ++i) {
+    for (i = 0; i < ctx->n_rngs; ++i) {
         ctx->rngs[i].lock = 0;
         if (wc_linuxkm_drbg_init_tfm_disable_vector_registers)
             need_reenable_vec = (DISABLE_VECTOR_REGISTERS() == 0);
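
The sizing change above allocates at least four DRBG instances even on unicore systems (nr_cpu_ids == 1), so a generate call always has spare slots to fall through to rather than stalling behind a whole-array reseed. Expressed without the kernel's max() macro, the computation is simply:

/* Equivalent arithmetic to max(4, nr_cpu_ids), spelled out. */
unsigned int n_rngs = (nr_cpu_ids > 4) ? nr_cpu_ids : 4;
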
@@ -1015,10 +1020,29 @@ static void wc_linuxkm_drbg_exit_tfm(struct crypto_tfm *tfm)
 
 static int wc_linuxkm_drbg_default_instance_registered = 0;
 
+/* get_drbg() uses atomic operations to get exclusive ownership of a DRBG
+ * without delay.  It expects to be called in uninterruptible context, though
+ * works fine in any context.  It starts by trying the DRBG matching the current
+ * CPU ID, and if that doesn't immediately succeed, it iterates upward until one
+ * succeeds.  The first attempt will always succeed, even under intense load,
+ * unless there is or has recently been a reseed or mix-in operation competing
+ * with generators.
+ *
+ * Note that wc_linuxkm_drbg_init_tfm() allocates at least 4 DRBGs, regardless
+ * of nominal core count, to avoid stalling generators on unicore targets.
+ */
+
 static inline struct wc_rng_inst *get_drbg(struct crypto_rng *tfm) {
     struct wc_linuxkm_drbg_ctx *ctx = (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(tfm);
     int n, new_lock_value;
 
+    /* check for mismatched handler or missing instance array. */
+    if ((tfm->base.__crt_alg->cra_init != wc_linuxkm_drbg_init_tfm) ||
+        (ctx->rngs == NULL))
+    {
+        return NULL;
+    }
+
 #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
     if (tfm == crypto_default_rng) {
@@ -1041,7 +1065,7 @@ static inline struct wc_rng_inst *get_drbg(struct crypto_rng *tfm) {
         if (likely(__atomic_compare_exchange_n(&ctx->rngs[n].lock, &expected, new_lock_value, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)))
             return &ctx->rngs[n];
         ++n;
-        if (n >= (int)nr_cpu_ids)
+        if (n >= (int)ctx->n_rngs)
             n = 0;
         cpu_relax();
     }
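
The loop above is the core of get_drbg(): a strong compare-and-swap of the per-instance lock word from 0 to new_lock_value, scanning upward with wraparound on contention. A self-contained userspace sketch of the same pattern, using the same GCC/Clang __atomic builtins (names here are illustrative, and 1 stands in for new_lock_value):

#include <stddef.h>

struct inst { int lock; };

/* Scan upward from a starting slot until one CAS from 0 -> 1 succeeds.
 * Mirrors get_drbg(): the starting slot is normally free, so the first
 * attempt succeeds unless a reseed or mix-in currently holds it. */
static struct inst *try_acquire(struct inst *insts, size_t n_insts, size_t start)
{
    size_t n = start;
    for (;;) {
        int expected = 0;
        if (__atomic_compare_exchange_n(&insts[n].lock, &expected, 1,
                                        0 /* strong CAS */,
                                        __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE))
            return &insts[n];
        if (++n >= n_insts)
            n = 0;  /* wrap, like the n >= ctx->n_rngs check above */
    }
}
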
@@ -1049,12 +1073,20 @@ static inline struct wc_rng_inst *get_drbg(struct crypto_rng *tfm) {
     __builtin_unreachable();
 }
 
+/* get_drbg_n() is used by bulk seed, mix-in, and reseed operations.  It expects
+ * the caller to be able to wait until the requested DRBG is available.
+ */
 static inline struct wc_rng_inst *get_drbg_n(struct wc_linuxkm_drbg_ctx *ctx, int n) {
+    int can_sleep = (preempt_count() == 0);
+
     for (;;) {
         int expected = 0;
         if (likely(__atomic_compare_exchange_n(&ctx->rngs[n].lock, &expected, 1, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)))
             return &ctx->rngs[n];
-        cpu_relax();
+        if (can_sleep)
+            cond_resched();
+        else
+            cpu_relax();
     }
 
     __builtin_unreachable();
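
The matching unlock does not appear in these hunks; under the acquire/release discipline used by the CAS above it would reduce to a plain releasing store. A sketch of the assumed counterpart, not code from this commit:

/* Assumed counterpart to the CAS acquire (illustrative). */
static inline void release_inst(struct wc_rng_inst *inst)
{
    __atomic_store_n(&inst->lock, 0, __ATOMIC_RELEASE);
}
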
@@ -1078,17 +1110,18 @@ static int wc_linuxkm_drbg_generate(struct crypto_rng *tfm,
                                     u8 *dst, unsigned int dlen)
 {
     int ret, retried = 0;
-    /* Note, core is not necessarily locked on entry, so the actual core ID may
-     * change while executing, hence the lock.
-     *
-     * The lock is also needed to coordinate with wc_linuxkm_drbg_seed(), which
-     * seeds all instances.
-     */
+    int need_fpu_restore;
     struct wc_rng_inst *drbg = get_drbg(tfm);
 
+    if (! drbg) {
+        pr_err_once("BUG: get_drbg() failed.");
+        return -EFAULT;
+    }
+
     /* for the default RNG, make sure we don't cache an underlying SHA256
      * method that uses vector insns (forbidden from irq handlers).
      */
-    int need_fpu_restore = (tfm == crypto_default_rng) ? (DISABLE_VECTOR_REGISTERS() == 0) : 0;
+    need_fpu_restore = (tfm == crypto_default_rng) ? (DISABLE_VECTOR_REGISTERS() == 0) : 0;
 
 retry:
 
@@ -1138,6 +1171,13 @@ static int wc_linuxkm_drbg_seed(struct crypto_rng *tfm,
     int ret;
     int n;
 
+    if ((tfm->base.__crt_alg->cra_init != wc_linuxkm_drbg_init_tfm) ||
+        (ctx->rngs == NULL))
+    {
+        pr_err_once("BUG: mismatched tfm.");
+        return -EFAULT;
+    }
+
     if (slen == 0)
         return 0;
 
@@ -1146,7 +1186,10 @@ static int wc_linuxkm_drbg_seed(struct crypto_rng *tfm,
         return -ENOMEM;
     XMEMCPY(seed_copy + 2, seed, slen);
 
-    for (n = nr_cpu_ids - 1; n >= 0; --n) {
+    /* this iteration counts down, whereas the iteration in get_drbg() counts
+     * up, to assure they can't possibly phase-lock to each other.
+     */
+    for (n = ctx->n_rngs - 1; n >= 0; --n) {
         struct wc_rng_inst *drbg = get_drbg_n(ctx, n);
 
         /* perturb the seed with the CPU ID, so that no DRBG has the exact same
@@ -1249,7 +1292,13 @@ static inline struct crypto_rng *get_crypto_default_rng(void) {
 
 static inline struct wc_linuxkm_drbg_ctx *get_default_drbg_ctx(void) {
     struct crypto_rng *current_crypto_default_rng = get_crypto_default_rng();
-    return current_crypto_default_rng ? (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(current_crypto_default_rng) : NULL;
+    struct wc_linuxkm_drbg_ctx *ctx = (current_crypto_default_rng ? (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(current_crypto_default_rng) : NULL);
+    if (ctx && (! ctx->rngs)) {
+        pr_err_once("BUG: get_default_drbg_ctx() found null ctx->rngs.");
+        return NULL;
+    }
+    else
+        return ctx;
 }
 
 static int wc__get_random_bytes(void *buf, size_t len)
@@ -1259,8 +1308,9 @@ static int wc__get_random_bytes(void *buf, size_t len)
         return -EFAULT;
     else {
         int ret = crypto_rng_get_bytes(current_crypto_default_rng, buf, len);
-        if (ret)
+        if (ret) {
             pr_warn("BUG: wc_get_random_bytes falling through to native get_random_bytes with wc_linuxkm_drbg_default_instance_registered, ret=%d.", ret);
+        }
         return ret;
     }
     __builtin_unreachable();
@@ -1382,7 +1432,7 @@ static int wc_mix_pool_bytes(const void *buf, size_t len) {
     if (! (ctx = get_default_drbg_ctx()))
         return -EFAULT;
 
-    for (n = nr_cpu_ids - 1; n >= 0; --n) {
+    for (n = ctx->n_rngs - 1; n >= 0; --n) {
         struct wc_rng_inst *drbg = get_drbg_n(ctx, n);
         int V_offset = 0;
 
@@ -1406,7 +1456,7 @@ static int wc_crng_reseed(void) {
     if (! ctx)
         return -EFAULT;
 
-    for (n = nr_cpu_ids - 1; n >= 0; --n) {
+    for (n = ctx->n_rngs - 1; n >= 0; --n) {
         struct wc_rng_inst *drbg = get_drbg_n(ctx, n);
         ((struct DRBG_internal *)drbg->rng.drbg)->reseedCtr = WC_RESEED_INTERVAL;
         if (can_sleep) {
@@ -1786,6 +1836,7 @@ static int wc_linuxkm_drbg_startup(void)
     }
     else {
         pr_err("ERROR: wolfssl_linuxkm_register_random_bytes_handlers() failed: %d\n", ret);
+        return ret;
     }
 
 #elif defined(WOLFSSL_LINUXKM_USE_GET_RANDOM_KPROBES)
@@ -1797,6 +1848,7 @@ static int wc_linuxkm_drbg_startup(void)
     }
     else {
         pr_err("ERROR: wc_get_random_bytes_kprobe installation failed: %d\n", ret);
+        return ret;
     }
 
 #ifdef WOLFSSL_LINUXKM_USE_GET_RANDOM_USER_KRETPROBE
@@ -1807,6 +1859,7 @@ static int wc_linuxkm_drbg_startup(void)
     }
     else {
         pr_err("ERROR: wc_get_random_bytes_user_kprobe installation failed: %d\n", ret);
+        return ret;
     }
 #endif /* WOLFSSL_LINUXKM_USE_GET_RANDOM_USER_KRETPROBE */
 
@@ -658,13 +658,20 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
 #endif
 
     wolfssl_linuxkm_pie_redirect_table.preempt_count = my_preempt_count;
+    #ifndef _raw_spin_lock_irqsave
     wolfssl_linuxkm_pie_redirect_table._raw_spin_lock_irqsave = _raw_spin_lock_irqsave;
+    #endif
+    #ifndef _raw_spin_trylock
     wolfssl_linuxkm_pie_redirect_table._raw_spin_trylock = _raw_spin_trylock;
+    #endif
+    #ifndef _raw_spin_unlock_irqrestore
     wolfssl_linuxkm_pie_redirect_table._raw_spin_unlock_irqrestore = _raw_spin_unlock_irqrestore;
+    #endif
     wolfssl_linuxkm_pie_redirect_table._cond_resched = _cond_resched;
 
 #ifdef CONFIG_ARM64
     wolfssl_linuxkm_pie_redirect_table.alt_cb_patch_nops = alt_cb_patch_nops;
+    wolfssl_linuxkm_pie_redirect_table.queued_spin_lock_slowpath = queued_spin_lock_slowpath;
 #endif
 
     /* runtime assert that the table has no null slots after initialization. */
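
The "runtime assert" mentioned in the context line above verifies that every slot of the redirect table was populated. One plausible shape for such a scan, given the table's trailing _last_slot member seen earlier in this diff (a sketch; the actual check is outside this diff and may differ):

/* Sketch: treat the table as an array of pointers and verify every slot up
 * to the _last_slot sentinel is non-null. */
const void **slot = (const void **)&wolfssl_linuxkm_pie_redirect_table;
while (slot < &wolfssl_linuxkm_pie_redirect_table._last_slot) {
    if (*slot++ == NULL)
        return -EFAULT;
}
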
@@ -1,5 +1,5 @@
 --- ./drivers/char/random.c.dist	2024-01-19 16:25:03.754138321 -0600
-+++ ./drivers/char/random.c	2025-07-02 10:45:31.769041473 -0500
++++ ./drivers/char/random.c	2025-07-03 15:51:24.282595676 -0500
 @@ -60,6 +60,260 @@
  #include <asm/irq_regs.h>
  #include <asm/io.h>
@@ -426,7 +426,7 @@
 +	/* fall through to reseed native crng too. */
 +	if (call_crng_reseed_cb() == 0) {
 +		if (crng_ready())
-+			crng_reseed(NULL);
++			crng_reseed();
 +		return 0;
 +	}
 +#endif
@@ -1494,13 +1494,23 @@ static wolfSSL_Mutex entropy_mutex WOLFSSL_MUTEX_INITIALIZER_CLAUSE(entropy_mutex);
 int wc_Entropy_Get(int bits, unsigned char* entropy, word32 len)
 {
     int ret = 0;
+#ifdef WOLFSSL_SMALL_STACK
+    byte *noise = NULL;
+#else
     byte noise[MAX_NOISE_CNT];
+#endif
     /* Noise length is the number of 8 byte samples required to get the bits of
      * entropy requested. */
     int noise_len = (bits + ENTROPY_EXTRA) / ENTROPY_MIN;
 
+#ifdef WOLFSSL_SMALL_STACK
+    noise = (byte *)XMALLOC(MAX_NOISE_CNT, NULL, DYNAMIC_TYPE_TMP_BUFFER);
+    if (noise == NULL)
+        return MEMORY_E;
+#endif
+
     /* Lock the mutex as collection uses globals. */
-    if (wc_LockMutex(&entropy_mutex) != 0) {
+    if ((ret == 0) && (wc_LockMutex(&entropy_mutex) != 0)) {
         ret = BAD_MUTEX_E;
     }
 
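
The hunk above applies wolfSSL's WOLFSSL_SMALL_STACK idiom: large locals move to the heap when kernel stack is scarce, with the buffer freed on the common exit path. In general form, as a sketch with a hypothetical buffer size BUF_SZ and function name:

int small_stack_example(void)
{
    int ret = 0;
#ifdef WOLFSSL_SMALL_STACK
    byte *buf = (byte *)XMALLOC(BUF_SZ, NULL, DYNAMIC_TYPE_TMP_BUFFER);
    if (buf == NULL)
        return MEMORY_E;  /* nothing else allocated yet, safe to bail early */
#else
    byte buf[BUF_SZ];
#endif

    /* ... work with buf, recording failures in ret rather than returning,
     * so the single XFREE below runs on every path ... */

#ifdef WOLFSSL_SMALL_STACK
    XFREE(buf, NULL, DYNAMIC_TYPE_TMP_BUFFER);
#endif
    return ret;
}
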
@@ -1558,6 +1568,10 @@ int wc_Entropy_Get(int bits, unsigned char* entropy, word32 len)
         wc_UnLockMutex(&entropy_mutex);
     }
 
+#ifdef WOLFSSL_SMALL_STACK
+    XFREE(noise, NULL, DYNAMIC_TYPE_TMP_BUFFER);
+#endif
+
     return ret;
 }
 
@@ -26,6 +26,9 @@
 #endif
 
 #include <wolfssl/wolfcrypt/cpuid.h>
+#ifdef HAVE_ENTROPY_MEMUSE
+#include <wolfssl/wolfcrypt/random.h>
+#endif
 #ifdef HAVE_ECC
     #include <wolfssl/wolfcrypt/ecc.h>
 #endif
@@ -4628,4 +4631,10 @@ noinstr void my__alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
     return (wolfssl_linuxkm_get_pie_redirect_table()->
             alt_cb_patch_nops)(alt, origptr, updptr, nr_inst);
 }
 
+void my__queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+    return (wolfssl_linuxkm_get_pie_redirect_table()->
+            queued_spin_lock_slowpath)(lock, val);
+}
 #endif