linuxkm/lkcapi_sha_glue.c: implement mutex-free sync mechanism for wc_linuxkm_drbg_ctx in new get_drbg(), get_drbg_n(), and put_drbg();

linuxkm/x86_vector_register_glue.c: implement support for WC_FPU_INHIBITED_FLAG, and an `int inhibit_p` argument to save_vector_registers_x86();

wolfcrypt/src/random.c: implement linuxkm support for RDSEED and HAVE_ENTROPY_MEMUSE;

wolfssl/wolfcrypt/error-crypt.h and wolfcrypt/src/error.c: add WC_ACCEL_INHIBIT_E "Crypto acceleration is currently inhibited";

linuxkm/module_hooks.c and linuxkm/x86_vector_register_glue.c: remove broken and bit-rotten WOLFSSL_COMMERCIAL_LICENSE and LINUXKM_FPU_STATES_FOLLOW_THREADS code paths.
Daniel Pouzzner
2025-06-20 13:45:04 -05:00
parent 8cc2ba7153
commit b25d484a4e
8 changed files with 224 additions and 269 deletions
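For context on the first bullet: the "mutex-free sync mechanism" is a per-CPU ownership word managed with atomic test-and-set. A minimal user-space model of the acquire/release pattern, using the same GCC/Clang atomic builtins as the glue code below — all names here are illustrative, not part of the commit:

#include <stdbool.h>

struct demo_inst {
    unsigned char lock;   /* 0 = free, nonzero = owned */
    /* ... per-instance state (a WC_RNG in the real code) ... */
};

static inline bool demo_try_acquire(struct demo_inst *inst) {
    /* __atomic_test_and_set() returns the previous value: false means we
     * transitioned 0 -> 1 and now own the instance. */
    return !__atomic_test_and_set(&inst->lock, __ATOMIC_ACQUIRE);
}

static inline void demo_release(struct demo_inst *inst) {
    __atomic_store_n(&inst->lock, 0, __ATOMIC_RELEASE);
}

The in-tree code spins across per-CPU slots instead of blocking, and tags the lock word (1 or 2) to remember whether vector registers were disabled while held.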

View File

@@ -294,7 +294,6 @@ LIBWOLFSSL_VERSION_GIT_ORIGIN
 LIBWOLFSSL_VERSION_GIT_SHORT_HASH
 LIBWOLFSSL_VERSION_GIT_TAG
 LINUXKM_DONT_FORCE_FIPS_ENABLED
-LINUXKM_FPU_STATES_FOLLOW_THREADS
 LINUXKM_LKCAPI_PRIORITY_ALLOW_MASKING
 LINUX_CYCLE_COUNT
 LINUX_RUSAGE_UTIME
@@ -652,7 +651,6 @@ WOLFSSL_CHECK_MEM_ZERO
 WOLFSSL_CHIBIOS
 WOLFSSL_CLANG_TIDY
 WOLFSSL_CLIENT_EXAMPLE
-WOLFSSL_COMMERCIAL_LICENSE
 WOLFSSL_CONTIKI
 WOLFSSL_CRL_ALLOW_MISSING_CDP
 WOLFSSL_CUSTOM_CONFIG

View File

@@ -367,7 +367,7 @@
 extern __must_check int allocate_wolfcrypt_linuxkm_fpu_states(void);
 extern void free_wolfcrypt_linuxkm_fpu_states(void);
 extern __must_check int can_save_vector_registers_x86(void);
-extern __must_check int save_vector_registers_x86(void);
+extern __must_check int save_vector_registers_x86(int inhibit_p);
 extern void restore_vector_registers_x86(void);
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
@@ -383,11 +383,11 @@
     #endif
 #endif
 #ifndef SAVE_VECTOR_REGISTERS
-    #define SAVE_VECTOR_REGISTERS(fail_clause) { \
-        int _svr_ret = save_vector_registers_x86(); \
-        if (_svr_ret != 0) { \
-            fail_clause \
-        } \
+    #define SAVE_VECTOR_REGISTERS(fail_clause) { \
+        int _svr_ret = save_vector_registers_x86(0); \
+        if (_svr_ret != 0) { \
+            fail_clause \
+        } \
     }
 #endif
 #ifndef SAVE_VECTOR_REGISTERS2
@@ -395,17 +395,24 @@
     #define SAVE_VECTOR_REGISTERS2() ({ \
         int _fuzzer_ret = SAVE_VECTOR_REGISTERS2_fuzzer(); \
         (_fuzzer_ret == 0) ? \
-            save_vector_registers_x86() : \
+            save_vector_registers_x86(0) : \
             _fuzzer_ret; \
     })
 #else
-    #define SAVE_VECTOR_REGISTERS2() save_vector_registers_x86()
+    #define SAVE_VECTOR_REGISTERS2() save_vector_registers_x86(0)
 #endif
 #endif
 #ifndef RESTORE_VECTOR_REGISTERS
     #define RESTORE_VECTOR_REGISTERS() restore_vector_registers_x86()
 #endif
+#ifndef DISABLE_VECTOR_REGISTERS
+    #define DISABLE_VECTOR_REGISTERS() save_vector_registers_x86(1)
+#endif
+#ifndef REENABLE_VECTOR_REGISTERS
+    #define REENABLE_VECTOR_REGISTERS() restore_vector_registers_x86()
+#endif
 #elif defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
 #error kernel module ARM SIMD is not yet tested or usable.
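The DISABLE_VECTOR_REGISTERS()/REENABLE_VECTOR_REGISTERS() pair added above brackets a region in which vector-register use is inhibited. A hedged caller-side sketch (the function is hypothetical; per the glue implementation further down, a non-inhibit save inside the bracket fails with WC_ACCEL_INHIBIT_E, and an inhibit attempt while registers are already saved fails with BAD_STATE_E):

int do_interrupt_safe_work(void)
{
    int ret = DISABLE_VECTOR_REGISTERS();
    if (ret != 0)
        return ret; /* e.g. BAD_STATE_E if vector registers are already saved */

    /* ... crypto calls here avoid kernel_fpu_begin()/end(), falling back
     * to plain-C implementations via WC_ACCEL_INHIBIT_E ... */

    REENABLE_VECTOR_REGISTERS();
    return 0;
}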

View File

@@ -941,7 +941,7 @@ struct wc_swallow_the_semicolon
 struct wc_linuxkm_drbg_ctx {
     struct wc_rng_inst {
-        wolfSSL_Mutex lock;
+        wolfSSL_Atomic_Int lock;
         WC_RNG rng;
     } *rngs; /* one per CPU ID */
 };
@@ -952,7 +952,6 @@ static inline void wc_linuxkm_drbg_ctx_clear(struct wc_linuxkm_drbg_ctx * ctx)
     if (ctx->rngs) {
         for (i = 0; i < nr_cpu_ids; ++i) {
-            (void)wc_FreeMutex(&ctx->rngs[i].lock);
             wc_FreeRng(&ctx->rngs[i].rng);
         }
         free(ctx->rngs);
@@ -974,11 +973,7 @@ static int wc_linuxkm_drbg_init_tfm(struct crypto_tfm *tfm)
     XMEMSET(ctx->rngs, 0, sizeof(*ctx->rngs) * nr_cpu_ids);
     for (i = 0; i < nr_cpu_ids; ++i) {
-        ret = wc_InitMutex(&ctx->rngs[i].lock);
-        if (ret != 0) {
-            ret = -EINVAL;
-            break;
-        }
+        ctx->rngs[i].lock = 0;
         /* Note the new DRBG instance is seeded, and later reseeded, from system
          * get_random_bytes() via wc_GenerateSeed().
@@ -1006,39 +1001,96 @@ static void wc_linuxkm_drbg_exit_tfm(struct crypto_tfm *tfm)
     return;
 }
+static int wc_linuxkm_drbg_default_instance_registered = 0;
+static inline struct wc_rng_inst *get_drbg(struct crypto_rng *tfm) {
+    struct wc_linuxkm_drbg_ctx *ctx = (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(tfm);
+    int n;
+#ifdef LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT
+    int disabled_vector_registers = 0;
+    if (tfm == crypto_default_rng) {
+        /* The persistent default stdrng needs to use interrupt-safe mechanisms,
+         * because we use it as the back end for get_random_bytes(). With
+         * SMALL_STACK_CACHE, this only affects the ephemeral SHA256 instances used
+         * by the seed generator, but without SMALL_STACK_CACHE it affects every
+         * DRBG call.
+         */
+        int ret = DISABLE_VECTOR_REGISTERS();
+        if (ret != 0) {
+            pr_err("get_drbg DISABLE_VECTOR_REGISTERS returned %d", ret);
+        } else {
+            disabled_vector_registers = 1;
+        }
+    }
+#endif
+    n = raw_smp_processor_id();
+    for (;;) {
+        if (likely(__atomic_test_and_set(&ctx->rngs[n].lock, __ATOMIC_ACQUIRE) == 0)) {
+#ifdef LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT
+            if (disabled_vector_registers)
+                ctx->rngs[n].lock = 2;
+            else
+                ctx->rngs[n].lock = 1; /* be sure. */
+#endif
+            return &ctx->rngs[n];
+        }
+        ++n;
+        if (n >= (int)nr_cpu_ids)
+            n = 0;
+    }
+    __builtin_unreachable();
+}
+static inline struct wc_rng_inst *get_drbg_n(struct wc_linuxkm_drbg_ctx *ctx, int n) {
+    for (;;) {
+        if (likely(__atomic_test_and_set(&ctx->rngs[n].lock, __ATOMIC_ACQUIRE) == 0))
+            return &ctx->rngs[n];
+        cond_resched();
+    }
+    __builtin_unreachable();
+}
+static inline void put_drbg(struct wc_rng_inst *drbg) {
+#ifdef LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT
+    if (drbg->lock == 2)
+        REENABLE_VECTOR_REGISTERS();
+#endif
+    __atomic_store_n(&(drbg->lock), 0, __ATOMIC_RELEASE);
+}
 static int wc_linuxkm_drbg_generate(struct crypto_rng *tfm,
                                     const u8 *src, unsigned int slen,
                                     u8 *dst, unsigned int dlen)
 {
     struct wc_linuxkm_drbg_ctx *ctx = (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(tfm);
     int ret;
     /* Note, core is not locked, so the actual core ID may change while
-     * executing, hence the mutex.
-     * The mutex is also needed to coordinate with wc_linuxkm_drbg_seed(), which
+     * executing, hence the lock.
+     * The lock is also needed to coordinate with wc_linuxkm_drbg_seed(), which
      * seeds all instances.
      */
-    int my_cpu = raw_smp_processor_id();
-    wolfSSL_Mutex *lock = &ctx->rngs[my_cpu].lock;
-    WC_RNG *rng = &ctx->rngs[my_cpu].rng;
-    if (wc_LockMutex(lock) != 0)
-        return -EINVAL;
+    struct wc_rng_inst *drbg = get_drbg(tfm);
     if (slen > 0) {
-        ret = wc_RNG_DRBG_Reseed(rng, src, slen);
+        ret = wc_RNG_DRBG_Reseed(&drbg->rng, src, slen);
         if (ret != 0) {
             ret = -EINVAL;
             goto out;
         }
     }
-    ret = wc_RNG_GenerateBlock(rng, dst, dlen);
+    ret = wc_RNG_GenerateBlock(&drbg->rng, dst, dlen);
     if (ret != 0)
         ret = -EINVAL;
 out:
-    wc_UnLockMutex(lock);
+    put_drbg(drbg);
     return ret;
 }
@@ -1049,7 +1101,7 @@ static int wc_linuxkm_drbg_seed(struct crypto_rng *tfm,
     struct wc_linuxkm_drbg_ctx *ctx = (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(tfm);
     u8 *seed_copy = NULL;
     int ret;
-    unsigned int i;
+    int n;
     if (slen == 0)
         return 0;
@@ -1059,25 +1111,21 @@ static int wc_linuxkm_drbg_seed(struct crypto_rng *tfm,
         return -ENOMEM;
     XMEMCPY(seed_copy + 2, seed, slen);
-    for (i = 0; i < nr_cpu_ids; ++i) {
-        wolfSSL_Mutex *lock = &ctx->rngs[i].lock;
-        WC_RNG *rng = &ctx->rngs[i].rng;
+    for (n = nr_cpu_ids - 1; n >= 0; --n) {
+        struct wc_rng_inst *drbg = get_drbg_n(ctx, n);
         /* perturb the seed with the CPU ID, so that no DRBG has the exact same
          * seed.
          */
-        seed_copy[0] = (u8)(i >> 8);
-        seed_copy[1] = (u8)i;
-        if (wc_LockMutex(lock) != 0)
-            return -EINVAL;
-        ret = wc_RNG_DRBG_Reseed(rng, seed_copy, slen + 2);
+        seed_copy[0] = (u8)(n >> 8);
+        seed_copy[1] = (u8)n;
+        ret = wc_RNG_DRBG_Reseed(&drbg->rng, seed_copy, slen + 2);
         if (ret != 0) {
             ret = -EINVAL;
         }
-        wc_UnLockMutex(lock);
+        put_drbg(drbg);
         if (ret != 0)
             break;
@@ -1103,7 +1151,6 @@ static struct rng_alg wc_linuxkm_drbg = {
     }
 };
 static int wc_linuxkm_drbg_loaded = 0;
-static int wc_linuxkm_drbg_default_instance_registered = 0;
 WC_MAYBE_UNUSED static int wc_linuxkm_drbg_startup(void)
 {
@@ -1222,7 +1269,18 @@ WC_MAYBE_UNUSED static int wc_linuxkm_drbg_startup(void)
         return ret;
     }
+#ifdef DISABLE_VECTOR_REGISTERS
+    ret = DISABLE_VECTOR_REGISTERS();
+    if (ret != 0) {
+        pr_err("DISABLE_VECTOR_REGISTERS() returned %d", ret);
+        return -EINVAL;
+    }
+#endif
     ret = crypto_get_default_rng();
+#ifdef DISABLE_VECTOR_REGISTERS
+    REENABLE_VECTOR_REGISTERS();
+#endif
     if (ret) {
         pr_err("crypto_get_default_rng returned %d", ret);
         return ret;
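For orientation, this is how kernel code ends up exercising the DRBG these hooks service, via the stock <crypto/rng.h> interface; the demo function is hypothetical, but crypto_alloc_rng()/crypto_rng_get_bytes() are the standard API that lands in wc_linuxkm_drbg_generate() above, one per-CPU WC_RNG instance at a time:

#include <crypto/rng.h>
#include <linux/err.h>

static int demo_use_stdrng(u8 *buf, unsigned int len)
{
    struct crypto_rng *rng;
    int ret;

    rng = crypto_alloc_rng("stdrng", 0, 0);  /* resolves by priority */
    if (IS_ERR(rng))
        return PTR_ERR(rng);

    ret = crypto_rng_get_bytes(rng, buf, len);  /* -> ..._drbg_generate() */

    crypto_free_rng(rng);
    return ret;
}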

View File

@@ -20,12 +20,8 @@
  */
 #ifndef WOLFSSL_LICENSE
-#ifdef WOLFSSL_COMMERCIAL_LICENSE
-#define WOLFSSL_LICENSE "wolfSSL Commercial"
-#else
 #define WOLFSSL_LICENSE "GPL v2"
-#endif
 #endif
 #define WOLFSSL_LINUXKM_NEED_LINUX_CURRENT
@@ -454,22 +450,6 @@ static struct task_struct *my_get_current_thread(void) {
     return get_current();
 }
-#if defined(WOLFSSL_LINUXKM_SIMD_X86) && defined(WOLFSSL_COMMERCIAL_LICENSE)
-/* ditto for fpregs_lock/fpregs_unlock */
-#ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
-static void my_fpregs_lock(void) {
-    fpregs_lock();
-}
-static void my_fpregs_unlock(void) {
-    fpregs_unlock();
-}
-#endif
-#endif /* WOLFSSL_LINUXKM_SIMD_X86 && WOLFSSL_COMMERCIAL_LICENSE */
 #endif /* USE_WOLFSSL_LINUXKM_PIE_REDIRECT_TABLE */
 static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
     memset(
         &wolfssl_linuxkm_pie_redirect_table,

View File

@@ -41,30 +41,9 @@ struct wc_thread_fpu_count_ent {
 };
 struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_states = NULL;
-#ifdef WOLFSSL_COMMERCIAL_LICENSE
-#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
-    #error WOLFSSL_COMMERCIAL_LICENSE requires LINUXKM_FPU_STATES_FOLLOW_THREADS
-#endif
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#pragma GCC diagnostic ignored "-Wnested-externs"
-/* avoid dependence on "alternatives_patched" and "xfd_validate_state()". */
-#undef CONFIG_X86_DEBUG_FPU
-#include "../kernel/fpu/internal.h"
-#include "../kernel/fpu/xstate.h"
-#pragma GCC diagnostic pop
-static union wc_linuxkm_fpu_savebuf {
-    byte buf[1024]; /* must be 64-byte-aligned */
-    struct fpstate fpstate;
-} *wc_linuxkm_fpu_savebufs = NULL;
-#endif /* WOLFSSL_COMMERCIAL_LICENSE */
-#define WC_FPU_COUNT_MASK 0x7fffffffU
-#define WC_FPU_SAVED_MASK 0x80000000U
+#define WC_FPU_COUNT_MASK 0x3fffffffU
+#define WC_FPU_INHIBITED_FLAG 0x40000000U
+#define WC_FPU_ALREADY_FLAG 0x80000000U
 WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
 {
@@ -83,14 +62,7 @@ WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
 #endif
     }
-#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
-    if (nr_cpu_ids >= 16)
-        wc_linuxkm_fpu_states_n_tracked = nr_cpu_ids * 2;
-    else
-        wc_linuxkm_fpu_states_n_tracked = 32;
-#else
     wc_linuxkm_fpu_states_n_tracked = nr_cpu_ids;
-#endif
     wc_linuxkm_fpu_states =
         (struct wc_thread_fpu_count_ent *)malloc(
@@ -106,35 +78,6 @@ WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
     memset(wc_linuxkm_fpu_states, 0, wc_linuxkm_fpu_states_n_tracked
            * sizeof(wc_linuxkm_fpu_states[0]));
-#ifdef WOLFSSL_COMMERCIAL_LICENSE
-    wc_linuxkm_fpu_savebufs = (union wc_linuxkm_fpu_savebuf *)malloc(
-        wc_linuxkm_fpu_states_n_tracked * sizeof(*wc_linuxkm_fpu_savebufs));
-    if (! wc_linuxkm_fpu_savebufs) {
-        pr_err("allocation of %lu bytes for "
-               "wc_linuxkm_fpu_savebufs failed.\n",
-               WC_LINUXKM_ROUND_UP_P_OF_2(wc_linuxkm_fpu_states_n_tracked)
-               * sizeof(*wc_linuxkm_fpu_savebufs));
-        free(wc_linuxkm_fpu_states);
-        wc_linuxkm_fpu_states = NULL;
-        return MEMORY_E;
-    }
-    if ((uintptr_t)wc_linuxkm_fpu_savebufs
-        & (WC_LINUXKM_ROUND_UP_P_OF_2(sizeof(*wc_linuxkm_fpu_savebufs)) - 1))
-    {
-        pr_err("allocation of %lu bytes for "
-               "wc_linuxkm_fpu_savebufs allocated with wrong alignment 0x%lx.\n",
-               WC_LINUXKM_ROUND_UP_P_OF_2(wc_linuxkm_fpu_states_n_tracked)
-               * sizeof(*wc_linuxkm_fpu_savebufs),
-               (uintptr_t)wc_linuxkm_fpu_savebufs);
-        free(wc_linuxkm_fpu_savebufs);
-        wc_linuxkm_fpu_savebufs = NULL;
-        free(wc_linuxkm_fpu_states);
-        wc_linuxkm_fpu_states = NULL;
-        return MEMORY_E;
-    }
-#endif
     return 0;
 }
@@ -163,82 +106,10 @@ void free_wolfcrypt_linuxkm_fpu_states(void) {
         }
     }
-#ifdef WOLFSSL_COMMERCIAL_LICENSE
-    free(wc_linuxkm_fpu_savebufs);
-    wc_linuxkm_fpu_savebufs = NULL;
-#endif
     free(wc_linuxkm_fpu_states);
     wc_linuxkm_fpu_states = NULL;
 }
-#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
-/* legacy thread-local storage facility for tracking recursive fpu
- * pushing/popping
- */
-static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc(
-    int create_p, int assume_fpu_began)
-{
-    struct wc_thread_fpu_count_ent *i, *i_endptr, *i_empty;
-    pid_t my_pid = task_pid_nr(current), i_pid;
-    (void)assume_fpu_began;
-    {
-        static int _warned_on_null = 0;
-        if (wc_linuxkm_fpu_states == NULL)
-        {
-#ifdef HAVE_FIPS
-            /* FIPS needs to use SHA256 for the core verify HMAC, before
-             * reaching the regular wolfCrypt_Init() logic. to break the
-             * dependency loop on intelasm builds, we allocate here.
-             * this is not thread-safe and doesn't need to be.
-             */
-            if ((! create_p) || (allocate_wolfcrypt_linuxkm_fpu_states() != 0))
-#endif
-            {
-                if (_warned_on_null == 0) {
-                    pr_err("wc_linuxkm_fpu_state_assoc called by pid %d"
-                           " before allocate_wolfcrypt_linuxkm_fpu_states.\n", my_pid);
-                    _warned_on_null = 1;
-                }
-                return NULL;
-            }
-        }
-    }
-    i_endptr = &wc_linuxkm_fpu_states[wc_linuxkm_fpu_states_n_tracked];
-    for (;;) {
-        for (i = wc_linuxkm_fpu_states,
-                 i_empty = NULL;
-             i < i_endptr;
-             ++i)
-        {
-            i_pid = __atomic_load_n(&i->pid, __ATOMIC_CONSUME);
-            if (i_pid == my_pid)
-                return i;
-            if ((i_empty == NULL) && (i_pid == 0))
-                i_empty = i;
-        }
-        if ((i_empty == NULL) || (! create_p))
-            return NULL;
-        i_pid = 0;
-        if (__atomic_compare_exchange_n(
-                &(i_empty->pid),
-                &i_pid,
-                my_pid,
-                0 /* weak */,
-                __ATOMIC_SEQ_CST /* success_memmodel */,
-                __ATOMIC_SEQ_CST /* failure_memmodel */))
-        {
-            return i_empty;
-        }
-    }
-}
-#else /* !LINUXKM_FPU_STATES_FOLLOW_THREADS */
 /* lock-free O(1)-lookup CPU-local storage facility for tracking recursive fpu
  * pushing/popping.
  *
@@ -374,17 +245,6 @@ static inline struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc(
     }
 }
-#endif /* !LINUXKM_FPU_STATES_FOLLOW_THREADS */
-#ifdef WOLFSSL_COMMERCIAL_LICENSE
-static struct fpstate *wc_linuxkm_fpstate_buf_from_fpu_state(
-    struct wc_thread_fpu_count_ent *state)
-{
-    size_t i = (size_t)(state - wc_linuxkm_fpu_states) / sizeof(*state);
-    return &wc_linuxkm_fpu_savebufs[i].fpstate;
-}
-#endif
 static void wc_linuxkm_fpu_state_release_unlikely(
     struct wc_thread_fpu_count_ent *ent)
 {
@@ -420,6 +280,8 @@ WARN_UNUSED_RESULT int can_save_vector_registers_x86(void)
     struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(0, 0);
     if ((pstate != NULL) && (pstate->fpu_state != 0U)) {
+        if (unlikely(pstate->fpu_state & WC_FPU_INHIBITED_FLAG))
+            return 0;
         if (unlikely((pstate->fpu_state & WC_FPU_COUNT_MASK)
                      == WC_FPU_COUNT_MASK))
         {
@@ -459,25 +321,25 @@ WARN_UNUSED_RESULT int can_save_vector_registers_x86(void)
 #endif
 }
-WARN_UNUSED_RESULT int save_vector_registers_x86(void)
+WARN_UNUSED_RESULT int save_vector_registers_x86(int inhibit_p)
 {
-#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
-    struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(1, 0);
-#else
     struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(0, 0);
-#endif
     /* allow for nested calls */
-#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
-    if (pstate == NULL)
-        return MEMORY_E;
-#endif
-    if (
-#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
-        (pstate != NULL) &&
-#endif
-        (pstate->fpu_state != 0U))
-    {
+    if (pstate && (pstate->fpu_state != 0U)) {
+        if (unlikely(pstate->fpu_state & WC_FPU_INHIBITED_FLAG)) {
+            if (inhibit_p) {
+                /* allow recursive inhibit calls as long as the whole stack of
+                 * them is inhibiting.
+                 */
+                ++pstate->fpu_state;
+                return 0;
+            }
+            else
+                return WC_ACCEL_INHIBIT_E;
+        }
+        if (unlikely(inhibit_p))
+            return BAD_STATE_E;
         if (unlikely((pstate->fpu_state & WC_FPU_COUNT_MASK)
                      == WC_FPU_COUNT_MASK))
         {
@@ -490,6 +352,31 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(void)
         }
     }
+    if (inhibit_p) {
+        preempt_disable();
+#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
+    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+        /* inhibit migration, which gums up the algorithm in
+         * kernel_fpu_{begin,end}().
+         */
+        migrate_disable();
+#endif
+        pstate = wc_linuxkm_fpu_state_assoc(1, 1);
+        if (pstate == NULL) {
+#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
+    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
+            migrate_enable();
+#endif
+            preempt_enable();
+            return BAD_STATE_E;
+        }
+        pstate->fpu_state =
+            WC_FPU_INHIBITED_FLAG + 1U;
+        return 0;
+    }
     if (irq_fpu_usable()
 #if defined(TIF_NEED_FPU_LOAD) && \
     (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)) && \
@@ -514,12 +401,6 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(void)
      * that corrupts the register state.
      */
-#ifdef WOLFSSL_COMMERCIAL_LICENSE
-        struct fpstate *fpstate = wc_linuxkm_fpstate_buf_from_fpu_state(pstate);
-        fpregs_lock();
-        fpstate->xfeatures = ~0UL;
-        os_xsave(fpstate);
-#else /* !WOLFSSL_COMMERCIAL_LICENSE */
 #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
         /* inhibit migration, which gums up the algorithm in
@@ -529,29 +410,22 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(void)
 #endif
         kernel_fpu_begin();
-#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
         pstate = wc_linuxkm_fpu_state_assoc(1, 1);
         if (pstate == NULL) {
             kernel_fpu_end();
 #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
-    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
-    !defined(WOLFSSL_COMMERCIAL_LICENSE)
+    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
             migrate_enable();
 #endif
             return BAD_STATE_E;
         }
-#endif
-#endif /* !WOLFSSL_COMMERCIAL_LICENSE */
         /* set msb to 0 to trigger kernel_fpu_end() at cleanup. */
         pstate->fpu_state = 1U;
     } else if (in_nmi() || (hardirq_count() > 0) || (softirq_count() > 0)) {
         static int warned_fpu_forbidden = 0;
         if (! warned_fpu_forbidden)
             pr_err("save_vector_registers_x86 called from IRQ handler.\n");
-#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
-        wc_linuxkm_fpu_state_release(pstate);
-#endif
         return BAD_STATE_E;
     }
 #if defined(TIF_NEED_FPU_LOAD) && \
@@ -566,25 +440,22 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(void)
          */
         preempt_disable();
 #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
-    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
-    !defined(WOLFSSL_COMMERCIAL_LICENSE)
+    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
         migrate_disable();
 #endif
-#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
         pstate = wc_linuxkm_fpu_state_assoc(1, 1);
         if (pstate == NULL) {
 #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
-    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
-    !defined(WOLFSSL_COMMERCIAL_LICENSE)
+    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
             migrate_enable();
 #endif
             preempt_enable();
             return BAD_STATE_E;
         }
-#endif
-        /* set msb to 1 to inhibit kernel_fpu_end() at cleanup. */
+        /* set _ALREADY_ flag to 1 to inhibit kernel_fpu_end() at cleanup. */
         pstate->fpu_state =
-            WC_FPU_SAVED_MASK + 1U;
+            WC_FPU_ALREADY_FLAG + 1U;
     }
 #endif /* TIF_NEED_FPU_LOAD && <5.17.0 && !5.10.180+ */
     else {
@@ -594,9 +465,6 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(void)
                    " thread without previous FPU save.\n");
             warned_fpu_forbidden = 1;
         }
-#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
-        wc_linuxkm_fpu_state_release(pstate);
-#endif
         return BAD_STATE_E;
     }
@@ -618,32 +486,17 @@ void restore_vector_registers_x86(void)
     }
     if (pstate->fpu_state == 0U) {
-#ifdef WOLFSSL_COMMERCIAL_LICENSE
-        struct fpstate *fpstate = wc_linuxkm_fpstate_buf_from_fpu_state(pstate);
-        os_xrstor(fpstate, fpstate->xfeatures);
-        fpregs_unlock();
-#else
-#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
         wc_linuxkm_fpu_state_release(pstate);
-#endif
         kernel_fpu_end();
-#endif
-    } else {
+    } else if (unlikely(pstate->fpu_state & (WC_FPU_INHIBITED_FLAG | WC_FPU_ALREADY_FLAG))) {
         pstate->fpu_state = 0U;
-#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
         wc_linuxkm_fpu_state_release(pstate);
-#endif
         preempt_enable();
     }
 #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
-    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
-    !defined(WOLFSSL_COMMERCIAL_LICENSE)
+    (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
     migrate_enable();
 #endif
-#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
-    wc_linuxkm_fpu_state_release(pstate);
-#endif
     return;
 }
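The reworked fpu_state word packs a 30-bit recursion depth under WC_FPU_COUNT_MASK with two flag bits above it, replacing the old 31-bit count plus single "saved" MSB. A small decoding sketch (the helper name is illustrative, not part of the commit):

#define WC_FPU_COUNT_MASK     0x3fffffffU  /* bits 0-29: nesting depth */
#define WC_FPU_INHIBITED_FLAG 0x40000000U  /* bit 30: vector regs inhibited */
#define WC_FPU_ALREADY_FLAG   0x80000000U  /* bit 31: regs already saved, skip
                                            * kernel_fpu_end() at cleanup */

/* Returns the nesting depth; reports the two flag bits via out-params. */
static unsigned int wc_fpu_state_decode(unsigned int st,
                                        int *inhibited, int *already)
{
    *inhibited = (st & WC_FPU_INHIBITED_FLAG) != 0;
    *already   = (st & WC_FPU_ALREADY_FLAG) != 0;
    return st & WC_FPU_COUNT_MASK;
}

A thread two levels deep inside DISABLE_VECTOR_REGISTERS() would thus carry fpu_state == WC_FPU_INHIBITED_FLAG + 2U.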

View File

@@ -647,6 +647,9 @@ const char* wc_GetErrorString(int error)
     case ASCON_AUTH_E:
         return "ASCON Authentication check fail";
+    case WC_ACCEL_INHIBIT_E:
+        return "Crypto acceleration is currently inhibited";
     case MAX_CODE_E:
     case WC_SPAN1_MIN_CODE_E:
     case MIN_CODE_E:

View File

@@ -189,8 +189,7 @@ This library contains implementation for the random number generator.
     {
         intel_flags = cpuid_get_flags();
     }
-    #if (defined(HAVE_INTEL_RDSEED) || defined(HAVE_AMD_RDSEED)) && \
-        !defined(WOLFSSL_LINUXKM)
+    #if defined(HAVE_INTEL_RDSEED) || defined(HAVE_AMD_RDSEED)
     static int wc_GenerateSeed_IntelRD(OS_Seed* os, byte* output, word32 sz);
     #endif
     #ifdef HAVE_INTEL_RDRAND
@@ -254,7 +253,12 @@ This library contains implementation for the random number generator.
         #endif
     #elif defined(HAVE_AMD_RDSEED)
         /* This will yield a SEED_SZ of 16kb. Since nonceSz will be 0,
-         * we'll add an additional 8kb on top. */
+         * we'll add an additional 8kb on top.
+         *
+         * See "AMD RNG ESV Public Use Document". Version 0.7 of October 24,
+         * 2024 specifies 0.656 to 1.312 bits of entropy per 128 bit block of
+         * RDSEED output, depending on CPU family.
+         */
         #define ENTROPY_SCALE_FACTOR (512)
     #elif defined(HAVE_INTEL_RDSEED) || defined(HAVE_INTEL_RDRAND)
         /* The value of 2 applies to Intel's RDSEED which provides about
@@ -2573,7 +2577,6 @@ static WC_INLINE int IntelRDseed64_r(word64* rnd)
     return -1;
 }
-#ifndef WOLFSSL_LINUXKM
 /* return 0 on success */
 static int wc_GenerateSeed_IntelRD(OS_Seed* os, byte* output, word32 sz)
 {
@@ -2604,7 +2607,6 @@ static int wc_GenerateSeed_IntelRD(OS_Seed* os, byte* output, word32 sz)
     return 0;
 }
-#endif
 #endif /* HAVE_INTEL_RDSEED || HAVE_AMD_RDSEED */
@@ -3778,16 +3780,69 @@ int wc_GenerateSeed(OS_Seed* os, byte* output, word32 sz)
 #endif /* end WOLFSSL_ESPIDF */
 #elif defined(WOLFSSL_LINUXKM)
+    /* When registering the kernel default DRBG with a native/intrinsic entropy
+     * source, fallback to get_random_bytes() isn't allowed because we replace
+     * it with our DRBG.
+     */
+    #if defined(HAVE_ENTROPY_MEMUSE) && \
+        defined(LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT)
+    int wc_GenerateSeed(OS_Seed* os, byte* output, word32 sz)
+    {
+        (void)os;
+        return wc_Entropy_Get(MAX_ENTROPY_BITS, output, sz);
+    }
+    #elif (defined(HAVE_INTEL_RDSEED) || defined(HAVE_AMD_RDSEED)) && \
+          defined(LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT)
+    int wc_GenerateSeed(OS_Seed* os, byte* output, word32 sz)
+    {
+        (void)os;
+        return wc_GenerateSeed_IntelRD(NULL, output, sz);
+    }
+    #else /* !((HAVE_ENTROPY_MEMUSE || HAVE_*_RDSEED) && LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT) */
     #include <linux/random.h>
     int wc_GenerateSeed(OS_Seed* os, byte* output, word32 sz)
     {
         (void)os;
+        int ret;
+    #ifdef HAVE_ENTROPY_MEMUSE
+        ret = wc_Entropy_Get(MAX_ENTROPY_BITS, output, sz);
+        if (ret == 0) {
+            return 0;
+        }
+    #ifdef ENTROPY_MEMUSE_FORCE_FAILURE
+        /* Don't fallback to /dev/urandom. */
+        return ret;
+    #endif
+    #endif
+    #if defined(HAVE_INTEL_RDSEED) || defined(HAVE_AMD_RDSEED)
+        if (IS_INTEL_RDSEED(intel_flags)) {
+            ret = wc_GenerateSeed_IntelRD(NULL, output, sz);
+    #ifndef FORCE_FAILURE_RDSEED
+            if (ret == 0)
+    #endif
+            {
+                return ret;
+            }
+        }
+    #endif /* HAVE_INTEL_RDSEED || HAVE_AMD_RDSEED */
+        (void)ret;
         get_random_bytes(output, sz);
         return 0;
     }
+    #endif /* !(HAVE_*_RDSEED && LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT) */
 #elif defined(WOLFSSL_RENESAS_TSIP)
 int wc_GenerateSeed(OS_Seed* os, byte* output, word32 sz)
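As a rough cross-check of ENTROPY_SCALE_FACTOR (512), assuming the 32-byte base seed request implied by the "16kb" figure in the comment above: 32 bytes × 512 = 16 KB of raw RDSEED output, i.e. 1024 blocks of 128 bits; at the worst-case 0.656 bits of entropy per block quoted from the AMD ESV document, that is roughly 1024 × 0.656 ≈ 672 bits of gathered entropy, comfortably above a 256-bit seed strength.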

View File

@@ -304,11 +304,12 @@ enum wolfCrypt_ErrorCodes {
     DEADLOCK_AVERTED_E = -1000, /* Deadlock averted -- retry the call */
     ASCON_AUTH_E = -1001, /* ASCON Authentication check failure */
-    WC_SPAN2_LAST_E = -1001, /* Update to indicate last used error code */
+    WC_ACCEL_INHIBIT_E = -1002, /* Crypto acceleration is currently inhibited */
+    WC_SPAN2_LAST_E = -1002, /* Update to indicate last used error code */
     WC_SPAN2_MIN_CODE_E = -1999, /* Last usable code in span 2 */
-    WC_LAST_E = -1001, /* the last code used either here or in
+    WC_LAST_E = -1002, /* the last code used either here or in
                         * error-ssl.h
                         */
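Finally, a hedged sketch of how a caller can treat the new code as a soft failure rather than a hard error (the function and its fallback are hypothetical; the macros are the ones added in this commit):

static int demo_do_transform(void)
{
    int ret = SAVE_VECTOR_REGISTERS2();
    if (ret == 0) {
        /* ... vectorized implementation ... */
        RESTORE_VECTOR_REGISTERS();
        return 0;
    }
    else if (ret == WC_ACCEL_INHIBIT_E) {
        /* Acceleration is bracketed off by DISABLE_VECTOR_REGISTERS() --
         * run the plain-C implementation instead. */
        return 0;
    }
    else {
        return ret; /* e.g. BAD_STATE_E from IRQ context */
    }
}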