linuxkm/linuxkm_wc_port.h: add default setup for LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT, so that it is visible in random.c;
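
A hypothetical sketch of the kind of gate this enables (the actual code in random.c may differ; seed_from_independent_source() and the variable names are illustrative): when the wolfCrypt hash DRBG is registered as the kernel's default stdrng, a get_random_bytes() call made while seeding that DRBG could be routed straight back into it, so the call has to be compiled out in that configuration.

    #ifdef LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT
        /* the kernel's default RNG may be this very DRBG -- a
         * get_random_bytes() call here could recurse, potentially
         * infinitely, so seed from a source that bypasses the LKCAPI
         * default RNG instead (illustrative helper name). */
        ret = seed_from_independent_source(seed, sz);
    #else
        get_random_bytes(seed, sz);
        ret = 0;
    #endif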

linuxkm/lkcapi_sha_glue.c: revert f7c7ac275a (get_drbg() DISABLE_VECTOR_REGISTERS() for crypto_default_rng) -- a compiler/inlining bug breaks it on at least one target, so the caller needs to retain responsibility for disabling vector registers;
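
A condensed sketch of the caller-side pattern this restores (compare the wc_linuxkm_drbg_generate() hunks below): the caller inhibits vector registers itself and re-enables them only if its own DISABLE_VECTOR_REGISTERS() call is the one that actually performed the inhibit (returned 0); otherwise the outer frame that did the inhibit keeps ownership of the re-enable.

    int need_fpu_restore = (tfm == crypto_default_rng) ?
        (DISABLE_VECTOR_REGISTERS() == 0) : 0;
    /* ... generate from the per-CPU DRBG without using vector insns ... */
    if (need_fpu_restore)
        REENABLE_VECTOR_REGISTERS();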

linuxkm/x86_vector_register_glue.c: in wc_save_vector_registers_x86(), always return WC_ACCEL_INHIBIT_E if WC_FPU_INHIBITED_FLAG is already set in fpu_state, for safe and correct dynamics on recursive calls.
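
A minimal sketch of the resulting nesting semantics, assuming DISABLE_VECTOR_REGISTERS() resolves to wc_save_vector_registers_x86() with WC_SVR_FLAG_INHIBIT on this target:

    if (DISABLE_VECTOR_REGISTERS() == 0) {
        /* this frame performed the inhibit and owns the re-enable. */
        int rc = DISABLE_VECTOR_REGISTERS();
        /* rc is now WC_ACCEL_INHIBIT_E: the registers are already inhibited,
         * so this nested frame must not call REENABLE_VECTOR_REGISTERS()
         * itself.
         */
        (void)rc;
        REENABLE_VECTOR_REGISTERS(); /* only the outermost frame re-enables */
    }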
Daniel Pouzzner
2025-08-30 12:08:57 -05:00
parent 7ef94284cc
commit 7df8ee4081
3 changed files with 31 additions and 33 deletions

linuxkm/linuxkm_wc_port.h

@@ -422,6 +422,17 @@
 #define WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS
 #endif
+/* setup for LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT needs to be here
+ * to assure that calls to get_random_bytes() in random.c are gated out
+ * (they would recurse, potentially infinitely).
+ */
+#if (defined(LINUXKM_LKCAPI_REGISTER_ALL) && \
+     !defined(LINUXKM_LKCAPI_DONT_REGISTER_HASH_DRBG) && \
+     !defined(LINUXKM_LKCAPI_DONT_REGISTER_HASH_DRBG_DEFAULT)) && \
+    !defined(LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT)
+#define LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT
+#endif
 #ifndef __PIE__
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>

linuxkm/lkcapi_sha_glue.c

@@ -374,10 +374,7 @@
     !defined(LINUXKM_LKCAPI_REGISTER_HASH_DRBG)
 #define LINUXKM_LKCAPI_REGISTER_HASH_DRBG
 #endif
-#if (defined(LINUXKM_LKCAPI_REGISTER_ALL) && !defined(LINUXKM_LKCAPI_DONT_REGISTER_HASH_DRBG_DEFAULT)) && \
-    !defined(LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT)
-#define LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT
-#endif
+/* setup for LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT is in linuxkm_wc_port.h */
 #else
 #undef LINUXKM_LKCAPI_REGISTER_HASH_DRBG
 #endif
@@ -968,7 +965,6 @@ struct wc_linuxkm_drbg_ctx {
     struct wc_rng_inst {
         wolfSSL_Atomic_Int lock;
         WC_RNG rng;
-        int disabled_vec_ops;
     } *rngs; /* one per CPU ID */
 };
@@ -1090,14 +1086,8 @@ static inline struct wc_rng_inst *get_drbg(struct crypto_rng *tfm) {
     for (;;) {
         int expected = 0;
-        if (likely(__atomic_compare_exchange_n(&ctx->rngs[n].lock, &expected, new_lock_value, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE))) {
-            struct wc_rng_inst *drbg = &ctx->rngs[n];
-            if (tfm == crypto_default_rng)
-                drbg->disabled_vec_ops = (DISABLE_VECTOR_REGISTERS() == 0);
-            else
-                drbg->disabled_vec_ops = 0;
-            return drbg;
-        }
+        if (likely(__atomic_compare_exchange_n(&ctx->rngs[n].lock, &expected, new_lock_value, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)))
+            return &ctx->rngs[n];
         ++n;
         if (n >= (int)ctx->n_rngs)
             n = 0;
@@ -1115,11 +1105,8 @@ static inline struct wc_rng_inst *get_drbg_n(struct wc_linuxkm_drbg_ctx *ctx, in
     for (;;) {
         int expected = 0;
-        if (likely(__atomic_compare_exchange_n(&ctx->rngs[n].lock, &expected, 1, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE))) {
-            struct wc_rng_inst *drbg = &ctx->rngs[n];
-            drbg->disabled_vec_ops = 0;
-            return drbg;
-        }
+        if (likely(__atomic_compare_exchange_n(&ctx->rngs[n].lock, &expected, 1, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)))
+            return &ctx->rngs[n];
         if (can_sleep) {
             if (signal_pending(current))
                 return NULL;
@@ -1137,10 +1124,6 @@ static inline void put_drbg(struct wc_rng_inst *drbg) {
     (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
     int migration_disabled = (drbg->lock == 2);
 #endif
-    if (drbg->disabled_vec_ops) {
-        REENABLE_VECTOR_REGISTERS();
-        drbg->disabled_vec_ops = 0;
-    }
     __atomic_store_n(&(drbg->lock),0,__ATOMIC_RELEASE);
 #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
@@ -1154,6 +1137,7 @@ static int wc_linuxkm_drbg_generate(struct crypto_rng *tfm,
                                     u8 *dst, unsigned int dlen)
 {
     int ret, retried = 0;
+    int need_fpu_restore;
     struct wc_rng_inst *drbg = get_drbg(tfm);
     if (! drbg) {
@@ -1161,6 +1145,11 @@ static int wc_linuxkm_drbg_generate(struct crypto_rng *tfm,
         return -EFAULT;
     }
+    /* for the default RNG, make sure we don't cache an underlying SHA256
+     * method that uses vector insns (forbidden from irq handlers).
+     */
+    need_fpu_restore = (tfm == crypto_default_rng) ? (DISABLE_VECTOR_REGISTERS() == 0) : 0;
 retry:
     if (slen > 0) {
@@ -1194,6 +1183,8 @@ retry:
 out:
+    if (need_fpu_restore)
+        REENABLE_VECTOR_REGISTERS();
     put_drbg(drbg);
     return ret;

linuxkm/x86_vector_register_glue.c

@@ -346,6 +346,13 @@ WARN_UNUSED_RESULT int wc_save_vector_registers_x86(enum wc_svr_flags flags)
     /* allow for nested calls */
     if (pstate && (pstate->fpu_state != 0U)) {
+        if (pstate->fpu_state & WC_FPU_INHIBITED_FLAG) {
+            /* don't allow recursive inhibit calls when already inhibited --
+             * it would add no functionality and require keeping a separate
+             * count of inhibit recursions.
+             */
+            return WC_ACCEL_INHIBIT_E;
+        }
         if (unlikely((pstate->fpu_state & WC_FPU_COUNT_MASK)
                      == WC_FPU_COUNT_MASK))
         {
@@ -353,17 +360,6 @@ WARN_UNUSED_RESULT int wc_save_vector_registers_x86(enum wc_svr_flags flags)
                    "pid %d on CPU %d.\n", pstate->pid, raw_smp_processor_id());
             return BAD_STATE_E;
         }
-        if (pstate->fpu_state & WC_FPU_INHIBITED_FLAG) {
-            if (flags & WC_SVR_FLAG_INHIBIT) {
-                /* allow recursive inhibit calls as long as the whole stack of
-                 * them is inhibiting.
-                 */
-                ++pstate->fpu_state;
-                return 0;
-            }
-            else
-                return WC_ACCEL_INHIBIT_E;
-        }
         if (flags & WC_SVR_FLAG_INHIBIT) {
             ++pstate->fpu_state;
             pstate->fpu_state |= WC_FPU_INHIBITED_FLAG;