Merge pull request #8943 from douzzer/20250617-linuxkm-get_random_bytes

20250617-linuxkm-get_random_bytes
Merged by philljj on 2025-07-03 11:22:08 -05:00 (committed via GitHub).
26 changed files with 4494 additions and 432 deletions

.gitignore
View File

@@ -3,7 +3,6 @@ ctaocrypt/src/src/
*.lo
*.la
*.o
*.patch
*.deps
*.d
*.libs
@@ -246,6 +245,9 @@ linuxkm/libwolfssl.mod.c
linuxkm/libwolfssl.lds
linuxkm/module_exports.c
linuxkm/linuxkm/get_thread_size
linuxkm/linuxkm
linuxkm/src
linuxkm/patches/src
*.nds
# autotools generated

View File

@@ -112,6 +112,7 @@ CONFIG_IDF_TARGET_ESP32S3
CONFIG_IDF_TARGET_ESP8266
CONFIG_IDF_TARGET_ESP8684
CONFIG_KASAN
CONFIG_KPROBES
CONFIG_MAIN_TASK_STACK_SIZE
CONFIG_MBEDTLS_CERTIFICATE_BUNDLE
CONFIG_MBEDTLS_PSA_CRYPTO_C
@@ -294,7 +295,6 @@ LIBWOLFSSL_VERSION_GIT_ORIGIN
LIBWOLFSSL_VERSION_GIT_SHORT_HASH
LIBWOLFSSL_VERSION_GIT_TAG
LINUXKM_DONT_FORCE_FIPS_ENABLED
LINUXKM_FPU_STATES_FOLLOW_THREADS
LINUXKM_LKCAPI_PRIORITY_ALLOW_MASKING
LINUX_CYCLE_COUNT
LINUX_RUSAGE_UTIME
@@ -369,6 +369,7 @@ NO_HANDSHAKE_DONE_CB
NO_IMX6_CAAM_AES
NO_IMX6_CAAM_HASH
NO_KEEP_PEER_CERT
NO_LINUXKM_DRBG_GET_RANDOM_BYTES
NO_OLD_NAMES
NO_OLD_POLY1305
NO_OLD_TIMEVAL_NAME
@@ -539,6 +540,7 @@ USE_ALT_MPRIME
USE_ANY_ADDR
USE_CERT_BUFFERS_25519
USE_CERT_BUFFERS_3072
USE_CONTESTMUTEX
USE_ECDSA_KEYSZ_HASH_ALGO
USE_FULL_ASSERT
USE_HAL_DRIVER
@@ -652,7 +654,6 @@ WOLFSSL_CHECK_MEM_ZERO
WOLFSSL_CHIBIOS
WOLFSSL_CLANG_TIDY
WOLFSSL_CLIENT_EXAMPLE
WOLFSSL_COMMERCIAL_LICENSE
WOLFSSL_CONTIKI
WOLFSSL_CRL_ALLOW_MISSING_CDP
WOLFSSL_CUSTOM_CONFIG
@@ -718,6 +719,9 @@ WOLFSSL_KYBER_NO_DECAPSULATE
WOLFSSL_KYBER_NO_ENCAPSULATE
WOLFSSL_KYBER_NO_MAKE_KEY
WOLFSSL_LIB
WOLFSSL_LINUXKM_USE_GET_RANDOM_KPROBES
WOLFSSL_LINUXKM_USE_GET_RANDOM_USER_KRETPROBE
WOLFSSL_LINUXKM_USE_MUTEXES
WOLFSSL_LMS_CACHE_BITS
WOLFSSL_LMS_FULL_HASH
WOLFSSL_LMS_LARGE_CACHES
@@ -780,6 +784,7 @@ WOLFSSL_NO_SPHINCS
WOLFSSL_NO_STRICT_CIPHER_SUITE
WOLFSSL_NO_TICKET_EXPIRE
WOLFSSL_NO_TRUSTED_CERTS_VERIFY
WOLFSSL_NO_WORD64_OPS
WOLFSSL_NO_XOR_OPS
WOLFSSL_NRF51_AES
WOLFSSL_OLDTLS_AEAD_CIPHERSUITES

View File

@@ -119,6 +119,22 @@ then
AM_CCASFLAGS="$AM_CCASFLAGS -DWOLFSSL_EXPERIMENTAL_SETTINGS"
fi
# Linux Kernel Module options (more options later)
AC_ARG_ENABLE([linuxkm],
[AS_HELP_STRING([--enable-linuxkm],[Enable Linux Kernel Module (default: disabled)])],
[ENABLED_LINUXKM=$enableval],
[ENABLED_LINUXKM=no]
)
AC_ARG_ENABLE([linuxkm-defaults],
[AS_HELP_STRING([--enable-linuxkm-defaults],[Enable feature defaults for Linux Kernel Module (default: disabled)])],
[ENABLED_LINUXKM_DEFAULTS=$enableval],
[ENABLED_LINUXKM_DEFAULTS=$ENABLED_LINUXKM]
)
AC_CHECK_HEADERS([arpa/inet.h fcntl.h limits.h netdb.h netinet/in.h stddef.h time.h sys/ioctl.h sys/socket.h sys/time.h errno.h sys/un.h ctype.h])
AC_CHECK_LIB([network],[socket])
AC_C_BIGENDIAN
@@ -307,19 +323,10 @@ AC_ARG_ENABLE([hmac],
[ ENABLED_HMAC=yes ]
)
# enable HMAC hash copying automatically for x86_64 and aarch64 (except Linux kernel module)
HMAC_COPY_DEFAULT=no
if test "$ENABLED_LINUXKM_DEFAULTS" = "no"
then
if test "$host_cpu" = "x86_64" || test "$host_cpu" = "aarch64" || test "$host_cpu" = "amd64"
then
HMAC_COPY_DEFAULT=yes
fi
fi
AC_ARG_ENABLE([hmac-copy],
[AS_HELP_STRING([--enable-hmac-copy],[Enables digest copying implementation for HMAC (default: disabled)])],
[ ENABLED_HMAC_COPY=$enableval ],
[ ENABLED_HMAC_COPY=$HMAC_COPY_DEFAULT ]
[ ENABLED_HMAC_COPY=no ]
)
if test "$ENABLED_HMAC_COPY" = "yes"
then
@@ -658,18 +665,7 @@ AC_ARG_ENABLE([benchmark],
)
# Linux Kernel Module
AC_ARG_ENABLE([linuxkm],
[AS_HELP_STRING([--enable-linuxkm],[Enable Linux Kernel Module (default: disabled)])],
[ENABLED_LINUXKM=$enableval],
[ENABLED_LINUXKM=no]
)
AC_ARG_ENABLE([linuxkm-defaults],
[AS_HELP_STRING([--enable-linuxkm-defaults],[Enable feature defaults for Linux Kernel Module (default: disabled)])],
[ENABLED_LINUXKM_DEFAULTS=$enableval],
[ENABLED_LINUXKM_DEFAULTS=$ENABLED_LINUXKM]
)
# Remainder of Linux kernel module options, continued from earlier:
AC_ARG_ENABLE([linuxkm-pie],
[AS_HELP_STRING([--enable-linuxkm-pie],[Enable relocatable object build of Linux kernel module (default: disabled)])],
@@ -5649,10 +5645,18 @@ AC_ARG_ENABLE([pwdbased],
# MemUse Entropy
# wolfEntropy Software Jitter SP800-90B certifiable entropy source
if test "$ENABLED_LINUXKM_DEFAULTS" = "yes"
then
ENABLED_ENTROPY_MEMUSE_DEFAULT=yes
else
ENABLED_ENTROPY_MEMUSE_DEFAULT=no
fi
AC_ARG_ENABLE([wolfEntropy],
[AS_HELP_STRING([--enable-wolfEntropy],[Enable memuse entropy support (default: disabled)])],
[ ENABLED_ENTROPY_MEMUSE=$enableval ],
[ ENABLED_ENTROPY_MEMUSE=no ]
[ ENABLED_ENTROPY_MEMUSE=$ENABLED_ENTROPY_MEMUSE_DEFAULT ]
)
AC_ARG_ENABLE([entropy-memuse],
[AS_HELP_STRING([--enable-entropy-memuse],[Enable memuse entropy support (default: disabled)])],
@@ -9538,7 +9542,11 @@ if test -n "$MPI_MAX_KEY_BITS" -o -n "$WITH_MAX_ECC_BITS"; then
fi
AC_ARG_ENABLE([linuxkm-lkcapi-register],
[AS_HELP_STRING([--enable-linuxkm-lkcapi-register],[Register wolfCrypt implementations with the Linux Kernel Crypto API backplane. Possible values are "none", "all", "cbc(aes)", "cfb(aes)", "gcm(aes)", and "xts(aes)", or a comma-separate combination. (default: none)])],
[AS_HELP_STRING([--enable-linuxkm-lkcapi-register],[Register wolfCrypt implementations with the Linux Kernel Crypto API backplane.
Possible values are "none" or a comma-separated combination of "all", "all-kconfig", "sysfs-nodes-only", "cbc(aes)", "cfb(aes)",
"gcm(aes)", "rfc4106(gcm(aes))", "xts(aes)", "ctr(aes)", "ofb(aes)", "ecb(aes)", "sha1", "sha2", "sha3", "hmac(sha1)", "hmac(sha2)",
"hmac(sha3)", "stdrng", "stdrng-default", "ecdsa", "ecdh", "rsa", "dh", and negations of the foregoing algorithms by prefixing "-".
(default: none)])],
[ENABLED_LINUXKM_LKCAPI_REGISTER=$enableval],
[ENABLED_LINUXKM_LKCAPI_REGISTER=no]
)
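With the expanded syntax above, registered algorithms can be toggled individually. For example (hypothetical invocations): --enable-linuxkm-lkcapi-register=all,-xts(aes) registers everything except XTS, while --enable-linuxkm-lkcapi-register=stdrng,stdrng-default registers only the wolfCrypt DRBG and, judging from the LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT paths below, installs it as the systemwide default stdrng that the get_random_bytes interception later in this changeset builds on.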

View File

@@ -301,6 +301,9 @@
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 13, 0)
#include <crypto/internal/sig.h>
#endif /* linux ver >= 6.13 */
#ifdef WOLFSSL_LINUXKM_USE_GET_RANDOM_KPROBES
#include <linux/kprobes.h>
#endif
/* the LKCAPI assumes that expanded encrypt and decrypt keys will stay
* loaded simultaneously, and the Linux in-tree implementations have two
@@ -367,7 +370,7 @@
extern __must_check int allocate_wolfcrypt_linuxkm_fpu_states(void);
extern void free_wolfcrypt_linuxkm_fpu_states(void);
extern __must_check int can_save_vector_registers_x86(void);
extern __must_check int save_vector_registers_x86(void);
extern __must_check int save_vector_registers_x86(int inhibit_p);
extern void restore_vector_registers_x86(void);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
@@ -383,11 +386,11 @@
#endif
#endif
#ifndef SAVE_VECTOR_REGISTERS
#define SAVE_VECTOR_REGISTERS(fail_clause) { \
int _svr_ret = save_vector_registers_x86(); \
if (_svr_ret != 0) { \
fail_clause \
} \
#define SAVE_VECTOR_REGISTERS(fail_clause) { \
int _svr_ret = save_vector_registers_x86(0); \
if (_svr_ret != 0) { \
fail_clause \
} \
}
#endif
#ifndef SAVE_VECTOR_REGISTERS2
@@ -395,17 +398,24 @@
#define SAVE_VECTOR_REGISTERS2() ({ \
int _fuzzer_ret = SAVE_VECTOR_REGISTERS2_fuzzer(); \
(_fuzzer_ret == 0) ? \
save_vector_registers_x86() : \
save_vector_registers_x86(0) : \
_fuzzer_ret; \
})
#else
#define SAVE_VECTOR_REGISTERS2() save_vector_registers_x86()
#define SAVE_VECTOR_REGISTERS2() save_vector_registers_x86(0)
#endif
#endif
#ifndef RESTORE_VECTOR_REGISTERS
#define RESTORE_VECTOR_REGISTERS() restore_vector_registers_x86()
#endif
#ifndef DISABLE_VECTOR_REGISTERS
#define DISABLE_VECTOR_REGISTERS() save_vector_registers_x86(1)
#endif
#ifndef REENABLE_VECTOR_REGISTERS
#define REENABLE_VECTOR_REGISTERS() restore_vector_registers_x86()
#endif
#elif defined(WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS) && (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
#error kernel module ARM SIMD is not yet tested or usable.
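The new inhibit_p argument to save_vector_registers_x86() lets a caller mark a region where vector registers must not be touched at all, rather than saved for use. A minimal sketch of the intended DISABLE/REENABLE pattern, mirroring how the DRBG code later in this change uses it (the wrapper function is hypothetical):

static int generate_without_simd(WC_RNG *rng, byte *out, word32 len)
{
    /* DISABLE_VECTOR_REGISTERS() returns 0 on success; re-enable only if
     * the inhibited state was actually entered.
     */
    int need_reenable_vec = (DISABLE_VECTOR_REGISTERS() == 0);
    int ret = wc_RNG_GenerateBlock(rng, out, len);
    if (need_reenable_vec)
        REENABLE_VECTOR_REGISTERS();
    return ret;
}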
@@ -705,6 +715,12 @@
#endif
#endif
typeof(preempt_count) *preempt_count;
typeof(_raw_spin_lock_irqsave) *_raw_spin_lock_irqsave;
typeof(_raw_spin_trylock) *_raw_spin_trylock;
typeof(_raw_spin_unlock_irqrestore) *_raw_spin_unlock_irqrestore;
typeof(_cond_resched) *_cond_resched;
const void *_last_slot;
};
@@ -867,6 +883,18 @@
#define dump_stack (wolfssl_linuxkm_get_pie_redirect_table()->dump_stack)
#endif
#undef preempt_count /* just in case -- not a macro on x86. */
#define preempt_count (wolfssl_linuxkm_get_pie_redirect_table()->preempt_count)
#define _raw_spin_lock_irqsave (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_lock_irqsave)
#define _raw_spin_trylock (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_trylock)
#define _raw_spin_unlock_irqrestore (wolfssl_linuxkm_get_pie_redirect_table()->_raw_spin_unlock_irqrestore)
#define _cond_resched (wolfssl_linuxkm_get_pie_redirect_table()->_cond_resched)
/* this is defined in linux/spinlock.h as an inline that calls the unshimmed
* raw_spin_unlock_irqrestore(). use a macro here to supersede it.
*/
#define spin_unlock_irqrestore(lock, flags) raw_spin_unlock_irqrestore(&((lock)->rlock), flags)
#endif /* __PIE__ */
#endif /* USE_WOLFSSL_LINUXKM_PIE_REDIRECT_TABLE */
@@ -925,9 +953,120 @@
* above, with the bevy of warnings suppressed, and the below include will
* be a redundant no-op.
*/
#include <linux/mutex.h>
typedef struct mutex wolfSSL_Mutex;
#define WOLFSSL_MUTEX_INITIALIZER(lockname) __MUTEX_INITIALIZER(lockname)
/* Copied from wc_port.h: For FIPS keep the function names the same */
#ifdef HAVE_FIPS
#define wc_InitMutex InitMutex
#define wc_FreeMutex FreeMutex
#define wc_LockMutex LockMutex
#define wc_UnLockMutex UnLockMutex
#endif /* HAVE_FIPS */
#ifdef WOLFSSL_LINUXKM_USE_MUTEXES
#ifdef LINUXKM_LKCAPI_REGISTER
/* must use spin locks when registering implementations with the
* kernel, because mutexes are forbidden when calling with nonzero
* irq_count().
*/
#error WOLFSSL_LINUXKM_USE_MUTEXES is incompatible with LINUXKM_LKCAPI_REGISTER.
#endif
#include <linux/mutex.h>
typedef struct mutex wolfSSL_Mutex;
#define WOLFSSL_MUTEX_INITIALIZER(lockname) __MUTEX_INITIALIZER(lockname)
/* Linux kernel mutex routines are voids, alas. */
static inline int wc_InitMutex(wolfSSL_Mutex* m)
{
mutex_init(m);
return 0;
}
static inline int wc_FreeMutex(wolfSSL_Mutex* m)
{
mutex_destroy(m);
return 0;
}
static inline int wc_LockMutex(wolfSSL_Mutex* m)
{
if (in_nmi() || in_hardirq() || in_softirq())
return BAD_STATE_E;
mutex_lock(m);
return 0;
}
static inline int wc_UnLockMutex(wolfSSL_Mutex* m)
{
mutex_unlock(m);
return 0;
}
#else
typedef struct {
spinlock_t lock;
unsigned long irq_flags;
} wolfSSL_Mutex;
#define WOLFSSL_MUTEX_INITIALIZER(lockname) { .lock =__SPIN_LOCK_UNLOCKED(lockname), .irq_flags = 0 }
static __always_inline int wc_InitMutex(wolfSSL_Mutex* m)
{
m->lock = __SPIN_LOCK_UNLOCKED(m);
m->irq_flags = 0;
return 0;
}
static __always_inline int wc_FreeMutex(wolfSSL_Mutex* m)
{
(void)m;
return 0;
}
static __always_inline int wc_LockMutex(wolfSSL_Mutex* m)
{
unsigned long irq_flags;
/* first, try the cheap way. */
if (spin_trylock_irqsave(&m->lock, irq_flags)) {
m->irq_flags = irq_flags;
return 0;
}
if (irq_count() != 0) {
/* Note, this catches calls while SAVE_VECTOR_REGISTERS()ed as
* required, because in_softirq() is always true while saved,
* even for WC_FPU_INHIBITED_FLAG contexts.
*/
spin_lock_irqsave(&m->lock, irq_flags);
m->irq_flags = irq_flags;
return 0;
}
else {
for (;;) {
if (spin_trylock_irqsave(&m->lock, irq_flags)) {
m->irq_flags = irq_flags;
return 0;
}
cond_resched();
}
}
__builtin_unreachable();
}
static __always_inline int wc_UnLockMutex(wolfSSL_Mutex* m)
{
spin_unlock_irqrestore(&m->lock, m->irq_flags);
return 0;
}
#endif
/* Undo copied defines from wc_port.h, to avoid redefinition warnings. */
#ifdef HAVE_FIPS
#undef wc_InitMutex
#undef wc_FreeMutex
#undef wc_LockMutex
#undef wc_UnLockMutex
#endif /* HAVE_FIPS */
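A short usage sketch of the resulting API (lock name and caller are hypothetical); in the spinlock build wc_LockMutex() spins rather than sleeps, which is what makes it legal on the LKCAPI registration paths with nonzero irq_count():

static wolfSSL_Mutex g_example_lock = WOLFSSL_MUTEX_INITIALIZER(g_example_lock);

static int do_locked_work(void)
{
    int ret = wc_LockMutex(&g_example_lock); /* 0 on success; the mutex build
                                              * returns BAD_STATE_E from
                                              * NMI/hard-irq/soft-irq context. */
    if (ret != 0)
        return ret;
    /* ... critical section ... */
    return wc_UnLockMutex(&g_example_lock);
}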
/* prevent gcc's mm_malloc.h from being included, since it unconditionally
* includes stdlib.h, which is kernel-incompatible.
@@ -946,14 +1085,14 @@
_alloc_sz; \
})
#ifdef HAVE_KVMALLOC
#define malloc(size) kvmalloc_node(WC_LINUXKM_ROUND_UP_P_OF_2(size), GFP_KERNEL, NUMA_NO_NODE)
#define malloc(size) kvmalloc_node(WC_LINUXKM_ROUND_UP_P_OF_2(size), (preempt_count() == 0 ? GFP_KERNEL : GFP_ATOMIC), NUMA_NO_NODE)
#define free(ptr) kvfree(ptr)
void *lkm_realloc(void *ptr, size_t newsize);
#define realloc(ptr, newsize) lkm_realloc(ptr, WC_LINUXKM_ROUND_UP_P_OF_2(newsize))
#else
#define malloc(size) kmalloc(WC_LINUXKM_ROUND_UP_P_OF_2(size), GFP_KERNEL)
#define malloc(size) kmalloc(WC_LINUXKM_ROUND_UP_P_OF_2(size), (preempt_count() == 0 ? GFP_KERNEL : GFP_ATOMIC))
#define free(ptr) kfree(ptr)
#define realloc(ptr, newsize) krealloc(ptr, WC_LINUXKM_ROUND_UP_P_OF_2(newsize), GFP_KERNEL)
#define realloc(ptr, newsize) krealloc(ptr, WC_LINUXKM_ROUND_UP_P_OF_2(newsize), (preempt_count() == 0 ? GFP_KERNEL : GFP_ATOMIC))
#endif
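The preempt_count() tests above matter because GFP_KERNEL allocations may sleep and are therefore forbidden in atomic context; the shims now degrade to GFP_ATOMIC automatically. Conceptually (illustrative expansion only):

/* what malloc(size) now expands to, in spirit: */
void *p = kmalloc(WC_LINUXKM_ROUND_UP_P_OF_2(size),
                  (preempt_count() == 0) ? GFP_KERNEL   /* process context, may sleep */
                                         : GFP_ATOMIC); /* atomic context, must not sleep */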
#ifndef static_assert

View File

@@ -82,14 +82,6 @@
#include <wolfssl/wolfcrypt/dh.h>
#include <crypto/dh.h>
/* need misc.c for ForceZero(). */
#ifdef NO_INLINE
#include <wolfssl/wolfcrypt/misc.h>
#else
#define WOLFSSL_MISC_INCLUDED
#include <wolfcrypt/src/misc.c>
#endif
#define WOLFKM_DH_NAME ("dh")
#define WOLFKM_DH_DRIVER ("dh" WOLFKM_DRIVER_FIPS \
"-wolfcrypt")

View File

@@ -58,14 +58,6 @@
#include <wolfssl/wolfcrypt/ecc.h>
#include <crypto/ecdh.h>
/* need misc.c for ForceZero(). */
#ifdef NO_INLINE
#include <wolfssl/wolfcrypt/misc.h>
#else
#define WOLFSSL_MISC_INCLUDED
#include <wolfcrypt/src/misc.c>
#endif
#define WOLFKM_ECDH_DRIVER ("ecdh-wolfcrypt")
#define WOLFKM_ECDH_P192_NAME ("ecdh-nist-p192")

View File

@@ -43,14 +43,12 @@
#include <linux/fips.h>
#endif
#if defined(HAVE_FIPS) && FIPS_VERSION3_LT(6,0,0)
/* need misc.c for ForceZero(). */
#ifdef NO_INLINE
#include <wolfssl/wolfcrypt/misc.h>
#else
#define WOLFSSL_MISC_INCLUDED
#include <wolfcrypt/src/misc.c>
#endif
/* need misc.c for ForceZero(). */
#ifdef NO_INLINE
#include <wolfssl/wolfcrypt/misc.h>
#else
#define WOLFSSL_MISC_INCLUDED
#include <wolfcrypt/src/misc.c>
#endif
#ifndef WOLFSSL_LINUXKM_LKCAPI_PRIORITY

View File

@@ -1255,8 +1255,8 @@ pkcs1pad_verify_out:
#else
/* Returns the rsa key size:
* linux kernel version < 6.16: returns key size in bytes.
* linux kernel version >= 6.16: returns key size in bits.
* linux kernel version < 6.15.3: returns key size in bytes.
* linux kernel version >= 6.15.3: returns key size in bits.
* */
static unsigned int km_pkcs1_key_size(struct crypto_sig *tfm)
{
@@ -1264,11 +1264,11 @@ static unsigned int km_pkcs1_key_size(struct crypto_sig *tfm)
ctx = crypto_sig_ctx(tfm);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 16, 0)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 15, 3)
return (unsigned int) ctx->key_len * WOLFSSL_BIT_SIZE;
#else
return (unsigned int) ctx->key_len;
#endif /* linux >= 6.16 */
#endif /* linux >= 6.15.3 */
}
/*
@@ -3093,16 +3093,16 @@ static int linuxkm_test_pkcs1_driver(const char * driver, int nbits,
{
/* The behavior of crypto_sig_Xsize (X= max, key, digest) changed
* at linux kernel v6.16:
* < 6.16: all three should return the same value (in bytes).
* >= 6.16: keysize is in bits, maxsize and digestsize in bytes. */
* at linux kernel v6.15.3:
* < 6.15.3: all three should return the same value (in bytes).
* >= 6.15.3: keysize is in bits, maxsize and digestsize in bytes. */
unsigned int maxsize = crypto_sig_maxsize(tfm);
unsigned int keysize = crypto_sig_keysize(tfm);
unsigned int digestsize = crypto_sig_digestsize(tfm);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 16, 0)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 15, 3)
keysize = ((keysize + WOLFSSL_BIT_SIZE - 1) / WOLFSSL_BIT_SIZE);
#endif /* linux >= 6.16 */
#endif /* linux >= 6.15.3 */
#ifdef WOLFKM_DEBUG_RSA
pr_info("info: crypto_sig_{max, key, digest}size: "

View File

@@ -941,7 +941,7 @@ struct wc_swallow_the_semicolon
struct wc_linuxkm_drbg_ctx {
struct wc_rng_inst {
wolfSSL_Mutex lock;
wolfSSL_Atomic_Int lock;
WC_RNG rng;
} *rngs; /* one per CPU ID */
};
@@ -952,8 +952,12 @@ static inline void wc_linuxkm_drbg_ctx_clear(struct wc_linuxkm_drbg_ctx * ctx)
if (ctx->rngs) {
for (i = 0; i < nr_cpu_ids; ++i) {
(void)wc_FreeMutex(&ctx->rngs[i].lock);
wc_FreeRng(&ctx->rngs[i].rng);
if (ctx->rngs[i].lock != 0) {
/* better to leak than to crash. */
pr_err("BUG: wc_linuxkm_drbg_ctx_clear called with DRBG #%d still locked.", i);
}
else
wc_FreeRng(&ctx->rngs[i].rng);
}
free(ctx->rngs);
ctx->rngs = NULL;
@@ -962,11 +966,15 @@ static inline void wc_linuxkm_drbg_ctx_clear(struct wc_linuxkm_drbg_ctx * ctx)
return;
}
static volatile int wc_linuxkm_drbg_init_tfm_disable_vector_registers = 0;
static int wc_linuxkm_drbg_init_tfm(struct crypto_tfm *tfm)
{
struct wc_linuxkm_drbg_ctx *ctx = (struct wc_linuxkm_drbg_ctx *)crypto_tfm_ctx(tfm);
unsigned int i;
int ret;
int need_reenable_vec = 0;
int can_sleep = (preempt_count() == 0);
ctx->rngs = (struct wc_rng_inst *)malloc(sizeof(*ctx->rngs) * nr_cpu_ids);
if (! ctx->rngs)
@@ -974,20 +982,19 @@ static int wc_linuxkm_drbg_init_tfm(struct crypto_tfm *tfm)
XMEMSET(ctx->rngs, 0, sizeof(*ctx->rngs) * nr_cpu_ids);
for (i = 0; i < nr_cpu_ids; ++i) {
ret = wc_InitMutex(&ctx->rngs[i].lock);
if (ret != 0) {
ret = -EINVAL;
break;
}
/* Note the new DRBG instance is seeded, and later reseeded, from system
* get_random_bytes() via wc_GenerateSeed().
*/
ctx->rngs[i].lock = 0;
if (wc_linuxkm_drbg_init_tfm_disable_vector_registers)
need_reenable_vec = (DISABLE_VECTOR_REGISTERS() == 0);
ret = wc_InitRng(&ctx->rngs[i].rng);
if (need_reenable_vec)
REENABLE_VECTOR_REGISTERS();
if (ret != 0) {
pr_warn_once("WARNING: wc_InitRng returned %d\n",ret);
ret = -EINVAL;
break;
}
if (can_sleep)
cond_resched();
}
if (ret != 0) {
@@ -1006,39 +1013,119 @@ static void wc_linuxkm_drbg_exit_tfm(struct crypto_tfm *tfm)
return;
}
static int wc_linuxkm_drbg_default_instance_registered = 0;
static inline struct wc_rng_inst *get_drbg(struct crypto_rng *tfm) {
struct wc_linuxkm_drbg_ctx *ctx = (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(tfm);
int n, new_lock_value;
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
if (tfm == crypto_default_rng) {
migrate_disable(); /* this actually makes irq_count() nonzero, so that
* DISABLE_VECTOR_REGISTERS() is superfluous, but
* don't depend on that.
*/
new_lock_value = 2;
}
else
#endif
{
new_lock_value = 1;
}
n = raw_smp_processor_id();
for (;;) {
int expected = 0;
if (likely(__atomic_compare_exchange_n(&ctx->rngs[n].lock, &expected, new_lock_value, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)))
return &ctx->rngs[n];
++n;
if (n >= (int)nr_cpu_ids)
n = 0;
cpu_relax();
}
__builtin_unreachable();
}
static inline struct wc_rng_inst *get_drbg_n(struct wc_linuxkm_drbg_ctx *ctx, int n) {
for (;;) {
int expected = 0;
if (likely(__atomic_compare_exchange_n(&ctx->rngs[n].lock, &expected, 1, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)))
return &ctx->rngs[n];
cpu_relax();
}
__builtin_unreachable();
}
static inline void put_drbg(struct wc_rng_inst *drbg) {
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
int migration_disabled = (drbg->lock == 2);
#endif
__atomic_store_n(&(drbg->lock),0,__ATOMIC_RELEASE);
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
if (migration_disabled)
migrate_enable();
#endif
}
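get_drbg()/put_drbg() together implement a lock-free pool of per-CPU DRBGs: the lock word is claimed by compare-and-swap, with 0 meaning free, 1 held, and 2 held with migration disabled (the default-RNG path on CONFIG_SMP kernels without CONFIG_PREEMPT_COUNT). An illustrative caller following the same discipline as wc_linuxkm_drbg_generate() below, omitting its vector-register handling and reseed-retry logic (function name hypothetical):

static int generate_one_request(struct crypto_rng *tfm, u8 *dst, unsigned int dlen)
{
    struct wc_rng_inst *drbg = get_drbg(tfm); /* spins until an instance is free */
    int ret = wc_RNG_GenerateBlock(&drbg->rng, dst, dlen);
    put_drbg(drbg); /* releases the lock word, re-enabling migration if needed */
    return (ret == 0) ? 0 : -EINVAL;
}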
static int wc_linuxkm_drbg_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
struct wc_linuxkm_drbg_ctx *ctx = (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(tfm);
int ret;
/* Note, core is not locked, so the actual core ID may change while
* executing, hence the mutex.
* The mutex is also needed to coordinate with wc_linuxkm_drbg_seed(), which
int ret, retried = 0;
/* Note, core is not necessarily locked on entry, so the actual core ID may
* change while executing, hence the lock.
*
* The lock is also needed to coordinate with wc_linuxkm_drbg_seed(), which
* seeds all instances.
*/
int my_cpu = raw_smp_processor_id();
wolfSSL_Mutex *lock = &ctx->rngs[my_cpu].lock;
WC_RNG *rng = &ctx->rngs[my_cpu].rng;
struct wc_rng_inst *drbg = get_drbg(tfm);
/* for the default RNG, make sure we don't cache an underlying SHA256
* method that uses vector insns (forbidden from irq handlers).
*/
int need_fpu_restore = (tfm == crypto_default_rng) ? (DISABLE_VECTOR_REGISTERS() == 0) : 0;
if (wc_LockMutex(lock) != 0)
return -EINVAL;
retry:
if (slen > 0) {
ret = wc_RNG_DRBG_Reseed(rng, src, slen);
ret = wc_RNG_DRBG_Reseed(&drbg->rng, src, slen);
if (ret != 0) {
pr_warn_once("WARNING: wc_RNG_DRBG_Reseed returned %d\n",ret);
ret = -EINVAL;
goto out;
}
}
ret = wc_RNG_GenerateBlock(rng, dst, dlen);
if (ret != 0)
ret = wc_RNG_GenerateBlock(&drbg->rng, dst, dlen);
if (unlikely(ret == WC_NO_ERR_TRACE(RNG_FAILURE_E)) && (! retried)) {
retried = 1;
wc_FreeRng(&drbg->rng);
ret = wc_InitRng(&drbg->rng);
if (ret == 0) {
pr_warn("WARNING: reinitialized DRBG #%d after RNG_FAILURE_E.", raw_smp_processor_id());
goto retry;
}
else {
pr_warn_once("ERROR: reinitialization of DRBG #%d after RNG_FAILURE_E failed with ret %d.", raw_smp_processor_id(), ret);
ret = -EINVAL;
}
}
else if (ret != 0) {
pr_warn_once("WARNING: wc_RNG_GenerateBlock returned %d\n",ret);
ret = -EINVAL;
}
out:
wc_UnLockMutex(lock);
if (need_fpu_restore)
REENABLE_VECTOR_REGISTERS();
put_drbg(drbg);
return ret;
}
@@ -1049,7 +1136,7 @@ static int wc_linuxkm_drbg_seed(struct crypto_rng *tfm,
struct wc_linuxkm_drbg_ctx *ctx = (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(tfm);
u8 *seed_copy = NULL;
int ret;
unsigned int i;
int n;
if (slen == 0)
return 0;
@@ -1059,25 +1146,31 @@ static int wc_linuxkm_drbg_seed(struct crypto_rng *tfm,
return -ENOMEM;
XMEMCPY(seed_copy + 2, seed, slen);
for (i = 0; i < nr_cpu_ids; ++i) {
wolfSSL_Mutex *lock = &ctx->rngs[i].lock;
WC_RNG *rng = &ctx->rngs[i].rng;
for (n = nr_cpu_ids - 1; n >= 0; --n) {
struct wc_rng_inst *drbg = get_drbg_n(ctx, n);
/* perturb the seed with the CPU ID, so that no DRBG has the exact same
* seed.
*/
seed_copy[0] = (u8)(i >> 8);
seed_copy[1] = (u8)i;
seed_copy[0] = (u8)(n >> 8);
seed_copy[1] = (u8)n;
if (wc_LockMutex(lock) != 0)
return -EINVAL;
{
/* for the default RNG, make sure we don't cache an underlying SHA256
* method that uses vector insns (forbidden from irq handlers).
*/
int need_fpu_restore = (tfm == crypto_default_rng) ? (DISABLE_VECTOR_REGISTERS() == 0) : 0;
ret = wc_RNG_DRBG_Reseed(&drbg->rng, seed_copy, slen + 2);
if (need_fpu_restore)
REENABLE_VECTOR_REGISTERS();
}
ret = wc_RNG_DRBG_Reseed(rng, seed_copy, slen + 2);
if (ret != 0) {
pr_warn_once("WARNING: wc_RNG_DRBG_Reseed returned %d\n",ret);
ret = -EINVAL;
}
wc_UnLockMutex(lock);
put_drbg(drbg);
if (ret != 0)
break;
@@ -1103,9 +1196,429 @@ static struct rng_alg wc_linuxkm_drbg = {
}
};
static int wc_linuxkm_drbg_loaded = 0;
static int wc_linuxkm_drbg_default_instance_registered = 0;
WC_MAYBE_UNUSED static int wc_linuxkm_drbg_startup(void)
#ifdef NO_LINUXKM_DRBG_GET_RANDOM_BYTES
#undef LINUXKM_DRBG_GET_RANDOM_BYTES
#elif defined(LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT) && \
(defined(WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS) || defined(WOLFSSL_LINUXKM_USE_GET_RANDOM_KPROBES))
#ifndef LINUXKM_DRBG_GET_RANDOM_BYTES
#define LINUXKM_DRBG_GET_RANDOM_BYTES
#endif
#else
#ifdef LINUXKM_DRBG_GET_RANDOM_BYTES
#error LINUXKM_DRBG_GET_RANDOM_BYTES configured with no callback model configured.
#undef LINUXKM_DRBG_GET_RANDOM_BYTES
#endif
#endif
#ifdef LINUXKM_DRBG_GET_RANDOM_BYTES
#if !(defined(HAVE_ENTROPY_MEMUSE) || defined(HAVE_INTEL_RDSEED) || \
defined(HAVE_AMD_RDSEED))
#error LINUXKM_DRBG_GET_RANDOM_BYTES requires a native or intrinsic entropy source.
#endif
#if defined(WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS) && defined(WOLFSSL_LINUXKM_USE_GET_RANDOM_KPROBES)
#error Conflicting callback model for LINUXKM_DRBG_GET_RANDOM_BYTES.
#endif
#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
static inline struct crypto_rng *get_crypto_default_rng(void) {
struct crypto_rng *current_crypto_default_rng = crypto_default_rng;
if (unlikely(! wc_linuxkm_drbg_default_instance_registered)) {
pr_warn("BUG: get_default_drbg_ctx() called without wc_linuxkm_drbg_default_instance_registered.");
return NULL;
}
/* note we can't call crypto_get_default_rng(), because it uses a mutex
* (not allowed in interrupt handlers). we do however sanity-check the
* cra_init function pointer, and these handlers are protected by
* random_bytes_cb_refcnt in the patched drivers/char/random.c.
*/
if (current_crypto_default_rng->base.__crt_alg->cra_init != wc_linuxkm_drbg_init_tfm) {
pr_err("BUG: get_default_drbg_ctx() found wrong crypto_default_rng \"%s\"\n", crypto_tfm_alg_driver_name(&current_crypto_default_rng->base));
crypto_put_default_rng();
return NULL;
}
return current_crypto_default_rng;
}
static inline struct wc_linuxkm_drbg_ctx *get_default_drbg_ctx(void) {
struct crypto_rng *current_crypto_default_rng = get_crypto_default_rng();
return current_crypto_default_rng ? (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(current_crypto_default_rng) : NULL;
}
static int wc__get_random_bytes(void *buf, size_t len)
{
struct crypto_rng *current_crypto_default_rng = get_crypto_default_rng();
if (! current_crypto_default_rng)
return -EFAULT;
else {
int ret = crypto_rng_get_bytes(current_crypto_default_rng, buf, len);
if (ret)
pr_warn("BUG: wc_get_random_bytes falling through to native get_random_bytes with wc_linuxkm_drbg_default_instance_registered, ret=%d.", ret);
return ret;
}
__builtin_unreachable();
}
/* used by kernel >=5.14.0 */
static ssize_t wc_get_random_bytes_user(struct iov_iter *iter) {
struct crypto_rng *current_crypto_default_rng;
if (unlikely(!iov_iter_count(iter)))
return 0;
current_crypto_default_rng = get_crypto_default_rng();
if (! current_crypto_default_rng)
return -ECANCELED;
else {
ssize_t ret;
size_t this_copied, total_copied = 0;
byte block[WC_SHA256_BLOCK_SIZE];
for (;;) {
ret = (ssize_t)crypto_rng_get_bytes(current_crypto_default_rng, block, sizeof block);
if (unlikely(ret != 0)) {
pr_err("ERROR: wc_get_random_bytes_user() crypto_rng_get_bytes() returned %ld.", ret);
break;
}
/* note copy_to_iter() cannot be safely executed while
* DISABLE_VECTOR_REGISTERS()ed or in kprobe context, i.e.
* irq_count() must be zero here.
*/
this_copied = copy_to_iter(block, sizeof(block), iter);
total_copied += this_copied;
if (!iov_iter_count(iter) || this_copied != sizeof(block))
break;
wc_static_assert(PAGE_SIZE % sizeof(block) == 0);
if (total_copied % PAGE_SIZE == 0) {
if (signal_pending(current))
break;
cond_resched();
}
}
ForceZero(block, sizeof(block));
if (total_copied == 0) {
if (ret == 0)
ret = -EFAULT;
else
ret = -ECANCELED;
}
if (ret == 0)
ret = (ssize_t)total_copied;
return ret;
}
__builtin_unreachable();
}
/* used by kernel 4.9.0-5.13.x */
static ssize_t wc_extract_crng_user(void __user *buf, size_t nbytes) {
struct crypto_rng *current_crypto_default_rng;
if (unlikely(!nbytes))
return 0;
current_crypto_default_rng = get_crypto_default_rng();
if (! current_crypto_default_rng)
return -ECANCELED;
else {
ssize_t ret;
size_t this_copied, total_copied = 0;
byte block[WC_SHA256_BLOCK_SIZE];
for (;;) {
ret = (ssize_t)crypto_rng_get_bytes(current_crypto_default_rng, block, sizeof block);
if (unlikely(ret != 0)) {
pr_err("ERROR: wc_extract_crng_user() crypto_rng_get_bytes() returned %ld.", ret);
break;
}
this_copied = min(nbytes - total_copied, sizeof(block));
if (copy_to_user((byte *)buf + total_copied, block, this_copied)) {
ret = -EFAULT;
break;
}
total_copied += this_copied;
if (this_copied != sizeof(block))
break;
wc_static_assert(PAGE_SIZE % sizeof(block) == 0);
if (total_copied % PAGE_SIZE == 0) {
if (signal_pending(current))
break;
cond_resched();
}
}
ForceZero(block, sizeof(block));
if ((total_copied == 0) && (ret == 0)) {
ret = -ECANCELED;
}
if (ret == 0)
ret = (ssize_t)total_copied;
return ret;
}
__builtin_unreachable();
}
static int wc_mix_pool_bytes(const void *buf, size_t len) {
struct wc_linuxkm_drbg_ctx *ctx;
size_t i;
int n;
if (len == 0)
return 0;
if (! (ctx = get_default_drbg_ctx()))
return -EFAULT;
for (n = nr_cpu_ids - 1; n >= 0; --n) {
struct wc_rng_inst *drbg = get_drbg_n(ctx, n);
int V_offset = 0;
for (i = 0; i < len; ++i) {
((struct DRBG_internal *)drbg->rng.drbg)->V[V_offset++] += ((byte *)buf)[i];
if (V_offset == (int)sizeof ((struct DRBG_internal *)drbg->rng.drbg)->V)
V_offset = 0;
}
put_drbg(drbg);
}
return 0;
}
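The mixing above is a deliberately cheap approximation of the kernel's own mix_pool_bytes(): each caller-supplied byte is added into the DRBG's V state, with the write offset wrapping at the end of V. A standalone sketch of the fold (helper name hypothetical):

static void fold_into_state(byte *state, size_t state_len,
                            const byte *in, size_t in_len)
{
    size_t off = 0, i;
    for (i = 0; i < in_len; ++i) {
        state[off++] += in[i]; /* byte-wise add, carries discarded */
        if (off == state_len)
            off = 0;           /* wrap back to the start of V */
    }
}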
static int wc_crng_reseed(void) {
struct wc_linuxkm_drbg_ctx *ctx = get_default_drbg_ctx();
int n;
int can_sleep = (preempt_count() == 0);
if (! ctx)
return -EFAULT;
for (n = nr_cpu_ids - 1; n >= 0; --n) {
struct wc_rng_inst *drbg = get_drbg_n(ctx, n);
((struct DRBG_internal *)drbg->rng.drbg)->reseedCtr = WC_RESEED_INTERVAL;
if (can_sleep) {
byte scratch[4];
int need_reenable_vec = (DISABLE_VECTOR_REGISTERS() == 0);
int ret = wc_RNG_GenerateBlock(&drbg->rng, scratch, (word32)sizeof(scratch));
if (need_reenable_vec)
REENABLE_VECTOR_REGISTERS();
if (ret != 0)
pr_err("ERROR: wc_crng_reseed() wc_RNG_GenerateBlock() for DRBG #%d returned %d.", n, ret);
put_drbg(drbg);
cond_resched();
}
}
return 0;
}
struct wolfssl_linuxkm_random_bytes_handlers random_bytes_handlers = {
._get_random_bytes = wc__get_random_bytes,
/* pass handlers for both old and new user-mode rng, and let the kernel
* patch decide which one to use.
*/
.get_random_bytes_user = wc_get_random_bytes_user,
.extract_crng_user = wc_extract_crng_user,
.mix_pool_bytes = wc_mix_pool_bytes,
/* .credit_init_bits not implemented */
.crng_reseed = wc_crng_reseed
};
static int wc_get_random_bytes_callbacks_installed = 0;
#elif defined(WOLFSSL_LINUXKM_USE_GET_RANDOM_KPROBES)
#ifndef CONFIG_KPROBES
#error WOLFSSL_LINUXKM_USE_GET_RANDOM_KPROBES without CONFIG_KPROBES.
#endif
#ifndef CONFIG_X86
#error WOLFSSL_LINUXKM_USE_GET_RANDOM_KPROBES requires CONFIG_X86.
#endif
static int wc_get_random_bytes_by_kprobe(struct kprobe *p, struct pt_regs *regs)
{
void *buf = (void *)regs->di;
size_t len = (size_t)regs->si;
if (wc_linuxkm_drbg_default_instance_registered) {
int ret = crypto_rng_get_bytes(crypto_default_rng, buf, len);
if (ret == 0) {
regs->ip = (unsigned long)p->addr + p->ainsn.size;
return 1; /* Handled. */
}
pr_warn("BUG: wc_get_random_bytes_by_kprobe falling through to native get_random_bytes with wc_linuxkm_drbg_default_instance_registered, ret=%d.", ret);
}
else
pr_warn("BUG: wc_get_random_bytes_by_kprobe called without wc_linuxkm_drbg_default_instance_registered.");
/* Not handled. Fall through to native implementation, given
* that the alternative is an immediate kernel panic.
*
* Because we're jumping straight to the native implementation, we need to
* restore the argument registers first.
*/
asm volatile (
"movq %0, %%rsi\n\t"
"movq %1, %%rdi\n\t"
"pushq %2\n\t" /* Push original flags */
"popfq\n\t" /* Restore flags */
:
: "r" (regs->si),
"r" (regs->di),
"r" (regs->flags)
: "memory"
);
return 0;
}
static struct kprobe wc_get_random_bytes_kprobe = {
.symbol_name = "get_random_bytes",
.pre_handler = wc_get_random_bytes_by_kprobe,
};
static int wc_get_random_bytes_kprobe_installed = 0;
/* note, we can't kprobe _get_random_bytes() because it's inlined. */
#ifdef WOLFSSL_LINUXKM_USE_GET_RANDOM_USER_KRETPROBE
#warning Interception of /dev/random, /dev/urandom, and getrandom() using \
wc_get_random_bytes_user_kretprobe_enter() is known to destabilize large \
one-shot reads of randomness, due to conflicts with the kretprobe run \
context (uninterruptible). In particular, cryptsetup will fail on \
/dev/urandom reads. When in doubt, patch your kernel, activating \
WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS.
struct wc_get_random_bytes_user_kretprobe_ctx {
unsigned long retval;
};
static int wc_get_random_bytes_user_kretprobe_enter(struct kretprobe_instance *p, struct pt_regs *regs)
{
struct iov_iter *iter = (struct iov_iter *)regs->di;
struct wc_get_random_bytes_user_kretprobe_ctx *ctx = (struct wc_get_random_bytes_user_kretprobe_ctx *)p->data;
int ret;
size_t this_copied = (size_t)(-1L), total_copied = 0;
byte block[WC_SHA256_BLOCK_SIZE];
if (unlikely(!wc_linuxkm_drbg_default_instance_registered)) {
pr_warn("BUG: wc_get_random_bytes_user_kretprobe_enter() without wc_linuxkm_drbg_default_instance_registered.");
ret = -ENOENT;
goto out;
}
if (unlikely(!iov_iter_count(iter))) {
ret = 0;
goto out;
}
for (;;) {
ret = crypto_rng_get_bytes(crypto_default_rng, block, sizeof block);
if (ret != 0) {
pr_err("ERROR: wc_get_random_bytes_user_kretprobe_enter() crypto_rng_get_bytes() returned %d.", ret);
break;
}
/* note, in a kprobe/kretprobe, this can persistently return 0 (no
* progress) with nonzero iov_iter_count(iter).
*/
this_copied = copy_to_iter(block, sizeof(block), iter);
total_copied += this_copied;
if ((!iov_iter_count(iter)) || (this_copied != sizeof block))
break;
wc_static_assert(PAGE_SIZE % sizeof(block) == 0);
/* we are in a kprobe context here, so we can't do any scheduler ops. */
#if 0
if (total_copied % PAGE_SIZE == 0) {
if (signal_pending(current))
break;
cond_resched();
}
#endif
}
ForceZero(block, sizeof(block));
if ((total_copied == 0) && (ret == 0))
total_copied = (size_t)(-EFAULT);
out:
if ((ret != 0) && (this_copied == (size_t)(-1L))) {
/* crypto_rng_get_bytes() failed on the first call, before any update to the iov_iter. */
pr_warn("WARNING: wc_get_random_bytes_user_kretprobe_enter() falling through to native get_random_bytes_user().");
return -EFAULT;
}
/* if any progress was made, report that progress. crypto_rng_get_bytes()
* failing after some progress is benign.
*/
regs->ax = ctx->retval = total_copied;
/* skip the native get_random_bytes_user() by telling kprobes to jump
* straight to the return address.
*/
regs->ip = (unsigned long)get_kretprobe_retaddr(p);
/* return 0 to tell kprobes that the handler succeeded, so that
* wc_get_random_bytes_user_kretprobe_exit() will be called -- fixing up the
* return value (regs->ax) is necessary.
*/
return 0;
}
static int wc_get_random_bytes_user_kretprobe_exit(struct kretprobe_instance *p, struct pt_regs *regs)
{
struct wc_get_random_bytes_user_kretprobe_ctx *ctx = (struct wc_get_random_bytes_user_kretprobe_ctx *)p->data;
if (unlikely(!wc_linuxkm_drbg_default_instance_registered)) {
pr_warn("BUG: wc_get_random_bytes_user_kretprobe_exit without wc_linuxkm_drbg_default_instance_registered.");
return -EFAULT;
}
regs->ax = ctx->retval;
return 0;
}
static struct kretprobe wc_get_random_bytes_user_kretprobe = {
.kp.symbol_name = "get_random_bytes_user",
.entry_handler = wc_get_random_bytes_user_kretprobe_enter,
.handler = wc_get_random_bytes_user_kretprobe_exit,
.data_size = sizeof(struct wc_get_random_bytes_user_kretprobe_ctx)
};
static int wc_get_random_bytes_user_kretprobe_installed = 0;
#endif /* WOLFSSL_LINUXKM_USE_GET_RANDOM_USER_KRETPROBE */
#else /* !WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS && !(CONFIG_KPROBES && CONFIG_X86) */
#error LINUXKM_DRBG_GET_RANDOM_BYTES implementation missing for target architecture/configuration.
#endif
#endif /* LINUXKM_DRBG_GET_RANDOM_BYTES */
static int wc_linuxkm_drbg_startup(void)
{
int ret;
#ifdef LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT
@@ -1113,7 +1626,7 @@ WC_MAYBE_UNUSED static int wc_linuxkm_drbg_startup(void)
#endif
if (wc_linuxkm_drbg_loaded) {
pr_err("wc_linuxkm_drbg_set_default called with wc_linuxkm_drbg_loaded.");
pr_err("ERROR: wc_linuxkm_drbg_set_default called with wc_linuxkm_drbg_loaded.");
return -EBUSY;
}
@@ -1127,14 +1640,14 @@ WC_MAYBE_UNUSED static int wc_linuxkm_drbg_startup(void)
ret = crypto_register_rng(&wc_linuxkm_drbg);
if (ret != 0) {
pr_err("crypto_register_rng: %d", ret);
pr_err("ERROR: crypto_register_rng: %d", ret);
return ret;
}
{
struct crypto_rng *tfm = crypto_alloc_rng(wc_linuxkm_drbg.base.cra_name, 0, 0);
if (IS_ERR(tfm)) {
pr_err("error: allocating rng algorithm %s failed: %ld\n",
pr_err("ERROR: allocating rng algorithm %s failed: %ld\n",
wc_linuxkm_drbg.base.cra_name, PTR_ERR(tfm));
ret = PTR_ERR(tfm);
tfm = NULL;
@@ -1145,7 +1658,7 @@ WC_MAYBE_UNUSED static int wc_linuxkm_drbg_startup(void)
if (! ret) {
const char *actual_driver_name = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
if (strcmp(actual_driver_name, wc_linuxkm_drbg.base.cra_driver_name)) {
pr_err("error: unexpected implementation for %s: %s (expected %s)\n",
pr_err("ERROR: unexpected implementation for %s: %s (expected %s)\n",
wc_linuxkm_drbg.base.cra_name,
actual_driver_name,
wc_linuxkm_drbg.base.cra_driver_name);
@@ -1196,7 +1709,7 @@ WC_MAYBE_UNUSED static int wc_linuxkm_drbg_startup(void)
}
if (ret)
pr_err("wc_linuxkm_drbg_startup: PRNG quality test failed, block length %d, iters %d, ret %d",
pr_err("ERROR: wc_linuxkm_drbg_startup: PRNG quality test failed, block length %d, iters %d, ret %d",
i, j, ret);
}
}
@@ -1216,57 +1729,134 @@ WC_MAYBE_UNUSED static int wc_linuxkm_drbg_startup(void)
WOLFKM_INSTALL_NOTICE(wc_linuxkm_drbg);
#ifdef LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT
/* for the default RNG, make sure we don't cache an underlying SHA256
* method that uses vector insns (forbidden from irq handlers).
*/
wc_linuxkm_drbg_init_tfm_disable_vector_registers = 1;
ret = crypto_del_default_rng();
if (ret) {
pr_err("crypto_del_default_rng returned %d", ret);
wc_linuxkm_drbg_init_tfm_disable_vector_registers = 0;
pr_err("ERROR: crypto_del_default_rng returned %d", ret);
return ret;
}
ret = crypto_get_default_rng();
wc_linuxkm_drbg_init_tfm_disable_vector_registers = 0;
if (ret) {
pr_err("crypto_get_default_rng returned %d", ret);
pr_err("ERROR: crypto_get_default_rng returned %d", ret);
return ret;
}
cur_refcnt = WC_LKM_REFCOUNT_TO_INT(wc_linuxkm_drbg.base.cra_refcnt);
if (cur_refcnt < 2) {
pr_err("wc_linuxkm_drbg refcnt = %d after crypto_get_default_rng()", cur_refcnt);
pr_err("ERROR: wc_linuxkm_drbg refcnt = %d after crypto_get_default_rng()", cur_refcnt);
crypto_put_default_rng();
return -EINVAL;
}
if (! crypto_default_rng) {
pr_err("crypto_default_rng is null");
pr_err("ERROR: crypto_default_rng is null");
crypto_put_default_rng();
return -EINVAL;
}
if (strcmp(crypto_tfm_alg_driver_name(&crypto_default_rng->base), wc_linuxkm_drbg.base.cra_driver_name) == 0) {
crypto_put_default_rng();
wc_linuxkm_drbg_default_instance_registered = 1;
pr_info("%s registered as systemwide default stdrng.", wc_linuxkm_drbg.base.cra_driver_name);
pr_info("to unload module, first echo 1 > /sys/module/libwolfssl/deinstall_algs");
}
else {
pr_err("%s NOT registered as systemwide default stdrng -- found \"%s\".", wc_linuxkm_drbg.base.cra_driver_name, crypto_tfm_alg_driver_name(&crypto_default_rng->base));
if (crypto_default_rng->base.__crt_alg->cra_init != wc_linuxkm_drbg_init_tfm) {
pr_err("ERROR: %s NOT registered as systemwide default stdrng -- found \"%s\".", wc_linuxkm_drbg.base.cra_driver_name, crypto_tfm_alg_driver_name(&crypto_default_rng->base));
crypto_put_default_rng();
return -EINVAL;
}
crypto_put_default_rng();
wc_linuxkm_drbg_default_instance_registered = 1;
pr_info("%s registered as systemwide default stdrng.", wc_linuxkm_drbg.base.cra_driver_name);
pr_info("libwolfssl: to unload module, first echo 1 > /sys/module/libwolfssl/deinstall_algs");
#ifdef LINUXKM_DRBG_GET_RANDOM_BYTES
#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
ret = wolfssl_linuxkm_register_random_bytes_handlers(
THIS_MODULE,
&random_bytes_handlers);
if (ret == 0) {
wc_get_random_bytes_callbacks_installed = 1;
pr_info("libwolfssl: kernel global random_bytes handlers installed.");
}
else {
pr_err("ERROR: wolfssl_linuxkm_register_random_bytes_handlers() failed: %d\n", ret);
}
#elif defined(WOLFSSL_LINUXKM_USE_GET_RANDOM_KPROBES)
ret = register_kprobe(&wc_get_random_bytes_kprobe);
if (ret == 0) {
wc_get_random_bytes_kprobe_installed = 1;
pr_info("libwolfssl: wc_get_random_bytes_kprobe installed\n");
}
else {
pr_err("ERROR: wc_get_random_bytes_kprobe installation failed: %d\n", ret);
}
#ifdef WOLFSSL_LINUXKM_USE_GET_RANDOM_USER_KRETPROBE
ret = register_kretprobe(&wc_get_random_bytes_user_kretprobe);
if (ret == 0) {
wc_get_random_bytes_user_kretprobe_installed = 1;
pr_info("libwolfssl: wc_get_random_bytes_user_kretprobe installed\n");
}
else {
pr_err("ERROR: wc_get_random_bytes_user_kprobe installation failed: %d\n", ret);
}
#endif /* WOLFSSL_LINUXKM_USE_GET_RANDOM_USER_KRETPROBE */
#else
#error LINUXKM_DRBG_GET_RANDOM_BYTES missing installation calls.
#endif
#ifdef DEBUG_DRBG_RESEEDS
{
byte scratch[4];
ret = wc__get_random_bytes(scratch, sizeof(scratch));
if (ret != 0) {
pr_err("ERROR: wc__get_random_bytes() returned %d", ret);
return -EINVAL;
}
ret = wc_mix_pool_bytes(scratch, sizeof(scratch));
if (ret != 0) {
pr_err("ERROR: wc_mix_pool_bytes() returned %d", ret);
return -EINVAL;
}
ret = wc_crng_reseed();
if (ret != 0) {
pr_err("ERROR: wc_crng_reseed() returned %d", ret);
return -EINVAL;
}
ret = wc__get_random_bytes(scratch, sizeof(scratch));
if (ret != 0) {
pr_err("ERROR: wc__get_random_bytes() returned %d", ret);
return -EINVAL;
}
}
#endif
#endif /* LINUXKM_DRBG_GET_RANDOM_BYTES */
#endif /* LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT */
return 0;
}
WC_MAYBE_UNUSED static int wc_linuxkm_drbg_cleanup(void) {
static int wc_linuxkm_drbg_cleanup(void) {
int cur_refcnt = WC_LKM_REFCOUNT_TO_INT(wc_linuxkm_drbg.base.cra_refcnt);
if (! wc_linuxkm_drbg_loaded) {
pr_err("wc_linuxkm_drbg_cleanup called with ! wc_linuxkm_drbg_loaded");
pr_err("ERROR: wc_linuxkm_drbg_cleanup called with ! wc_linuxkm_drbg_loaded");
return -EINVAL;
}
if (cur_refcnt - wc_linuxkm_drbg_default_instance_registered != 1) {
pr_err("wc_linuxkm_drbg_cleanup called with refcnt = %d, with wc_linuxkm_drbg %sset as default rng",
pr_err("ERROR: wc_linuxkm_drbg_cleanup called with refcnt = %d, with wc_linuxkm_drbg %sset as default rng",
cur_refcnt, wc_linuxkm_drbg_default_instance_registered ? "" : "not ");
return -EBUSY;
}
@@ -1277,14 +1867,58 @@ WC_MAYBE_UNUSED static int wc_linuxkm_drbg_cleanup(void) {
#ifdef LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT
if (wc_linuxkm_drbg_default_instance_registered) {
int ret = crypto_del_default_rng();
int ret;
#ifdef LINUXKM_DRBG_GET_RANDOM_BYTES
/* we need to unregister the get_random_bytes handlers first to remove
* the chance that a caller will race with the crypto_unregister_rng()
* below.
*/
#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
if (wc_get_random_bytes_callbacks_installed) {
ret = wolfssl_linuxkm_unregister_random_bytes_handlers();
if (ret != 0) {
pr_err("ERROR: wolfssl_linuxkm_unregister_random_bytes_handlers returned %d", ret);
return ret;
}
pr_info("libwolfssl: kernel global random_bytes handlers uninstalled\n");
wc_get_random_bytes_callbacks_installed = 0;
}
#elif defined(WOLFSSL_LINUXKM_USE_GET_RANDOM_KPROBES)
if (wc_get_random_bytes_kprobe_installed) {
wc_get_random_bytes_kprobe_installed = 0;
barrier();
unregister_kprobe(&wc_get_random_bytes_kprobe);
pr_info("libwolfssl: wc_get_random_bytes_kprobe uninstalled\n");
}
#ifdef WOLFSSL_LINUXKM_USE_GET_RANDOM_USER_KRETPROBE
if (wc_get_random_bytes_user_kretprobe_installed) {
wc_get_random_bytes_user_kretprobe_installed = 0;
barrier();
unregister_kretprobe(&wc_get_random_bytes_user_kretprobe);
pr_info("libwolfssl: wc_get_random_bytes_user_kretprobe uninstalled\n");
}
#endif /* WOLFSSL_LINUXKM_USE_GET_RANDOM_USER_KRETPROBE */
#else
#error LINUXKM_DRBG_GET_RANDOM_BYTES missing deinstallation calls.
#endif
#endif /* LINUXKM_DRBG_GET_RANDOM_BYTES */
ret = crypto_del_default_rng();
if (ret) {
pr_err("crypto_del_default_rng failed: %d", ret);
pr_err("ERROR: crypto_del_default_rng failed: %d", ret);
return ret;
}
cur_refcnt = WC_LKM_REFCOUNT_TO_INT(wc_linuxkm_drbg.base.cra_refcnt);
if (cur_refcnt != 1) {
pr_err("wc_linuxkm_drbg refcnt = %d after crypto_del_default_rng()", cur_refcnt);
pr_warn("WARNING: wc_linuxkm_drbg refcnt = %d after crypto_del_default_rng()", cur_refcnt);
return -EINVAL;
}
}
@@ -1293,7 +1927,7 @@ WC_MAYBE_UNUSED static int wc_linuxkm_drbg_cleanup(void) {
crypto_unregister_rng(&wc_linuxkm_drbg);
if (! (wc_linuxkm_drbg.base.cra_flags & CRYPTO_ALG_DEAD)) {
pr_err("wc_linuxkm_drbg_cleanup: after crypto_unregister_rng, wc_linuxkm_drbg isn't dead.");
pr_warn("WARNING: wc_linuxkm_drbg_cleanup: after crypto_unregister_rng, wc_linuxkm_drbg isn't dead.");
return -EBUSY;
}

View File

@@ -20,12 +20,8 @@
*/
#ifndef WOLFSSL_LICENSE
#ifdef WOLFSSL_COMMERCIAL_LICENSE
#define WOLFSSL_LICENSE "wolfSSL Commercial"
#else
#define WOLFSSL_LICENSE "GPL v2"
#endif
#endif
#define WOLFSSL_LINUXKM_NEED_LINUX_CURRENT
@@ -37,6 +33,9 @@
#include <wolfssl/ssl.h>
#endif
#ifdef HAVE_FIPS
#ifdef USE_CONTESTMUTEX
#error USE_CONTESTMUTEX is incompatible with WOLFSSL_LINUXKM
#endif
#include <wolfssl/wolfcrypt/fips_test.h>
#endif
#if !defined(NO_CRYPT_TEST) || defined(LINUXKM_LKCAPI_REGISTER)
@@ -454,22 +453,14 @@ static struct task_struct *my_get_current_thread(void) {
return get_current();
}
#if defined(WOLFSSL_LINUXKM_SIMD_X86) && defined(WOLFSSL_COMMERCIAL_LICENSE)
/* ditto for fpregs_lock/fpregs_unlock */
#ifdef WOLFSSL_LINUXKM_USE_SAVE_VECTOR_REGISTERS
static void my_fpregs_lock(void) {
fpregs_lock();
/* preempt_count() is an inline function in arch/x86/include/asm/preempt.h that
* accesses __preempt_count, which is an int array declared with
* DECLARE_PER_CPU_CACHE_HOT.
*/
static int my_preempt_count(void) {
return preempt_count();
}
static void my_fpregs_unlock(void) {
fpregs_unlock();
}
#endif /* WOLFSSL_LINUXKM_SIMD_X86 && WOLFSSL_COMMERCIAL_LICENSE */
#endif /* USE_WOLFSSL_LINUXKM_PIE_REDIRECT_TABLE */
static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
memset(
&wolfssl_linuxkm_pie_redirect_table,
@@ -666,6 +657,12 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
wolfssl_linuxkm_pie_redirect_table.dump_stack = dump_stack;
#endif
wolfssl_linuxkm_pie_redirect_table.preempt_count = my_preempt_count;
wolfssl_linuxkm_pie_redirect_table._raw_spin_lock_irqsave = _raw_spin_lock_irqsave;
wolfssl_linuxkm_pie_redirect_table._raw_spin_trylock = _raw_spin_trylock;
wolfssl_linuxkm_pie_redirect_table._raw_spin_unlock_irqrestore = _raw_spin_unlock_irqrestore;
wolfssl_linuxkm_pie_redirect_table._cond_resched = _cond_resched;
#ifdef CONFIG_ARM64
wolfssl_linuxkm_pie_redirect_table.alt_cb_patch_nops = alt_cb_patch_nops;
#endif

View File

@@ -0,0 +1,463 @@
--- ./drivers/char/random.c.dist 2020-12-13 16:41:30.000000000 -0600
+++ ./drivers/char/random.c 2025-07-02 11:59:07.220250957 -0500
@@ -344,6 +344,260 @@
#include <asm/irq_regs.h>
#include <asm/io.h>
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+
+#include <linux/delay.h>
+
+static atomic_long_t random_bytes_cb_owner =
+ ATOMIC_INIT((long)NULL);
+static atomic_t random_bytes_cb_refcnt =
+ ATOMIC_INIT(0); /* 0 if unregistered, 1 if no calls in flight. */
+static _get_random_bytes_cb_t _get_random_bytes_cb = NULL;
+static extract_crng_user_cb_t extract_crng_user_cb = NULL;
+static crng_ready_cb_t crng_ready_cb = NULL;
+static mix_pool_bytes_cb_t mix_pool_bytes_cb = NULL;
+static credit_init_bits_cb_t credit_init_bits_cb = NULL;
+static crng_reseed_cb_t crng_reseed_cb = NULL;
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers)
+{
+ if ((! new_random_bytes_cb_owner) ||
+ (! handlers) ||
+ (! handlers->_get_random_bytes) ||
+ (! handlers->extract_crng_user))
+ {
+ return -EINVAL;
+ }
+
+ /* random_bytes_cb_owner is used to enforce serialization of
+ * wolfssl_register_random_bytes_handlers() and
+ * wolfssl_unregister_random_bytes_handlers().
+ */
+ if (atomic_long_cmpxchg(&random_bytes_cb_owner,
+ (long)NULL,
+ (long)new_random_bytes_cb_owner)
+ != (long)NULL)
+ {
+ return -EBUSY;
+ }
+
+ {
+ int current_random_bytes_cb_refcnt = atomic_read(&random_bytes_cb_refcnt);
+ if (current_random_bytes_cb_refcnt) {
+ pr_err("BUG: random_bytes_cb_refcnt == %d with null random_bytes_cb_owner", current_random_bytes_cb_refcnt);
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -EFAULT;
+ }
+ }
+
+ if (! try_module_get(new_random_bytes_cb_owner)) {
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -ENODEV;
+ }
+
+ _get_random_bytes_cb = handlers->_get_random_bytes;
+ extract_crng_user_cb = handlers->extract_crng_user;
+ crng_ready_cb = handlers->crng_ready;
+ mix_pool_bytes_cb = handlers->mix_pool_bytes;
+ credit_init_bits_cb = handlers->credit_init_bits;
+ crng_reseed_cb = handlers->crng_reseed;
+
+ barrier();
+ atomic_set_release(&random_bytes_cb_refcnt, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_register_random_bytes_handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void)
+{
+ int current_random_bytes_cb_refcnt;
+ int n_tries;
+ if (! atomic_long_read(&random_bytes_cb_owner))
+ return -ENODEV;
+
+ /* we're racing the kernel at large to try to catch random_bytes_cb_refcnt
+ * with no callers in flight -- retry and relax up to 100 times.
+ */
+ for (n_tries = 0; n_tries < 100; ++n_tries) {
+ current_random_bytes_cb_refcnt = atomic_cmpxchg(&random_bytes_cb_refcnt, 1, 0);
+ if (current_random_bytes_cb_refcnt == 1)
+ break;
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in wolfssl_linuxkm_unregister_random_bytes_handlers.", current_random_bytes_cb_refcnt);
+ break;
+ }
+ if (msleep_interruptible(10) != 0)
+ return -EINTR;
+ }
+ if (current_random_bytes_cb_refcnt != 1) {
+ pr_warn("WARNING: wolfssl_unregister_random_bytes_handlers called with random_bytes_cb_refcnt == %d", current_random_bytes_cb_refcnt);
+ return -EBUSY;
+ }
+
+ _get_random_bytes_cb = NULL;
+ extract_crng_user_cb = NULL;
+ crng_ready_cb = NULL;
+ mix_pool_bytes_cb = NULL;
+ credit_init_bits_cb = NULL;
+ crng_reseed_cb = NULL;
+
+ module_put((struct module *)atomic_long_read(&random_bytes_cb_owner));
+ barrier();
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_unregister_random_bytes_handlers);
+
+static __always_inline int reserve_random_bytes_cb(void) {
+ int current_random_bytes_cb_refcnt =
+ atomic_read_acquire(&random_bytes_cb_refcnt);
+
+ if (! current_random_bytes_cb_refcnt)
+ return -ENODEV;
+
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in reserve_random_bytes_cb.", current_random_bytes_cb_refcnt);
+ return -EFAULT;
+ }
+
+ for (;;) {
+ int orig_random_bytes_cb_refcnt =
+ atomic_cmpxchg(
+ &random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt + 1);
+ if (orig_random_bytes_cb_refcnt == current_random_bytes_cb_refcnt)
+ return 0;
+ else if (! orig_random_bytes_cb_refcnt)
+ return -ENODEV;
+ else
+ current_random_bytes_cb_refcnt = orig_random_bytes_cb_refcnt;
+ }
+
+ __builtin_unreachable();
+}
+
+static __always_inline void release_random_bytes_cb(void) {
+ atomic_dec(&random_bytes_cb_refcnt);
+}
+
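The refcount thus encodes both registration state and calls in flight: 0 means unregistered, 1 registered and idle, greater than 1 registered with calls in flight. A sketch of the transitions implied by the code above:

/* register:   0 -> 1  (atomic_set_release, after the handler pointers are set)
 * call:       reserve_random_bytes_cb()  n -> n+1  (fails -ENODEV once it observes 0)
 *             release_random_bytes_cb()  n -> n-1
 * unregister: atomic_cmpxchg 1 -> 0, retried while calls remain in flight
 */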
+static inline int call__get_random_bytes_cb(void *buf, size_t len)
+{
+ int ret;
+
+ if (! _get_random_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = _get_random_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline ssize_t call_extract_crng_user_cb(void __user *buf, size_t nbytes)
+{
+ ssize_t ret;
+
+ if (! extract_crng_user_cb)
+ return -ECANCELED;
+
+ ret = (ssize_t)reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = extract_crng_user_cb(buf, nbytes);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline bool call_crng_ready_cb(void)
+{
+ bool ret;
+
+ /* Null crng_ready_cb signifies that the DRBG is always ready, i.e. that if
+ * called, it will always have or obtain sufficient entropy to fulfill the
+ * call.
+ */
+ if (! crng_ready_cb)
+ return 1;
+
+ if (reserve_random_bytes_cb() != 0)
+ return 0;
+
+ ret = crng_ready_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_mix_pool_bytes_cb(const void *buf, size_t len)
+{
+ int ret;
+
+ if (! mix_pool_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = mix_pool_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_credit_init_bits_cb(size_t bits)
+{
+ int ret;
+
+ if (! credit_init_bits_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = credit_init_bits_cb(bits);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_crng_reseed_cb(void)
+{
+ int ret;
+
+ if (! crng_reseed_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = crng_reseed_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+#endif /* WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS */
+
#define CREATE_TRACE_POINTS
#include <trace/events/random.h>
@@ -461,7 +715,22 @@ static struct crng_state primary_crng =
* its value (from 0->1->2).
*/
static int crng_init = 0;
+
#define crng_ready() (likely(crng_init > 1))
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+	#define crng_ready_by_cb() (atomic_read(&random_bytes_cb_refcnt) && call_crng_ready_cb())
+	#define crng_ready_maybe_cb() (atomic_read(&random_bytes_cb_refcnt) ? (call_crng_ready_cb() || crng_ready()) : crng_ready())
+#else
+	#define crng_ready_maybe_cb() crng_ready()
+#endif
+
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
@@ -593,6 +862,11 @@ static void mix_pool_bytes(struct entrop
{
unsigned long flags;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ (void)call_mix_pool_bytes_cb(in, nbytes);
+ /* fall through to mix into native pool too. */
+#endif
+
trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
spin_lock_irqsave(&r->lock, flags);
_mix_pool_bytes(r, in, nbytes);
@@ -664,6 +938,10 @@ static void credit_entropy_bits(struct e
const int pool_size = r->poolinfo->poolfracbits;
int nfrac = nbits << ENTROPY_SHIFT;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ (void)call_credit_init_bits_cb(nbits);
+#endif
+
if (!nbits)
return;
@@ -1069,6 +1347,18 @@ static ssize_t extract_crng_user(void __
__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
int large_request = (nbytes > 256);
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ {
+ ssize_t cb_ret = call_extract_crng_user_cb(buf, nbytes);
+ /* If the callback returns -ECANCELED, that signals that iter is
+ * still intact, and flow can safely fall through to the native
+ * implementation.
+ */
+ if (cb_ret != -ECANCELED)
+ return cb_ret;
+ }
+#endif
+
while (nbytes) {
if (large_request && need_resched()) {
if (signal_pending(current)) {
@@ -1523,7 +1813,7 @@ static void _warn_unseeded_randomness(co
#endif
if (print_once ||
- crng_ready() ||
+ crng_ready_maybe_cb() ||
(previous && (caller == READ_ONCE(*previous))))
return;
WRITE_ONCE(*previous, caller);
@@ -1552,6 +1842,14 @@ static void _get_random_bytes(void *buf,
trace_get_random_bytes(nbytes, _RET_IP_);
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* If call__get_random_bytes_cb() doesn't succeed, flow falls through to
+ * the native implementation. _get_random_bytes() must succeed.
+ */
+ if (call__get_random_bytes_cb(buf, nbytes) == 0)
+ return;
+#endif
+
while (nbytes >= CHACHA_BLOCK_SIZE) {
extract_crng(buf);
buf += CHACHA_BLOCK_SIZE;
@@ -1638,12 +1936,12 @@ static void try_to_generate_entropy(void
*/
int wait_for_random_bytes(void)
{
- if (likely(crng_ready()))
+ if (likely(crng_ready_maybe_cb()))
return 0;
do {
int ret;
- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready_maybe_cb(), HZ);
if (ret)
return ret > 0 ? 0 : ret;
@@ -1665,7 +1963,7 @@ EXPORT_SYMBOL(wait_for_random_bytes);
*/
bool rng_is_initialized(void)
{
- return crng_ready();
+ return crng_ready_maybe_cb();
}
EXPORT_SYMBOL(rng_is_initialized);
@@ -1843,7 +2141,7 @@ urandom_read(struct file *file, char __u
unsigned long flags;
static int maxwarn = 10;
- if (!crng_ready() && maxwarn > 0) {
+ if (!crng_ready_maybe_cb() && maxwarn > 0) {
maxwarn--;
if (__ratelimit(&urandom_warning))
pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
@@ -1872,6 +2170,11 @@ random_poll(struct file *file, poll_tabl
{
__poll_t mask;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ if (crng_ready_by_cb())
+ return EPOLLIN | EPOLLRDNORM;
+#endif
+
poll_wait(file, &crng_init_wait, wait);
poll_wait(file, &random_write_wait, wait);
mask = 0;
@@ -1970,6 +2273,16 @@ static long random_ioctl(struct file *f,
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* fall through to reseed native crng too. */
+ if (call_crng_reseed_cb() == 0) {
+ if (crng_init >= 2) {
+ crng_reseed(&primary_crng, &input_pool);
+ crng_global_init_time = jiffies - 1;
+ }
+ return 0;
+ }
+#endif
if (crng_init < 2)
return -ENODATA;
crng_reseed(&primary_crng, NULL);
@@ -2022,7 +2335,7 @@ SYSCALL_DEFINE3(getrandom, char __user *
if (count > INT_MAX)
count = INT_MAX;
- if (!(flags & GRND_INSECURE) && !crng_ready()) {
+ if (!(flags & GRND_INSECURE) && !crng_ready_maybe_cb()) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
ret = wait_for_random_bytes();
--- ./include/linux/random.h.dist 2020-12-13 16:41:30.000000000 -0600
+++ ./include/linux/random.h 2025-06-30 12:05:59.106440700 -0500
@@ -158,4 +158,37 @@ static inline bool __init arch_get_rando
}
#endif
+#ifndef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ #define WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS 1
+#endif
+
+typedef int (*_get_random_bytes_cb_t)(void *buf, size_t len);
+struct iov_iter;
+/* kernels >= 5.17.0 use get_random_bytes_user() */
+typedef ssize_t (*get_random_bytes_user_cb_t)(struct iov_iter *iter);
+/* kernels < 5.17.0 use extract_crng_user(), though some LTS kernels,
+ * e.g. 5.10.236, have the 5.17+ architecture backported.
+ */
+typedef ssize_t (*extract_crng_user_cb_t)(void __user *buf, size_t nbytes);
+typedef bool (*crng_ready_cb_t)(void);
+typedef int (*mix_pool_bytes_cb_t)(const void *buf, size_t len);
+typedef int (*credit_init_bits_cb_t)(size_t bits);
+typedef int (*crng_reseed_cb_t)(void);
+
+struct wolfssl_linuxkm_random_bytes_handlers {
+ _get_random_bytes_cb_t _get_random_bytes;
+ get_random_bytes_user_cb_t get_random_bytes_user;
+ extract_crng_user_cb_t extract_crng_user;
+ crng_ready_cb_t crng_ready;
+ mix_pool_bytes_cb_t mix_pool_bytes;
+ credit_init_bits_cb_t credit_init_bits;
+ crng_reseed_cb_t crng_reseed;
+};
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void);
+
#endif /* _LINUX_RANDOM_H */
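
The handler table and the two registration entry points declared above are the entire module-facing surface of these patches. As a rough orientation, here is a minimal, hypothetical registration sketch; the my_* names are placeholders, not part of the patches, and the stub handlers simply defer to the native paths. Per the typedef comments, pre-5.17 kernels call the extract_crng_user member while 5.17+ kernels (and some LTS backports) call get_random_bytes_user, so a portable caller fills in both.

/* Hypothetical registration sketch -- placeholder handlers that defer to
 * the native implementation. A real module would call into its DRBG.
 */
#include <linux/module.h>
#include <linux/random.h>

static int my_get_random_bytes(void *buf, size_t len)
{
    (void)buf; (void)len;
    return -EAGAIN; /* nonzero: fall through to the native pool */
}

static ssize_t my_get_random_bytes_user(struct iov_iter *iter)
{
    (void)iter;
    return -ECANCELED; /* iter untouched: defer to get_random_bytes_user() */
}

static ssize_t my_extract_crng_user(void __user *buf, size_t nbytes)
{
    (void)buf; (void)nbytes;
    return -ECANCELED; /* buffer untouched: defer to extract_crng_user() */
}

static const struct wolfssl_linuxkm_random_bytes_handlers my_handlers = {
    ._get_random_bytes     = my_get_random_bytes,
    .get_random_bytes_user = my_get_random_bytes_user, /* >= 5.17 kernels */
    .extract_crng_user     = my_extract_crng_user,     /* <  5.17 kernels */
    /* crng_ready, mix_pool_bytes, credit_init_bits, and crng_reseed are
     * optional; a null crng_ready means "always ready". */
};

static int __init my_rng_init(void)
{
    /* Takes a reference on THIS_MODULE, so the owner cannot be unloaded
     * until the unregister call succeeds. */
    return wolfssl_linuxkm_register_random_bytes_handlers(THIS_MODULE,
                                                          &my_handlers);
}
module_init(my_rng_init);
MODULE_LICENSE("GPL");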

View File

@@ -0,0 +1,462 @@
--- ./drivers/char/random.c.dist 2025-04-29 18:54:03.390121890 -0500
+++ ./drivers/char/random.c 2025-07-02 11:57:40.176497765 -0500
@@ -60,6 +60,260 @@
#include <asm/irq_regs.h>
#include <asm/io.h>
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+
+#include <linux/delay.h>
+
+static atomic_long_t random_bytes_cb_owner =
+ ATOMIC_INIT((long)NULL);
+static atomic_t random_bytes_cb_refcnt =
+ ATOMIC_INIT(0); /* 0 if unregistered, 1 if no calls in flight. */
+static _get_random_bytes_cb_t _get_random_bytes_cb = NULL;
+static get_random_bytes_user_cb_t get_random_bytes_user_cb = NULL;
+static crng_ready_cb_t crng_ready_cb = NULL;
+static mix_pool_bytes_cb_t mix_pool_bytes_cb = NULL;
+static credit_init_bits_cb_t credit_init_bits_cb = NULL;
+static crng_reseed_cb_t crng_reseed_cb = NULL;
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers)
+{
+ if ((! new_random_bytes_cb_owner) ||
+ (! handlers) ||
+ (! handlers->_get_random_bytes) ||
+ (! handlers->get_random_bytes_user))
+ {
+ return -EINVAL;
+ }
+
+ /* random_bytes_cb_owner is used to enforce serialization of
+ * wolfssl_linuxkm_register_random_bytes_handlers() and
+ * wolfssl_linuxkm_unregister_random_bytes_handlers().
+ */
+ if (atomic_long_cmpxchg(&random_bytes_cb_owner,
+ (long)NULL,
+ (long)new_random_bytes_cb_owner)
+ != (long)NULL)
+ {
+ return -EBUSY;
+ }
+
+ {
+ int current_random_bytes_cb_refcnt = atomic_read(&random_bytes_cb_refcnt);
+ if (current_random_bytes_cb_refcnt) {
+ pr_err("BUG: random_bytes_cb_refcnt == %d with null random_bytes_cb_owner", current_random_bytes_cb_refcnt);
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -EFAULT;
+ }
+ }
+
+ if (! try_module_get(new_random_bytes_cb_owner)) {
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -ENODEV;
+ }
+
+ _get_random_bytes_cb = handlers->_get_random_bytes;
+ get_random_bytes_user_cb = handlers->get_random_bytes_user;
+ crng_ready_cb = handlers->crng_ready;
+ mix_pool_bytes_cb = handlers->mix_pool_bytes;
+ credit_init_bits_cb = handlers->credit_init_bits;
+ crng_reseed_cb = handlers->crng_reseed;
+
+ barrier();
+ atomic_set_release(&random_bytes_cb_refcnt, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_register_random_bytes_handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void)
+{
+ int current_random_bytes_cb_refcnt;
+ int n_tries;
+ if (! atomic_long_read(&random_bytes_cb_owner))
+ return -ENODEV;
+
+ /* we're racing the kernel at large to try to catch random_bytes_cb_refcnt
+ * with no callers in flight -- retry and relax up to 100 times.
+ */
+ for (n_tries = 0; n_tries < 100; ++n_tries) {
+ current_random_bytes_cb_refcnt = atomic_cmpxchg(&random_bytes_cb_refcnt, 1, 0);
+ if (current_random_bytes_cb_refcnt == 1)
+ break;
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in wolfssl_linuxkm_unregister_random_bytes_handlers.", current_random_bytes_cb_refcnt);
+ break;
+ }
+ if (msleep_interruptible(10) != 0)
+ return -EINTR;
+ }
+ if (current_random_bytes_cb_refcnt != 1) {
+ pr_warn("WARNING: wolfssl_unregister_random_bytes_handlers called with random_bytes_cb_refcnt == %d", current_random_bytes_cb_refcnt);
+ return -EBUSY;
+ }
+
+ _get_random_bytes_cb = NULL;
+ get_random_bytes_user_cb = NULL;
+ crng_ready_cb = NULL;
+ mix_pool_bytes_cb = NULL;
+ credit_init_bits_cb = NULL;
+ crng_reseed_cb = NULL;
+
+ module_put((struct module *)atomic_long_read(&random_bytes_cb_owner));
+ barrier();
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_unregister_random_bytes_handlers);
+
+static __always_inline int reserve_random_bytes_cb(void) {
+ int current_random_bytes_cb_refcnt =
+ atomic_read_acquire(&random_bytes_cb_refcnt);
+
+ if (! current_random_bytes_cb_refcnt)
+ return -ENODEV;
+
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in reserve_random_bytes_cb.", current_random_bytes_cb_refcnt);
+ return -EFAULT;
+ }
+
+ for (;;) {
+ int orig_random_bytes_cb_refcnt =
+ atomic_cmpxchg(
+ &random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt + 1);
+ if (orig_random_bytes_cb_refcnt == current_random_bytes_cb_refcnt)
+ return 0;
+ else if (! orig_random_bytes_cb_refcnt)
+ return -ENODEV;
+ else
+ current_random_bytes_cb_refcnt = orig_random_bytes_cb_refcnt;
+ }
+
+ __builtin_unreachable();
+}
+
+static __always_inline void release_random_bytes_cb(void) {
+ atomic_dec(&random_bytes_cb_refcnt);
+}
+
+static inline int call__get_random_bytes_cb(void *buf, size_t len)
+{
+ int ret;
+
+ if (! _get_random_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = _get_random_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline ssize_t call_get_random_bytes_user_cb(struct iov_iter *iter)
+{
+ ssize_t ret;
+
+ if (! get_random_bytes_user_cb)
+ return -ECANCELED;
+
+ ret = (ssize_t)reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = get_random_bytes_user_cb(iter);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline bool call_crng_ready_cb(void)
+{
+ bool ret;
+
+ /* Null crng_ready_cb signifies that the DRBG is always ready, i.e. that if
+ * called, it will always have or obtain sufficient entropy to fulfill the
+ * call.
+ */
+ if (! crng_ready_cb)
+ return 1;
+
+ if (reserve_random_bytes_cb() != 0)
+ return 0;
+
+ ret = crng_ready_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_mix_pool_bytes_cb(const void *buf, size_t len)
+{
+ int ret;
+
+ if (! mix_pool_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = mix_pool_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_credit_init_bits_cb(size_t bits)
+{
+ int ret;
+
+ if (! credit_init_bits_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = credit_init_bits_cb(bits);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_crng_reseed_cb(void)
+{
+ int ret;
+
+ if (! crng_reseed_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = crng_reseed_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+#endif /* WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS */
+
/*********************************************************************
*
* Initialization and readiness waiting.
@@ -79,7 +333,15 @@ static enum {
CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
} crng_init __read_mostly = CRNG_EMPTY;
+
#define crng_ready() (likely(crng_init >= CRNG_READY))
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ #define crng_ready_by_cb() (atomic_read(&random_bytes_cb_refcnt) && call_crng_ready_cb())
+ #define crng_ready_maybe_cb() (atomic_read(&random_bytes_cb_refcnt) ? (call_crng_ready_cb() || crng_ready()) : crng_ready())
+#else
+ #define crng_ready_maybe_cb() crng_ready()
+#endif
+
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
@@ -105,7 +367,7 @@ MODULE_PARM_DESC(ratelimit_disable, "Dis
*/
bool rng_is_initialized(void)
{
- return crng_ready();
+ return crng_ready_maybe_cb();
}
EXPORT_SYMBOL(rng_is_initialized);
@@ -124,11 +386,11 @@ static void try_to_generate_entropy(void
*/
int wait_for_random_bytes(void)
{
- while (!crng_ready()) {
+ while (!crng_ready_maybe_cb()) {
int ret;
try_to_generate_entropy();
- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready_maybe_cb(), HZ);
if (ret)
return ret > 0 ? 0 : ret;
}
@@ -182,7 +444,7 @@ static void __cold process_random_ready_
}
#define warn_unseeded_randomness() \
- if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready_maybe_cb()) \
printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
__func__, (void *)_RET_IP_, crng_init)
@@ -401,6 +663,14 @@ static void _get_random_bytes(void *buf,
if (!len)
return;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* If call__get_random_bytes_cb() doesn't succeed, flow falls through to
+ * the native implementation. _get_random_bytes() must succeed.
+ */
+ if (call__get_random_bytes_cb(buf, len) == 0)
+ return;
+#endif
+
first_block_len = min_t(size_t, 32, len);
crng_make_state(chacha_state, buf, first_block_len);
len -= first_block_len;
@@ -450,6 +720,18 @@ static ssize_t get_random_bytes_user(str
if (unlikely(!iov_iter_count(iter)))
return 0;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ {
+ ssize_t cb_ret = call_get_random_bytes_user_cb(iter);
+ /* If the callback returns -ECANCELED, that signals that iter is
+ * still intact, and flow can safely fall through to the native
+ * implementation.
+ */
+ if (cb_ret != -ECANCELED)
+ return cb_ret;
+ }
+#endif
+
/*
* Immediately overwrite the ChaCha key at index 4 with random
* bytes, in case userspace causes copy_to_iter() below to sleep
@@ -526,7 +808,7 @@ type get_random_ ##type(void) \
\
warn_unseeded_randomness(); \
\
- if (!crng_ready()) { \
+ if (!crng_ready_maybe_cb()) { \
_get_random_bytes(&ret, sizeof(ret)); \
return ret; \
} \
@@ -650,6 +932,11 @@ static void mix_pool_bytes(const void *b
{
unsigned long flags;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ (void)call_mix_pool_bytes_cb(buf, len);
+ /* fall through to mix into native pool too. */
+#endif
+
spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(buf, len);
spin_unlock_irqrestore(&input_pool.lock, flags);
@@ -701,7 +988,11 @@ static void extract_entropy(void *buf, s
memzero_explicit(&block, sizeof(block));
}
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+#define credit_init_bits(bits) do { (void)call_credit_init_bits_cb(bits); if (!crng_ready()) _credit_init_bits(bits); } while (0)
+#else
#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+#endif
static void __cold _credit_init_bits(size_t bits)
{
@@ -1229,7 +1520,7 @@ SYSCALL_DEFINE3(getrandom, char __user *
if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
return -EINVAL;
- if (!crng_ready() && !(flags & GRND_INSECURE)) {
+ if (!crng_ready_maybe_cb() && !(flags & GRND_INSECURE)) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
ret = wait_for_random_bytes();
@@ -1245,6 +1536,10 @@ SYSCALL_DEFINE3(getrandom, char __user *
static __poll_t random_poll(struct file *file, poll_table *wait)
{
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ if (crng_ready_by_cb())
+ return EPOLLIN | EPOLLRDNORM;
+#endif
poll_wait(file, &crng_init_wait, wait);
return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}
@@ -1286,7 +1581,7 @@ static ssize_t urandom_read_iter(struct
{
static int maxwarn = 10;
- if (!crng_ready()) {
+ if (!crng_ready_maybe_cb()) {
if (!ratelimit_disable && maxwarn <= 0)
++urandom_warning.missed;
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
@@ -1369,6 +1664,14 @@ static long random_ioctl(struct file *f,
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* fall through to reseed native crng too. */
+ if (call_crng_reseed_cb() == 0) {
+ if (crng_ready())
+ crng_reseed();
+ return 0;
+ }
+#endif
if (!crng_ready())
return -ENODATA;
crng_reseed();
--- ./include/linux/random.h.dist 2025-04-29 18:54:07.595202807 -0500
+++ ./include/linux/random.h 2025-06-30 12:03:15.263141842 -0500
@@ -138,4 +138,37 @@ int random_online_cpu(unsigned int cpu);
extern const struct file_operations random_fops, urandom_fops;
#endif
+#ifndef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ #define WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS 1
+#endif
+
+typedef int (*_get_random_bytes_cb_t)(void *buf, size_t len);
+struct iov_iter;
+/* kernels >= 5.17.0 use get_random_bytes_user() */
+typedef ssize_t (*get_random_bytes_user_cb_t)(struct iov_iter *iter);
+/* kernels < 5.17.0 use extract_crng_user(), though some LTS kernels,
+ * e.g. 5.10.236, have the 5.17+ architecture backported.
+ */
+typedef ssize_t (*extract_crng_user_cb_t)(void __user *buf, size_t nbytes);
+typedef bool (*crng_ready_cb_t)(void);
+typedef int (*mix_pool_bytes_cb_t)(const void *buf, size_t len);
+typedef int (*credit_init_bits_cb_t)(size_t bits);
+typedef int (*crng_reseed_cb_t)(void);
+
+struct wolfssl_linuxkm_random_bytes_handlers {
+ _get_random_bytes_cb_t _get_random_bytes;
+ get_random_bytes_user_cb_t get_random_bytes_user;
+ extract_crng_user_cb_t extract_crng_user;
+ crng_ready_cb_t crng_ready;
+ mix_pool_bytes_cb_t mix_pool_bytes;
+ credit_init_bits_cb_t credit_init_bits;
+ crng_reseed_cb_t crng_reseed;
+};
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void);
+
#endif /* _LINUX_RANDOM_H */
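
In this variant the user-facing hook receives the caller's iov_iter directly, and the call-site comment above fixes the contract: -ECANCELED may be returned only while the iter is still untouched, since the native get_random_bytes_user() then consumes it. What follows is a hedged sketch of such a handler; my_drbg_generate() is a placeholder stub, not part of the patches.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/uio.h>

/* Placeholder: a real module would generate from its DRBG here. */
static int my_drbg_generate(void *buf, size_t len)
{
    (void)buf; (void)len;
    return -EIO;
}

static ssize_t my_get_random_bytes_user(struct iov_iter *iter)
{
    u8 block[256];
    ssize_t ret = 0;

    while (iov_iter_count(iter)) {
        size_t n = min_t(size_t, sizeof(block), iov_iter_count(iter));

        if (my_drbg_generate(block, n) != 0) {
            if (ret == 0)
                ret = -EIO; /* failed before any bytes were copied */
            break;
        }
        n = copy_to_iter(block, n, iter);
        if (n == 0) {
            if (ret == 0)
                ret = -EFAULT; /* user page faulted with no progress */
            break;
        }
        ret += n;
        cond_resched(); /* large requests shouldn't monopolize the CPU */
    }

    memzero_explicit(block, sizeof(block));
    return ret;
}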

View File

@@ -0,0 +1,446 @@
--- ./drivers/char/random.c.dist 2021-10-31 15:53:10.000000000 -0500
+++ ./drivers/char/random.c 2025-07-02 11:49:13.836320539 -0500
@@ -344,6 +344,260 @@
#include <asm/irq_regs.h>
#include <asm/io.h>
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+
+#include <linux/delay.h>
+
+static atomic_long_t random_bytes_cb_owner =
+ ATOMIC_INIT((long)NULL);
+static atomic_t random_bytes_cb_refcnt =
+ ATOMIC_INIT(0); /* 0 if unregistered, 1 if no calls in flight. */
+static _get_random_bytes_cb_t _get_random_bytes_cb = NULL;
+static extract_crng_user_cb_t extract_crng_user_cb = NULL;
+static crng_ready_cb_t crng_ready_cb = NULL;
+static mix_pool_bytes_cb_t mix_pool_bytes_cb = NULL;
+static credit_init_bits_cb_t credit_init_bits_cb = NULL;
+static crng_reseed_cb_t crng_reseed_cb = NULL;
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers)
+{
+ if ((! new_random_bytes_cb_owner) ||
+ (! handlers) ||
+ (! handlers->_get_random_bytes) ||
+ (! handlers->extract_crng_user))
+ {
+ return -EINVAL;
+ }
+
+ /* random_bytes_cb_owner is used to enforce serialization of
+ * wolfssl_linuxkm_register_random_bytes_handlers() and
+ * wolfssl_linuxkm_unregister_random_bytes_handlers().
+ */
+ if (atomic_long_cmpxchg(&random_bytes_cb_owner,
+ (long)NULL,
+ (long)new_random_bytes_cb_owner)
+ != (long)NULL)
+ {
+ return -EBUSY;
+ }
+
+ {
+ int current_random_bytes_cb_refcnt = atomic_read(&random_bytes_cb_refcnt);
+ if (current_random_bytes_cb_refcnt) {
+ pr_err("BUG: random_bytes_cb_refcnt == %d with null random_bytes_cb_owner", current_random_bytes_cb_refcnt);
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -EFAULT;
+ }
+ }
+
+ if (! try_module_get(new_random_bytes_cb_owner)) {
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -ENODEV;
+ }
+
+ _get_random_bytes_cb = handlers->_get_random_bytes;
+ extract_crng_user_cb = handlers->extract_crng_user;
+ crng_ready_cb = handlers->crng_ready;
+ mix_pool_bytes_cb = handlers->mix_pool_bytes;
+ credit_init_bits_cb = handlers->credit_init_bits;
+ crng_reseed_cb = handlers->crng_reseed;
+
+ barrier();
+ atomic_set_release(&random_bytes_cb_refcnt, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_register_random_bytes_handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void)
+{
+ int current_random_bytes_cb_refcnt;
+ int n_tries;
+ if (! atomic_long_read(&random_bytes_cb_owner))
+ return -ENODEV;
+
+ /* we're racing the kernel at large to try to catch random_bytes_cb_refcnt
+ * with no callers in flight -- retry and relax up to 100 times.
+ */
+ for (n_tries = 0; n_tries < 100; ++n_tries) {
+ current_random_bytes_cb_refcnt = atomic_cmpxchg(&random_bytes_cb_refcnt, 1, 0);
+ if (current_random_bytes_cb_refcnt == 1)
+ break;
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in wolfssl_linuxkm_unregister_random_bytes_handlers.", current_random_bytes_cb_refcnt);
+ break;
+ }
+ if (msleep_interruptible(10) != 0)
+ return -EINTR;
+ }
+ if (current_random_bytes_cb_refcnt != 1) {
+ pr_warn("WARNING: wolfssl_unregister_random_bytes_handlers called with random_bytes_cb_refcnt == %d", current_random_bytes_cb_refcnt);
+ return -EBUSY;
+ }
+
+ _get_random_bytes_cb = NULL;
+ extract_crng_user_cb = NULL;
+ crng_ready_cb = NULL;
+ mix_pool_bytes_cb = NULL;
+ credit_init_bits_cb = NULL;
+ crng_reseed_cb = NULL;
+
+ module_put((struct module *)atomic_long_read(&random_bytes_cb_owner));
+ barrier();
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_unregister_random_bytes_handlers);
+
+static __always_inline int reserve_random_bytes_cb(void) {
+ int current_random_bytes_cb_refcnt =
+ atomic_read_acquire(&random_bytes_cb_refcnt);
+
+ if (! current_random_bytes_cb_refcnt)
+ return -ENODEV;
+
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in reserve_random_bytes_cb.", current_random_bytes_cb_refcnt);
+ return -EFAULT;
+ }
+
+ for (;;) {
+ int orig_random_bytes_cb_refcnt =
+ atomic_cmpxchg(
+ &random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt + 1);
+ if (orig_random_bytes_cb_refcnt == current_random_bytes_cb_refcnt)
+ return 0;
+ else if (! orig_random_bytes_cb_refcnt)
+ return -ENODEV;
+ else
+ current_random_bytes_cb_refcnt = orig_random_bytes_cb_refcnt;
+ }
+
+ __builtin_unreachable();
+}
+
+static __always_inline void release_random_bytes_cb(void) {
+ atomic_dec(&random_bytes_cb_refcnt);
+}
+
+static inline int call__get_random_bytes_cb(void *buf, size_t len)
+{
+ int ret;
+
+ if (! _get_random_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = _get_random_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline ssize_t call_extract_crng_user_cb(void __user *buf, size_t nbytes)
+{
+ ssize_t ret;
+
+ if (! extract_crng_user_cb)
+ return -ECANCELED;
+
+ ret = (ssize_t)reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = extract_crng_user_cb(buf, nbytes);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline bool call_crng_ready_cb(void)
+{
+ bool ret;
+
+ /* Null crng_ready_cb signifies that the DRBG is always ready, i.e. that if
+ * called, it will always have or obtain sufficient entropy to fulfill the
+ * call.
+ */
+ if (! crng_ready_cb)
+ return 1;
+
+ if (reserve_random_bytes_cb() != 0)
+ return 0;
+
+ ret = crng_ready_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_mix_pool_bytes_cb(const void *buf, size_t len)
+{
+ int ret;
+
+ if (! mix_pool_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = mix_pool_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_credit_init_bits_cb(size_t bits)
+{
+ int ret;
+
+ if (! credit_init_bits_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = credit_init_bits_cb(bits);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_crng_reseed_cb(void)
+{
+ int ret;
+
+ if (! crng_reseed_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = crng_reseed_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+#endif /* WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS */
+
#define CREATE_TRACE_POINTS
#include <trace/events/random.h>
@@ -461,7 +715,15 @@ static struct crng_state primary_crng =
* its value (from 0->1->2).
*/
static int crng_init = 0;
+
#define crng_ready() (likely(crng_init > 1))
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ #define crng_ready_by_cb() (atomic_read(&random_bytes_cb_refcnt) && call_crng_ready_cb())
+ #define crng_ready_maybe_cb() (atomic_read(&random_bytes_cb_refcnt) ? (call_crng_ready_cb() || crng_ready()) : crng_ready())
+#else
+ #define crng_ready_maybe_cb() crng_ready()
+#endif
+
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
@@ -593,6 +855,12 @@ static void mix_pool_bytes(struct entrop
unsigned long flags;
trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
+
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ (void)call_mix_pool_bytes_cb(in, nbytes);
+ /* fall through to mix into native pool too. */
+#endif
+
spin_lock_irqsave(&r->lock, flags);
_mix_pool_bytes(r, in, nbytes);
spin_unlock_irqrestore(&r->lock, flags);
@@ -1059,6 +1327,18 @@ static ssize_t extract_crng_user(void __
__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
int large_request = (nbytes > 256);
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ {
+ ssize_t cb_ret = call_extract_crng_user_cb(buf, nbytes);
+ /* If the callback returns -ECANCELED, that signals that the user
+ * buffer is untouched, and flow can safely fall through to the native
+ * implementation.
+ */
+ if (cb_ret != -ECANCELED)
+ return cb_ret;
+ }
+#endif
+
while (nbytes) {
if (large_request && need_resched()) {
if (signal_pending(current)) {
@@ -1499,7 +1779,7 @@ static void _warn_unseeded_randomness(co
#endif
if (print_once ||
- crng_ready() ||
+ crng_ready_maybe_cb() ||
(previous && (caller == READ_ONCE(*previous))))
return;
WRITE_ONCE(*previous, caller);
@@ -1528,6 +1808,14 @@ static void _get_random_bytes(void *buf,
trace_get_random_bytes(nbytes, _RET_IP_);
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* If call__get_random_bytes_cb() doesn't succeed, flow falls through to
+ * the native implementation. _get_random_bytes() must succeed.
+ */
+ if (call__get_random_bytes_cb(buf, nbytes) == 0)
+ return;
+#endif
+
while (nbytes >= CHACHA_BLOCK_SIZE) {
extract_crng(buf);
buf += CHACHA_BLOCK_SIZE;
@@ -1614,12 +1902,12 @@ static void try_to_generate_entropy(void
*/
int wait_for_random_bytes(void)
{
- if (likely(crng_ready()))
+ if (likely(crng_ready_maybe_cb()))
return 0;
do {
int ret;
- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready_maybe_cb(), HZ);
if (ret)
return ret > 0 ? 0 : ret;
@@ -1641,7 +1929,7 @@ EXPORT_SYMBOL(wait_for_random_bytes);
*/
bool rng_is_initialized(void)
{
- return crng_ready();
+ return crng_ready_maybe_cb();
}
EXPORT_SYMBOL(rng_is_initialized);
@@ -1819,7 +2107,7 @@ urandom_read(struct file *file, char __u
unsigned long flags;
static int maxwarn = 10;
- if (!crng_ready() && maxwarn > 0) {
+ if (!crng_ready_maybe_cb() && maxwarn > 0) {
maxwarn--;
if (__ratelimit(&urandom_warning))
pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
@@ -1848,6 +2136,11 @@ random_poll(struct file *file, poll_tabl
{
__poll_t mask;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ if (crng_ready_by_cb())
+ return EPOLLIN | EPOLLRDNORM;
+#endif
+
poll_wait(file, &crng_init_wait, wait);
poll_wait(file, &random_write_wait, wait);
mask = 0;
@@ -1946,6 +2239,16 @@ static long random_ioctl(struct file *f,
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* fall through to reseed native crng too. */
+ if (call_crng_reseed_cb() == 0) {
+ if (crng_init >= 2) {
+ crng_reseed(&primary_crng, &input_pool);
+ crng_global_init_time = jiffies - 1;
+ }
+ return 0;
+ }
+#endif
if (crng_init < 2)
return -ENODATA;
crng_reseed(&primary_crng, &input_pool);
@@ -1998,7 +2301,7 @@ SYSCALL_DEFINE3(getrandom, char __user *
if (count > INT_MAX)
count = INT_MAX;
- if (!(flags & GRND_INSECURE) && !crng_ready()) {
+ if (!(flags & GRND_INSECURE) && !crng_ready_maybe_cb()) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
ret = wait_for_random_bytes();
--- ./include/linux/random.h.dist 2021-10-31 15:53:10.000000000 -0500
+++ ./include/linux/random.h 2025-06-28 13:09:13.392547118 -0500
@@ -158,4 +158,37 @@ static inline bool __init arch_get_rando
}
#endif
+#ifndef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ #define WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS 1
+#endif
+
+typedef int (*_get_random_bytes_cb_t)(void *buf, size_t len);
+struct iov_iter;
+/* kernels >= 5.17.0 use get_random_bytes_user() */
+typedef ssize_t (*get_random_bytes_user_cb_t)(struct iov_iter *iter);
+/* kernels < 5.17.0 use extract_crng_user(), though some LTS kernels,
+ * e.g. 5.10.236, have the 5.17+ architecture backported.
+ */
+typedef ssize_t (*extract_crng_user_cb_t)(void __user *buf, size_t nbytes);
+typedef bool (*crng_ready_cb_t)(void);
+typedef int (*mix_pool_bytes_cb_t)(const void *buf, size_t len);
+typedef int (*credit_init_bits_cb_t)(size_t bits);
+typedef int (*crng_reseed_cb_t)(void);
+
+struct wolfssl_linuxkm_random_bytes_handlers {
+ _get_random_bytes_cb_t _get_random_bytes;
+ get_random_bytes_user_cb_t get_random_bytes_user;
+ extract_crng_user_cb_t extract_crng_user;
+ crng_ready_cb_t crng_ready;
+ mix_pool_bytes_cb_t mix_pool_bytes;
+ credit_init_bits_cb_t credit_init_bits;
+ crng_reseed_cb_t crng_reseed;
+};
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void);
+
#endif /* _LINUX_RANDOM_H */

View File

@@ -0,0 +1,462 @@
--- ./drivers/char/random.c.dist 2022-05-31 08:33:43.006547419 -0500
+++ ./drivers/char/random.c 2025-07-02 11:42:10.098166804 -0500
@@ -60,6 +60,260 @@
#include <asm/irq_regs.h>
#include <asm/io.h>
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+
+#include <linux/delay.h>
+
+static atomic_long_t random_bytes_cb_owner =
+ ATOMIC_INIT((long)NULL);
+static atomic_t random_bytes_cb_refcnt =
+ ATOMIC_INIT(0); /* 0 if unregistered, 1 if no calls in flight. */
+static _get_random_bytes_cb_t _get_random_bytes_cb = NULL;
+static get_random_bytes_user_cb_t get_random_bytes_user_cb = NULL;
+static crng_ready_cb_t crng_ready_cb = NULL;
+static mix_pool_bytes_cb_t mix_pool_bytes_cb = NULL;
+static credit_init_bits_cb_t credit_init_bits_cb = NULL;
+static crng_reseed_cb_t crng_reseed_cb = NULL;
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers)
+{
+ if ((! new_random_bytes_cb_owner) ||
+ (! handlers) ||
+ (! handlers->_get_random_bytes) ||
+ (! handlers->get_random_bytes_user))
+ {
+ return -EINVAL;
+ }
+
+ /* random_bytes_cb_owner is used to enforce serialization of
+ * wolfssl_linuxkm_register_random_bytes_handlers() and
+ * wolfssl_linuxkm_unregister_random_bytes_handlers().
+ */
+ if (atomic_long_cmpxchg(&random_bytes_cb_owner,
+ (long)NULL,
+ (long)new_random_bytes_cb_owner)
+ != (long)NULL)
+ {
+ return -EBUSY;
+ }
+
+ {
+ int current_random_bytes_cb_refcnt = atomic_read(&random_bytes_cb_refcnt);
+ if (current_random_bytes_cb_refcnt) {
+ pr_err("BUG: random_bytes_cb_refcnt == %d with null random_bytes_cb_owner", current_random_bytes_cb_refcnt);
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -EFAULT;
+ }
+ }
+
+ if (! try_module_get(new_random_bytes_cb_owner)) {
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -ENODEV;
+ }
+
+ _get_random_bytes_cb = handlers->_get_random_bytes;
+ get_random_bytes_user_cb = handlers->get_random_bytes_user;
+ crng_ready_cb = handlers->crng_ready;
+ mix_pool_bytes_cb = handlers->mix_pool_bytes;
+ credit_init_bits_cb = handlers->credit_init_bits;
+ crng_reseed_cb = handlers->crng_reseed;
+
+ barrier();
+ atomic_set_release(&random_bytes_cb_refcnt, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_register_random_bytes_handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void)
+{
+ int current_random_bytes_cb_refcnt;
+ int n_tries;
+ if (! atomic_long_read(&random_bytes_cb_owner))
+ return -ENODEV;
+
+ /* we're racing the kernel at large to try to catch random_bytes_cb_refcnt
+ * with no callers in flight -- retry and relax up to 100 times.
+ */
+ for (n_tries = 0; n_tries < 100; ++n_tries) {
+ current_random_bytes_cb_refcnt = atomic_cmpxchg(&random_bytes_cb_refcnt, 1, 0);
+ if (current_random_bytes_cb_refcnt == 1)
+ break;
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in wolfssl_linuxkm_unregister_random_bytes_handlers.", current_random_bytes_cb_refcnt);
+ break;
+ }
+ if (msleep_interruptible(10) != 0)
+ return -EINTR;
+ }
+ if (current_random_bytes_cb_refcnt != 1) {
+ pr_warn("WARNING: wolfssl_unregister_random_bytes_handlers called with random_bytes_cb_refcnt == %d", current_random_bytes_cb_refcnt);
+ return -EBUSY;
+ }
+
+ _get_random_bytes_cb = NULL;
+ get_random_bytes_user_cb = NULL;
+ crng_ready_cb = NULL;
+ mix_pool_bytes_cb = NULL;
+ credit_init_bits_cb = NULL;
+ crng_reseed_cb = NULL;
+
+ module_put((struct module *)atomic_long_read(&random_bytes_cb_owner));
+ barrier();
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_unregister_random_bytes_handlers);
+
+static __always_inline int reserve_random_bytes_cb(void) {
+ int current_random_bytes_cb_refcnt =
+ atomic_read_acquire(&random_bytes_cb_refcnt);
+
+ if (! current_random_bytes_cb_refcnt)
+ return -ENODEV;
+
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in reserve_random_bytes_cb.", current_random_bytes_cb_refcnt);
+ return -EFAULT;
+ }
+
+ for (;;) {
+ int orig_random_bytes_cb_refcnt =
+ atomic_cmpxchg(
+ &random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt + 1);
+ if (orig_random_bytes_cb_refcnt == current_random_bytes_cb_refcnt)
+ return 0;
+ else if (! orig_random_bytes_cb_refcnt)
+ return -ENODEV;
+ else
+ current_random_bytes_cb_refcnt = orig_random_bytes_cb_refcnt;
+ }
+
+ __builtin_unreachable();
+}
+
+static __always_inline void release_random_bytes_cb(void) {
+ atomic_dec(&random_bytes_cb_refcnt);
+}
+
+static inline int call__get_random_bytes_cb(void *buf, size_t len)
+{
+ int ret;
+
+ if (! _get_random_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = _get_random_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline ssize_t call_get_random_bytes_user_cb(struct iov_iter *iter)
+{
+ ssize_t ret;
+
+ if (! get_random_bytes_user_cb)
+ return -ECANCELED;
+
+ ret = (ssize_t)reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = get_random_bytes_user_cb(iter);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline bool call_crng_ready_cb(void)
+{
+ bool ret;
+
+ /* Null crng_ready_cb signifies that the DRBG is always ready, i.e. that if
+ * called, it will always have or obtain sufficient entropy to fulfill the
+ * call.
+ */
+ if (! crng_ready_cb)
+ return 1;
+
+ if (reserve_random_bytes_cb() != 0)
+ return 0;
+
+ ret = crng_ready_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_mix_pool_bytes_cb(const void *buf, size_t len)
+{
+ int ret;
+
+ if (! mix_pool_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = mix_pool_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_credit_init_bits_cb(size_t bits)
+{
+ int ret;
+
+ if (! credit_init_bits_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = credit_init_bits_cb(bits);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_crng_reseed_cb(void)
+{
+ int ret;
+
+ if (! crng_reseed_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = crng_reseed_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+#endif /* WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS */
+
/*********************************************************************
*
* Initialization and readiness waiting.
@@ -80,7 +334,15 @@ static enum {
CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
} crng_init __read_mostly = CRNG_EMPTY;
static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
+
#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ #define crng_ready_by_cb() (atomic_read(&random_bytes_cb_refcnt) && call_crng_ready_cb())
+ #define crng_ready_maybe_cb() (atomic_read(&random_bytes_cb_refcnt) ? (call_crng_ready_cb() || crng_ready()) : crng_ready())
+#else
+ #define crng_ready_maybe_cb() crng_ready()
+#endif
+
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
@@ -106,7 +368,7 @@ MODULE_PARM_DESC(ratelimit_disable, "Dis
*/
bool rng_is_initialized(void)
{
- return crng_ready();
+ return crng_ready_maybe_cb();
}
EXPORT_SYMBOL(rng_is_initialized);
@@ -130,11 +392,11 @@ static void try_to_generate_entropy(void
*/
int wait_for_random_bytes(void)
{
- while (!crng_ready()) {
+ while (!crng_ready_maybe_cb()) {
int ret;
try_to_generate_entropy();
- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready_maybe_cb(), HZ);
if (ret)
return ret > 0 ? 0 : ret;
}
@@ -188,7 +450,7 @@ static void __cold process_random_ready_
}
#define warn_unseeded_randomness() \
- if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready_maybe_cb()) \
printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
__func__, (void *)_RET_IP_, crng_init)
@@ -407,6 +669,14 @@ static void _get_random_bytes(void *buf,
if (!len)
return;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* If call__get_random_bytes_cb() doesn't succeed, flow falls through to
+ * the native implementation. _get_random_bytes() must succeed.
+ */
+ if (call__get_random_bytes_cb(buf, len) == 0)
+ return;
+#endif
+
first_block_len = min_t(size_t, 32, len);
crng_make_state(chacha_state, buf, first_block_len);
len -= first_block_len;
@@ -456,6 +726,18 @@ static ssize_t get_random_bytes_user(str
if (unlikely(!iov_iter_count(iter)))
return 0;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ {
+ ssize_t cb_ret = call_get_random_bytes_user_cb(iter);
+ /* If the callback returns -ECANCELED, that signals that iter is
+ * still intact, and flow can safely fall through to the native
+ * implementation.
+ */
+ if (cb_ret != -ECANCELED)
+ return cb_ret;
+ }
+#endif
+
/*
* Immediately overwrite the ChaCha key at index 4 with random
* bytes, in case userspace causes copy_to_user() below to sleep
@@ -532,7 +814,7 @@ type get_random_ ##type(void) \
\
warn_unseeded_randomness(); \
\
- if (!crng_ready()) { \
+ if (!crng_ready_maybe_cb()) { \
_get_random_bytes(&ret, sizeof(ret)); \
return ret; \
} \
@@ -656,6 +938,11 @@ static void mix_pool_bytes(const void *b
{
unsigned long flags;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ (void)call_mix_pool_bytes_cb(buf, len);
+ /* fall through to mix into native pool too. */
+#endif
+
spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(buf, len);
spin_unlock_irqrestore(&input_pool.lock, flags);
@@ -707,7 +994,11 @@ static void extract_entropy(void *buf, s
memzero_explicit(&block, sizeof(block));
}
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+#define credit_init_bits(bits) do { (void)call_credit_init_bits_cb(bits); if (!crng_ready()) _credit_init_bits(bits); } while (0)
+#else
#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+#endif
static void __cold _credit_init_bits(size_t bits)
{
@@ -1233,7 +1524,7 @@ SYSCALL_DEFINE3(getrandom, char __user *
if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
return -EINVAL;
- if (!crng_ready() && !(flags & GRND_INSECURE)) {
+ if (!crng_ready_maybe_cb() && !(flags & GRND_INSECURE)) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
ret = wait_for_random_bytes();
@@ -1249,6 +1540,10 @@ SYSCALL_DEFINE3(getrandom, char __user *
static __poll_t random_poll(struct file *file, poll_table *wait)
{
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ if (crng_ready_by_cb())
+ return EPOLLIN | EPOLLRDNORM;
+#endif
poll_wait(file, &crng_init_wait, wait);
return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}
@@ -1290,7 +1585,7 @@ static ssize_t urandom_read_iter(struct
{
static int maxwarn = 10;
- if (!crng_ready()) {
+ if (!crng_ready_maybe_cb()) {
if (!ratelimit_disable && maxwarn <= 0)
++urandom_warning.missed;
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
@@ -1368,6 +1663,14 @@ static long random_ioctl(struct file *f,
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* fall through to reseed native crng too. */
+ if (call_crng_reseed_cb() == 0) {
+ if (crng_ready())
+ crng_reseed();
+ return 0;
+ }
+#endif
if (!crng_ready())
return -ENODATA;
crng_reseed();
--- ./include/linux/random.h.dist 2022-05-31 08:33:43.007547457 -0500
+++ ./include/linux/random.h 2025-06-30 12:06:15.219731761 -0500
@@ -138,4 +138,37 @@ int random_online_cpu(unsigned int cpu);
extern const struct file_operations random_fops, urandom_fops;
#endif
+#ifndef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ #define WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS 1
+#endif
+
+typedef int (*_get_random_bytes_cb_t)(void *buf, size_t len);
+struct iov_iter;
+/* kernels >= 5.17.0 use get_random_bytes_user() */
+typedef ssize_t (*get_random_bytes_user_cb_t)(struct iov_iter *iter);
+/* kernels < 5.17.0 use extract_crng_user(), though some LTS kernels,
+ * e.g. 5.10.236, have the 5.17+ architecture backported.
+ */
+typedef ssize_t (*extract_crng_user_cb_t)(void __user *buf, size_t nbytes);
+typedef bool (*crng_ready_cb_t)(void);
+typedef int (*mix_pool_bytes_cb_t)(const void *buf, size_t len);
+typedef int (*credit_init_bits_cb_t)(size_t bits);
+typedef int (*crng_reseed_cb_t)(void);
+
+struct wolfssl_linuxkm_random_bytes_handlers {
+ _get_random_bytes_cb_t _get_random_bytes;
+ get_random_bytes_user_cb_t get_random_bytes_user;
+ extract_crng_user_cb_t extract_crng_user;
+ crng_ready_cb_t crng_ready;
+ mix_pool_bytes_cb_t mix_pool_bytes;
+ credit_init_bits_cb_t credit_init_bits;
+ crng_reseed_cb_t crng_reseed;
+};
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void);
+
#endif /* _LINUX_RANDOM_H */
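
Because registration pins the owner with try_module_get(), the owner module cannot be unloaded until unregistration succeeds, and the unregister routine itself can return -EBUSY if, after its internal retries (100 x 10 ms), calls are still in flight. A hypothetical teardown helper, with my_rng as a placeholder name, might add one more coarse retry layer:

#include <linux/delay.h>
#include <linux/printk.h>
#include <linux/random.h>

static int my_rng_teardown(void)
{
    int ret = -ENODEV;
    int tries;

    for (tries = 0; tries < 5; ++tries) {
        ret = wolfssl_linuxkm_unregister_random_bytes_handlers();
        if (ret != -EBUSY)
            break; /* 0, -ENODEV (never registered), or -EINTR */
        msleep(200); /* let in-flight readers drain, then retry */
    }

    if (ret)
        pr_warn("my_rng: unregister returned %d; module stays pinned\n", ret);
    return ret;
}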

View File

@@ -0,0 +1,475 @@
--- ./drivers/char/random.c.dist 2024-01-19 16:25:03.754138321 -0600
+++ ./drivers/char/random.c 2025-07-02 10:45:31.769041473 -0500
@@ -60,6 +60,260 @@
#include <asm/irq_regs.h>
#include <asm/io.h>
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+
+#include <linux/delay.h>
+
+static atomic_long_t random_bytes_cb_owner =
+ ATOMIC_INIT((long)NULL);
+static atomic_t random_bytes_cb_refcnt =
+ ATOMIC_INIT(0); /* 0 if unregistered, 1 if no calls in flight. */
+static _get_random_bytes_cb_t _get_random_bytes_cb = NULL;
+static get_random_bytes_user_cb_t get_random_bytes_user_cb = NULL;
+static crng_ready_cb_t crng_ready_cb = NULL;
+static mix_pool_bytes_cb_t mix_pool_bytes_cb = NULL;
+static credit_init_bits_cb_t credit_init_bits_cb = NULL;
+static crng_reseed_cb_t crng_reseed_cb = NULL;
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers)
+{
+ if ((! new_random_bytes_cb_owner) ||
+ (! handlers) ||
+ (! handlers->_get_random_bytes) ||
+ (! handlers->get_random_bytes_user))
+ {
+ return -EINVAL;
+ }
+
+ /* random_bytes_cb_owner is used to enforce serialization of
+ * wolfssl_linuxkm_register_random_bytes_handlers() and
+ * wolfssl_linuxkm_unregister_random_bytes_handlers().
+ */
+ if (atomic_long_cmpxchg(&random_bytes_cb_owner,
+ (long)NULL,
+ (long)new_random_bytes_cb_owner)
+ != (long)NULL)
+ {
+ return -EBUSY;
+ }
+
+ {
+ int current_random_bytes_cb_refcnt = atomic_read(&random_bytes_cb_refcnt);
+ if (current_random_bytes_cb_refcnt) {
+ pr_err("BUG: random_bytes_cb_refcnt == %d with null random_bytes_cb_owner", current_random_bytes_cb_refcnt);
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -EFAULT;
+ }
+ }
+
+ if (! try_module_get(new_random_bytes_cb_owner)) {
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -ENODEV;
+ }
+
+ _get_random_bytes_cb = handlers->_get_random_bytes;
+ get_random_bytes_user_cb = handlers->get_random_bytes_user;
+ crng_ready_cb = handlers->crng_ready;
+ mix_pool_bytes_cb = handlers->mix_pool_bytes;
+ credit_init_bits_cb = handlers->credit_init_bits;
+ crng_reseed_cb = handlers->crng_reseed;
+
+ barrier();
+ atomic_set_release(&random_bytes_cb_refcnt, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_register_random_bytes_handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void)
+{
+ int current_random_bytes_cb_refcnt;
+ int n_tries;
+ if (! atomic_long_read(&random_bytes_cb_owner))
+ return -ENODEV;
+
+ /* we're racing the kernel at large to try to catch random_bytes_cb_refcnt
+ * with no callers in flight -- retry and relax up to 100 times.
+ */
+ for (n_tries = 0; n_tries < 100; ++n_tries) {
+ current_random_bytes_cb_refcnt = atomic_cmpxchg(&random_bytes_cb_refcnt, 1, 0);
+ if (current_random_bytes_cb_refcnt == 1)
+ break;
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in wolfssl_linuxkm_unregister_random_bytes_handlers.", current_random_bytes_cb_refcnt);
+ break;
+ }
+ if (msleep_interruptible(10) != 0)
+ return -EINTR;
+ }
+ if (current_random_bytes_cb_refcnt != 1) {
+ pr_warn("WARNING: wolfssl_unregister_random_bytes_handlers called with random_bytes_cb_refcnt == %d", current_random_bytes_cb_refcnt);
+ return -EBUSY;
+ }
+
+ _get_random_bytes_cb = NULL;
+ get_random_bytes_user_cb = NULL;
+ crng_ready_cb = NULL;
+ mix_pool_bytes_cb = NULL;
+ credit_init_bits_cb = NULL;
+ crng_reseed_cb = NULL;
+
+ module_put((struct module *)atomic_long_read(&random_bytes_cb_owner));
+ barrier();
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_unregister_random_bytes_handlers);
+
+static __always_inline int reserve_random_bytes_cb(void) {
+ int current_random_bytes_cb_refcnt =
+ atomic_read_acquire(&random_bytes_cb_refcnt);
+
+ if (! current_random_bytes_cb_refcnt)
+ return -ENODEV;
+
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in reserve_random_bytes_cb.", current_random_bytes_cb_refcnt);
+ return -EFAULT;
+ }
+
+ for (;;) {
+ int orig_random_bytes_cb_refcnt =
+ atomic_cmpxchg(
+ &random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt + 1);
+ if (orig_random_bytes_cb_refcnt == current_random_bytes_cb_refcnt)
+ return 0;
+ else if (! orig_random_bytes_cb_refcnt)
+ return -ENODEV;
+ else
+ current_random_bytes_cb_refcnt = orig_random_bytes_cb_refcnt;
+ }
+
+ __builtin_unreachable();
+}
+
+static __always_inline void release_random_bytes_cb(void) {
+ atomic_dec(&random_bytes_cb_refcnt);
+}
+
+static inline int call__get_random_bytes_cb(void *buf, size_t len)
+{
+ int ret;
+
+ if (! _get_random_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = _get_random_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline ssize_t call_get_random_bytes_user_cb(struct iov_iter *iter)
+{
+ ssize_t ret;
+
+ if (! get_random_bytes_user_cb)
+ return -ECANCELED;
+
+ ret = (ssize_t)reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = get_random_bytes_user_cb(iter);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline bool call_crng_ready_cb(void)
+{
+ bool ret;
+
+ /* Null crng_ready_cb signifies that the DRBG is always ready, i.e. that if
+ * called, it will always have or obtain sufficient entropy to fulfill the
+ * call.
+ */
+ if (! crng_ready_cb)
+ return 1;
+
+ if (reserve_random_bytes_cb() != 0)
+ return 0;
+
+ ret = crng_ready_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_mix_pool_bytes_cb(const void *buf, size_t len)
+{
+ int ret;
+
+ if (! mix_pool_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = mix_pool_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_credit_init_bits_cb(size_t bits)
+{
+ int ret;
+
+ if (! credit_init_bits_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = credit_init_bits_cb(bits);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_crng_reseed_cb(void)
+{
+ int ret;
+
+ if (! crng_reseed_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = crng_reseed_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+#endif /* WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS */
+
/*********************************************************************
*
* Initialization and readiness waiting.
@@ -80,7 +334,15 @@ static enum {
CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
} crng_init __read_mostly = CRNG_EMPTY;
static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
+
#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ #define crng_ready_by_cb() (atomic_read(&random_bytes_cb_refcnt) && call_crng_ready_cb())
+ #define crng_ready_maybe_cb() (atomic_read(&random_bytes_cb_refcnt) ? (call_crng_ready_cb() || crng_ready()) : crng_ready())
+#else
+ #define crng_ready_maybe_cb() crng_ready()
+#endif
+
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
@@ -104,7 +366,7 @@ MODULE_PARM_DESC(ratelimit_disable, "Dis
*/
bool rng_is_initialized(void)
{
- return crng_ready();
+ return crng_ready_maybe_cb();
}
EXPORT_SYMBOL(rng_is_initialized);
@@ -128,11 +390,11 @@ static void try_to_generate_entropy(void
*/
int wait_for_random_bytes(void)
{
- while (!crng_ready()) {
+ while (!crng_ready_maybe_cb()) {
int ret;
try_to_generate_entropy();
- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready_maybe_cb(), HZ);
if (ret)
return ret > 0 ? 0 : ret;
}
@@ -141,7 +403,7 @@ int wait_for_random_bytes(void)
EXPORT_SYMBOL(wait_for_random_bytes);
#define warn_unseeded_randomness() \
- if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready_maybe_cb()) \
printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
__func__, (void *)_RET_IP_, crng_init)
@@ -362,6 +624,14 @@ static void _get_random_bytes(void *buf,
if (!len)
return;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* If call__get_random_bytes_cb() doesn't succeed, flow falls through to
+ * the native implementation. _get_random_bytes() must succeed.
+ */
+ if (call__get_random_bytes_cb(buf, len) == 0)
+ return;
+#endif
+
first_block_len = min_t(size_t, 32, len);
crng_make_state(chacha_state, buf, first_block_len);
len -= first_block_len;
@@ -408,6 +678,18 @@ static ssize_t get_random_bytes_user(str
if (unlikely(!iov_iter_count(iter)))
return 0;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ {
+ ssize_t cb_ret = call_get_random_bytes_user_cb(iter);
+ /* If the callback returns -ECANCELED, that signals that iter is
+ * still intact, and flow can safely fall through to the native
+ * implementation.
+ */
+ if (cb_ret != -ECANCELED)
+ return cb_ret;
+ }
+#endif
+
/*
* Immediately overwrite the ChaCha key at index 4 with random
* bytes, in case userspace causes copy_to_iter() below to sleep
@@ -484,7 +766,7 @@ type get_random_ ##type(void) \
\
warn_unseeded_randomness(); \
\
- if (!crng_ready()) { \
+ if (!crng_ready_maybe_cb()) { \
_get_random_bytes(&ret, sizeof(ret)); \
return ret; \
} \
@@ -620,6 +902,11 @@ static void mix_pool_bytes(const void *b
{
unsigned long flags;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ (void)call_mix_pool_bytes_cb(buf, len);
+ /* fall through to mix into native pool too. */
+#endif
+
spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(buf, len);
spin_unlock_irqrestore(&input_pool.lock, flags);
@@ -679,7 +966,11 @@ static void extract_entropy(void *buf, s
memzero_explicit(&block, sizeof(block));
}
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+#define credit_init_bits(bits) do { (void)call_credit_init_bits_cb(bits); if (!crng_ready()) _credit_init_bits(bits); } while (0)
+#else
#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+#endif
static void __cold _credit_init_bits(size_t bits)
{
@@ -1321,7 +1612,7 @@ SYSCALL_DEFINE3(getrandom, char __user *
if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
return -EINVAL;
- if (!crng_ready() && !(flags & GRND_INSECURE)) {
+ if (!crng_ready_maybe_cb() && !(flags & GRND_INSECURE)) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
ret = wait_for_random_bytes();
@@ -1337,6 +1628,10 @@ SYSCALL_DEFINE3(getrandom, char __user *
static __poll_t random_poll(struct file *file, poll_table *wait)
{
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ if (crng_ready_by_cb())
+ return EPOLLIN | EPOLLRDNORM;
+#endif
poll_wait(file, &crng_init_wait, wait);
return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}
@@ -1382,10 +1677,10 @@ static ssize_t urandom_read_iter(struct
* Opportunistically attempt to initialize the RNG on platforms that
* have fast cycle counters, but don't (for now) require it to succeed.
*/
- if (!crng_ready())
+ if (!crng_ready_maybe_cb())
try_to_generate_entropy();
- if (!crng_ready()) {
+ if (!crng_ready_maybe_cb()) {
if (!ratelimit_disable && maxwarn <= 0)
++urandom_warning.missed;
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
@@ -1402,7 +1697,7 @@ static ssize_t random_read_iter(struct k
{
int ret;
- if (!crng_ready() &&
+ if (!crng_ready_maybe_cb() &&
((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
(kiocb->ki_filp->f_flags & O_NONBLOCK)))
return -EAGAIN;
@@ -1468,6 +1763,14 @@ static long random_ioctl(struct file *f,
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* fall through to reseed native crng too. */
+ if (call_crng_reseed_cb() == 0) {
+ if (crng_ready())
+ crng_reseed(NULL);
+ return 0;
+ }
+#endif
if (!crng_ready())
return -ENODATA;
crng_reseed();
--- ./include/linux/random.h.dist 2024-01-19 16:25:07.891223702 -0600
+++ ./include/linux/random.h 2025-06-30 12:38:54.353341542 -0500
@@ -202,4 +202,37 @@ int random_online_cpu(unsigned int cpu);
extern const struct file_operations random_fops, urandom_fops;
#endif
+#ifndef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ #define WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS 1
+#endif
+
+typedef int (*_get_random_bytes_cb_t)(void *buf, size_t len);
+struct iov_iter;
+/* kernels >= 5.17.0 use get_random_bytes_user() */
+typedef ssize_t (*get_random_bytes_user_cb_t)(struct iov_iter *iter);
+/* kernels < 5.17.0 use extract_crng_user(), though some LTS kernels,
+ * e.g. 5.10.236, have the 5.17+ architecture backported.
+ */
+typedef ssize_t (*extract_crng_user_cb_t)(void __user *buf, size_t nbytes);
+typedef bool (*crng_ready_cb_t)(void);
+typedef int (*mix_pool_bytes_cb_t)(const void *buf, size_t len);
+typedef int (*credit_init_bits_cb_t)(size_t bits);
+typedef int (*crng_reseed_cb_t)(void);
+
+struct wolfssl_linuxkm_random_bytes_handlers {
+ _get_random_bytes_cb_t _get_random_bytes;
+ get_random_bytes_user_cb_t get_random_bytes_user;
+ extract_crng_user_cb_t extract_crng_user;
+ crng_ready_cb_t crng_ready;
+ mix_pool_bytes_cb_t mix_pool_bytes;
+ credit_init_bits_cb_t credit_init_bits;
+ crng_reseed_cb_t crng_reseed;
+};
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void);
+
#endif /* _LINUX_RANDOM_H */
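
The remaining hooks are optional and mirror-only: the patched mix_pool_bytes() and credit_init_bits() forward their inputs to the callback and then still update the native pool, while a successful crng_reseed callback makes RNDRESEEDCRNG report success after also reseeding the native crng. A hypothetical set of implementations follows; the my_drbg_* primitives are placeholder stubs, not part of the patches.

#include <linux/types.h>

/* Placeholder DRBG primitives -- stand-ins for a real implementation. */
static bool my_drbg_is_seeded(void) { return false; }
static void my_drbg_absorb(const void *buf, size_t len) { (void)buf; (void)len; }
static int my_drbg_reseed(void) { return -EAGAIN; }

static bool my_crng_ready(void)
{
    /* Consulted via crng_ready_maybe_cb()/crng_ready_by_cb(); leaving
     * the hook null instead means "always ready". */
    return my_drbg_is_seeded();
}

static int my_mix_pool_bytes(const void *buf, size_t len)
{
    my_drbg_absorb(buf, len); /* the native pool is mixed regardless */
    return 0;
}

static int my_credit_init_bits(size_t bits)
{
    (void)bits; /* entropy accounting is the DRBG's own business */
    return 0;
}

static int my_crng_reseed(void)
{
    /* 0 == handled; nonzero falls back to the native -ENODATA/reseed path */
    return my_drbg_reseed();
}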

View File

@@ -0,0 +1,484 @@
--- ./drivers/char/random.c.dist 2025-01-19 19:03:47.877152701 -0600
+++ ./drivers/char/random.c 2025-07-02 10:40:21.994303997 -0500
@@ -67,6 +67,260 @@
#include <asm/irq_regs.h>
#include <asm/io.h>
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+
+#include <linux/delay.h>
+
+static atomic_long_t random_bytes_cb_owner =
+ ATOMIC_INIT((long)NULL);
+static atomic_t random_bytes_cb_refcnt =
+ ATOMIC_INIT(0); /* 0 if unregistered, 1 if no calls in flight. */
+static _get_random_bytes_cb_t _get_random_bytes_cb = NULL;
+static get_random_bytes_user_cb_t get_random_bytes_user_cb = NULL;
+static crng_ready_cb_t crng_ready_cb = NULL;
+static mix_pool_bytes_cb_t mix_pool_bytes_cb = NULL;
+static credit_init_bits_cb_t credit_init_bits_cb = NULL;
+static crng_reseed_cb_t crng_reseed_cb = NULL;
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers)
+{
+ if ((! new_random_bytes_cb_owner) ||
+ (! handlers) ||
+ (! handlers->_get_random_bytes) ||
+ (! handlers->get_random_bytes_user))
+ {
+ return -EINVAL;
+ }
+
+ /* random_bytes_cb_owner is used to enforce serialization of
+ * wolfssl_linuxkm_register_random_bytes_handlers() and
+ * wolfssl_linuxkm_unregister_random_bytes_handlers().
+ */
+ if (atomic_long_cmpxchg(&random_bytes_cb_owner,
+ (long)NULL,
+ (long)new_random_bytes_cb_owner)
+ != (long)NULL)
+ {
+ return -EBUSY;
+ }
+
+ {
+ int current_random_bytes_cb_refcnt = atomic_read(&random_bytes_cb_refcnt);
+ if (current_random_bytes_cb_refcnt) {
+ pr_err("BUG: random_bytes_cb_refcnt == %d with null random_bytes_cb_owner", current_random_bytes_cb_refcnt);
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -EFAULT;
+ }
+ }
+
+ if (! try_module_get(new_random_bytes_cb_owner)) {
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -ENODEV;
+ }
+
+ _get_random_bytes_cb = handlers->_get_random_bytes;
+ get_random_bytes_user_cb = handlers->get_random_bytes_user;
+ crng_ready_cb = handlers->crng_ready;
+ mix_pool_bytes_cb = handlers->mix_pool_bytes;
+ credit_init_bits_cb = handlers->credit_init_bits;
+ crng_reseed_cb = handlers->crng_reseed;
+
+ barrier();
+ atomic_set_release(&random_bytes_cb_refcnt, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_register_random_bytes_handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void)
+{
+ int current_random_bytes_cb_refcnt;
+ int n_tries;
+ if (! atomic_long_read(&random_bytes_cb_owner))
+ return -ENODEV;
+
+ /* we're racing the kernel at large to try to catch random_bytes_cb_refcnt
+ * with no callers in flight -- retry and relax up to 100 times.
+ */
+ for (n_tries = 0; n_tries < 100; ++n_tries) {
+ current_random_bytes_cb_refcnt = atomic_cmpxchg(&random_bytes_cb_refcnt, 1, 0);
+ if (current_random_bytes_cb_refcnt == 1)
+ break;
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in wolfssl_linuxkm_unregister_random_bytes_handlers.", current_random_bytes_cb_refcnt);
+ break;
+ }
+ if (msleep_interruptible(10) != 0)
+ return -EINTR;
+ }
+ if (current_random_bytes_cb_refcnt != 1) {
+ pr_warn("WARNING: wolfssl_unregister_random_bytes_handlers called with random_bytes_cb_refcnt == %d", current_random_bytes_cb_refcnt);
+ return -EBUSY;
+ }
+
+ _get_random_bytes_cb = NULL;
+ get_random_bytes_user_cb = NULL;
+ crng_ready_cb = NULL;
+ mix_pool_bytes_cb = NULL;
+ credit_init_bits_cb = NULL;
+ crng_reseed_cb = NULL;
+
+ module_put((struct module *)atomic_long_read(&random_bytes_cb_owner));
+ barrier();
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_unregister_random_bytes_handlers);
+
+static __always_inline int reserve_random_bytes_cb(void) {
+ int current_random_bytes_cb_refcnt =
+ atomic_read_acquire(&random_bytes_cb_refcnt);
+
+ if (! current_random_bytes_cb_refcnt)
+ return -ENODEV;
+
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in reserve_random_bytes_cb.", current_random_bytes_cb_refcnt);
+ return -EFAULT;
+ }
+
+ for (;;) {
+ int orig_random_bytes_cb_refcnt =
+ atomic_cmpxchg(
+ &random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt + 1);
+ if (orig_random_bytes_cb_refcnt == current_random_bytes_cb_refcnt)
+ return 0;
+ else if (! orig_random_bytes_cb_refcnt)
+ return -ENODEV;
+ else
+ current_random_bytes_cb_refcnt = orig_random_bytes_cb_refcnt;
+ }
+
+ __builtin_unreachable();
+}
+
+static __always_inline void release_random_bytes_cb(void) {
+ atomic_dec(&random_bytes_cb_refcnt);
+}
+
+static inline int call__get_random_bytes_cb(void *buf, size_t len)
+{
+ int ret;
+
+ if (! _get_random_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = _get_random_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline ssize_t call_get_random_bytes_user_cb(struct iov_iter *iter)
+{
+ ssize_t ret;
+
+ if (! get_random_bytes_user_cb)
+ return -ECANCELED;
+
+ ret = (ssize_t)reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = get_random_bytes_user_cb(iter);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline bool call_crng_ready_cb(void)
+{
+ bool ret;
+
+ /* Null crng_ready_cb signifies that the DRBG is always ready, i.e. that if
+ * called, it will always have or obtain sufficient entropy to fulfill the
+ * call.
+ */
+ if (! crng_ready_cb)
+ return 1;
+
+ if (reserve_random_bytes_cb() != 0)
+ return 0;
+
+ ret = crng_ready_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_mix_pool_bytes_cb(const void *buf, size_t len)
+{
+ int ret;
+
+ if (! mix_pool_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = mix_pool_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_credit_init_bits_cb(size_t bits)
+{
+ int ret;
+
+ if (! credit_init_bits_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = credit_init_bits_cb(bits);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_crng_reseed_cb(void)
+{
+ int ret;
+
+ if (! crng_reseed_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = crng_reseed_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+#endif /* WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS */
+
/*********************************************************************
*
* Initialization and readiness waiting.
@@ -87,7 +341,15 @@ static enum {
CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
} crng_init __read_mostly = CRNG_EMPTY;
static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
+
#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ #define crng_ready_by_cb() (atomic_read(&random_bytes_cb_refcnt) && call_crng_ready_cb())
+ #define crng_ready_maybe_cb() (atomic_read(&random_bytes_cb_refcnt) ? (call_crng_ready_cb() || crng_ready()) : crng_ready())
+#else
+ #define crng_ready_maybe_cb() crng_ready()
+#endif
+
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
@@ -112,7 +374,7 @@ MODULE_PARM_DESC(ratelimit_disable, "Dis
*/
bool rng_is_initialized(void)
{
- return crng_ready();
+ return crng_ready_maybe_cb();
}
EXPORT_SYMBOL(rng_is_initialized);
@@ -136,11 +398,11 @@ static void try_to_generate_entropy(void
*/
int wait_for_random_bytes(void)
{
- while (!crng_ready()) {
+ while (!crng_ready_maybe_cb()) {
int ret;
try_to_generate_entropy();
- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready_maybe_cb(), HZ);
if (ret)
return ret > 0 ? 0 : ret;
}
@@ -160,7 +422,7 @@ int __cold execute_with_initialized_rng(
int ret = 0;
spin_lock_irqsave(&random_ready_notifier.lock, flags);
- if (crng_ready())
+ if (crng_ready_maybe_cb())
nb->notifier_call(nb, 0, NULL);
else
ret = raw_notifier_chain_register((struct raw_notifier_head *)&random_ready_notifier.head, nb);
@@ -169,7 +431,7 @@ int __cold execute_with_initialized_rng(
}
#define warn_unseeded_randomness() \
- if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready_maybe_cb()) \
printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
__func__, (void *)_RET_IP_, crng_init)
@@ -402,6 +664,14 @@ static void _get_random_bytes(void *buf,
if (!len)
return;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* If call__get_random_bytes_cb() doesn't succeed, flow falls through to
+ * the native implementation -- _get_random_bytes() returns void and must
+ * always succeed by one path or the other.
+ */
+ if (call__get_random_bytes_cb(buf, len) == 0)
+ return;
+#endif
+
first_block_len = min_t(size_t, 32, len);
crng_make_state(chacha_state, buf, first_block_len);
len -= first_block_len;
@@ -448,6 +718,18 @@ static ssize_t get_random_bytes_user(str
if (unlikely(!iov_iter_count(iter)))
return 0;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ {
+ ssize_t cb_ret = call_get_random_bytes_user_cb(iter);
+ /* If the callback returns -ECANCELED, that signals that iter is
+ * still intact, and flow can safely fall through to the native
+ * implementation.
+ */
+ if (cb_ret != -ECANCELED)
+ return cb_ret;
+ }
+#endif
+
/*
* Immediately overwrite the ChaCha key at index 4 with random
* bytes, in case userspace causes copy_to_iter() below to sleep
@@ -524,7 +806,7 @@ type get_random_ ##type(void) \
\
warn_unseeded_randomness(); \
\
- if (!crng_ready()) { \
+ if (!crng_ready_maybe_cb()) { \
_get_random_bytes(&ret, sizeof(ret)); \
return ret; \
} \
@@ -660,6 +942,11 @@ static void mix_pool_bytes(const void *b
{
unsigned long flags;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ (void)call_mix_pool_bytes_cb(buf, len);
+ /* fall through to mix into native pool too. */
+#endif
+
spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(buf, len);
spin_unlock_irqrestore(&input_pool.lock, flags);
@@ -719,7 +1006,11 @@ static void extract_entropy(void *buf, s
memzero_explicit(&block, sizeof(block));
}
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+#define credit_init_bits(bits) do { (void)call_credit_init_bits_cb(bits); if (!crng_ready()) _credit_init_bits(bits); } while (0)
+#else
#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+#endif
static void __cold _credit_init_bits(size_t bits)
{
@@ -1400,7 +1691,7 @@ SYSCALL_DEFINE3(getrandom, char __user *
if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
return -EINVAL;
- if (!crng_ready() && !(flags & GRND_INSECURE)) {
+ if (!crng_ready_maybe_cb() && !(flags & GRND_INSECURE)) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
ret = wait_for_random_bytes();
@@ -1416,6 +1707,10 @@ SYSCALL_DEFINE3(getrandom, char __user *
static __poll_t random_poll(struct file *file, poll_table *wait)
{
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ if (crng_ready_by_cb())
+ return EPOLLIN | EPOLLRDNORM;
+#endif
poll_wait(file, &crng_init_wait, wait);
return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}
@@ -1461,10 +1756,10 @@ static ssize_t urandom_read_iter(struct
* Opportunistically attempt to initialize the RNG on platforms that
* have fast cycle counters, but don't (for now) require it to succeed.
*/
- if (!crng_ready())
+ if (!crng_ready_maybe_cb())
try_to_generate_entropy();
- if (!crng_ready()) {
+ if (!crng_ready_maybe_cb()) {
if (!ratelimit_disable && maxwarn <= 0)
++urandom_warning.missed;
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
@@ -1481,7 +1776,7 @@ static ssize_t random_read_iter(struct k
{
int ret;
- if (!crng_ready() &&
+ if (!crng_ready_by_cb() &&
((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
(kiocb->ki_filp->f_flags & O_NONBLOCK)))
return -EAGAIN;
@@ -1546,6 +1841,14 @@ static long random_ioctl(struct file *f,
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* fall through to reseed native crng too. */
+ if (call_crng_reseed_cb() == 0) {
+ if (crng_ready())
+ crng_reseed(NULL);
+ return 0;
+ }
+#endif
if (!crng_ready())
return -ENODATA;
crng_reseed(NULL);
--- ./include/linux/random.h.dist 2025-01-19 19:03:57.524328914 -0600
+++ ./include/linux/random.h 2025-06-30 12:04:32.801676104 -0500
@@ -161,4 +161,37 @@ int random_online_cpu(unsigned int cpu);
extern const struct file_operations random_fops, urandom_fops;
#endif
+#ifndef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ #define WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS 1
+#endif
+
+typedef int (*_get_random_bytes_cb_t)(void *buf, size_t len);
+struct iov_iter;
+/* kernels >= 5.17.0 use get_random_bytes_user() */
+typedef ssize_t (*get_random_bytes_user_cb_t)(struct iov_iter *iter);
+/* kernels < 5.17.0 use extract_crng_user(), though some LTS kernels,
+ * e.g. 5.10.236, have the 5.17+ architecture backported.
+ */
+typedef ssize_t (*extract_crng_user_cb_t)(void __user *buf, size_t nbytes);
+typedef bool (*crng_ready_cb_t)(void);
+typedef int (*mix_pool_bytes_cb_t)(const void *buf, size_t len);
+typedef int (*credit_init_bits_cb_t)(size_t bits);
+typedef int (*crng_reseed_cb_t)(void);
+
+struct wolfssl_linuxkm_random_bytes_handlers {
+ _get_random_bytes_cb_t _get_random_bytes;
+ get_random_bytes_user_cb_t get_random_bytes_user;
+ extract_crng_user_cb_t extract_crng_user;
+ crng_ready_cb_t crng_ready;
+ mix_pool_bytes_cb_t mix_pool_bytes;
+ credit_init_bits_cb_t credit_init_bits;
+ crng_reseed_cb_t crng_reseed;
+};
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void);
+
#endif /* _LINUX_RANDOM_H */
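The reserve/release machinery in the random.c half of this patch amounts to a small lock-free reader count: refcnt 0 means unregistered, 1 means registered and idle, and each in-flight call holds one extra reference. A condensed restatement of the pattern (a sketch, not the patch text; the real code also diagnoses negative counts as bugs):

static int reserve(atomic_t *refcnt)
{
    int cur = atomic_read_acquire(refcnt);

    while (cur > 0) {
        int old = atomic_cmpxchg(refcnt, cur, cur + 1);
        if (old == cur)
            return 0;   /* reservation taken -- handlers safe to call */
        cur = old;      /* lost a race -- retry against the value we saw */
    }
    return -ENODEV;     /* unregistered, or torn down while we raced */
}

static void release(atomic_t *refcnt)
{
    atomic_dec(refcnt); /* drop back toward the idle count of 1 */
}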

View File

@@ -0,0 +1,484 @@
--- ./drivers/char/random.c.dist 2025-05-27 15:19:59.167827834 -0500
+++ ./drivers/char/random.c 2025-07-02 09:34:56.197972526 -0500
@@ -67,6 +67,260 @@
#include <asm/irq_regs.h>
#include <asm/io.h>
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+
+#include <linux/delay.h>
+
+static atomic_long_t random_bytes_cb_owner =
+ ATOMIC_INIT((long)NULL);
+static atomic_t random_bytes_cb_refcnt =
+ ATOMIC_INIT(0); /* 0 if unregistered, 1 if no calls in flight. */
+static _get_random_bytes_cb_t _get_random_bytes_cb = NULL;
+static get_random_bytes_user_cb_t get_random_bytes_user_cb = NULL;
+static crng_ready_cb_t crng_ready_cb = NULL;
+static mix_pool_bytes_cb_t mix_pool_bytes_cb = NULL;
+static credit_init_bits_cb_t credit_init_bits_cb = NULL;
+static crng_reseed_cb_t crng_reseed_cb = NULL;
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers)
+{
+ if ((! new_random_bytes_cb_owner) ||
+ (! handlers) ||
+ (! handlers->_get_random_bytes) ||
+ (! handlers->get_random_bytes_user))
+ {
+ return -EINVAL;
+ }
+
+ /* random_bytes_cb_owner is used to enforce serialization of
+ * wolfssl_linuxkm_register_random_bytes_handlers() and
+ * wolfssl_linuxkm_unregister_random_bytes_handlers().
+ */
+ if (atomic_long_cmpxchg(&random_bytes_cb_owner,
+ (long)NULL,
+ (long)new_random_bytes_cb_owner)
+ != (long)NULL)
+ {
+ return -EBUSY;
+ }
+
+ {
+ int current_random_bytes_cb_refcnt = atomic_read(&random_bytes_cb_refcnt);
+ if (current_random_bytes_cb_refcnt) {
+ pr_err("BUG: random_bytes_cb_refcnt == %d with null random_bytes_cb_owner", current_random_bytes_cb_refcnt);
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -EFAULT;
+ }
+ }
+
+ if (! try_module_get(new_random_bytes_cb_owner)) {
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+ return -ENODEV;
+ }
+
+ _get_random_bytes_cb = handlers->_get_random_bytes;
+ get_random_bytes_user_cb = handlers->get_random_bytes_user;
+ crng_ready_cb = handlers->crng_ready;
+ mix_pool_bytes_cb = handlers->mix_pool_bytes;
+ credit_init_bits_cb = handlers->credit_init_bits;
+ crng_reseed_cb = handlers->crng_reseed;
+
+ barrier();
+ atomic_set_release(&random_bytes_cb_refcnt, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_register_random_bytes_handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void)
+{
+ int current_random_bytes_cb_refcnt;
+ int n_tries;
+ if (! atomic_long_read(&random_bytes_cb_owner))
+ return -ENODEV;
+
+ /* we're racing the kernel at large to try to catch random_bytes_cb_refcnt
+ * with no callers in flight -- retry and relax up to 100 times.
+ */
+ for (n_tries = 0; n_tries < 100; ++n_tries) {
+ current_random_bytes_cb_refcnt = atomic_cmpxchg(&random_bytes_cb_refcnt, 1, 0);
+ if (current_random_bytes_cb_refcnt == 1)
+ break;
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in wolfssl_linuxkm_unregister_random_bytes_handlers.", current_random_bytes_cb_refcnt);
+ break;
+ }
+ if (msleep_interruptible(10) != 0)
+ return -EINTR;
+ }
+ if (current_random_bytes_cb_refcnt != 1) {
+ pr_warn("WARNING: wolfssl_unregister_random_bytes_handlers called with random_bytes_cb_refcnt == %d", current_random_bytes_cb_refcnt);
+ return -EBUSY;
+ }
+
+ _get_random_bytes_cb = NULL;
+ get_random_bytes_user_cb = NULL;
+ crng_ready_cb = NULL;
+ mix_pool_bytes_cb = NULL;
+ credit_init_bits_cb = NULL;
+ crng_reseed_cb = NULL;
+
+ module_put((struct module *)atomic_long_read(&random_bytes_cb_owner));
+ barrier();
+ atomic_long_set(&random_bytes_cb_owner, (long)NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wolfssl_linuxkm_unregister_random_bytes_handlers);
+
+static __always_inline int reserve_random_bytes_cb(void) {
+ int current_random_bytes_cb_refcnt =
+ atomic_read_acquire(&random_bytes_cb_refcnt);
+
+ if (! current_random_bytes_cb_refcnt)
+ return -ENODEV;
+
+ if (current_random_bytes_cb_refcnt < 0) {
+ pr_err("BUG: random_bytes_cb_refcnt is %d in reserve_random_bytes_cb.", current_random_bytes_cb_refcnt);
+ return -EFAULT;
+ }
+
+ for (;;) {
+ int orig_random_bytes_cb_refcnt =
+ atomic_cmpxchg(
+ &random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt,
+ current_random_bytes_cb_refcnt + 1);
+ if (orig_random_bytes_cb_refcnt == current_random_bytes_cb_refcnt)
+ return 0;
+ else if (! orig_random_bytes_cb_refcnt)
+ return -ENODEV;
+ else
+ current_random_bytes_cb_refcnt = orig_random_bytes_cb_refcnt;
+ }
+
+ __builtin_unreachable();
+}
+
+static __always_inline void release_random_bytes_cb(void) {
+ atomic_dec(&random_bytes_cb_refcnt);
+}
+
+static inline int call__get_random_bytes_cb(void *buf, size_t len)
+{
+ int ret;
+
+ if (! _get_random_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = _get_random_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline ssize_t call_get_random_bytes_user_cb(struct iov_iter *iter)
+{
+ ssize_t ret;
+
+ if (! get_random_bytes_user_cb)
+ return -ECANCELED;
+
+ ret = (ssize_t)reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = get_random_bytes_user_cb(iter);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline bool call_crng_ready_cb(void)
+{
+ bool ret;
+
+ /* Null crng_ready_cb signifies that the DRBG is always ready, i.e. that if
+ * called, it will always have or obtain sufficient entropy to fulfill the
+ * call.
+ */
+ if (! crng_ready_cb)
+ return 1;
+
+ if (reserve_random_bytes_cb() != 0)
+ return 0;
+
+ ret = crng_ready_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_mix_pool_bytes_cb(const void *buf, size_t len)
+{
+ int ret;
+
+ if (! mix_pool_bytes_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = mix_pool_bytes_cb(buf, len);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_credit_init_bits_cb(size_t bits)
+{
+ int ret;
+
+ if (! credit_init_bits_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = credit_init_bits_cb(bits);
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+static inline int call_crng_reseed_cb(void)
+{
+ int ret;
+
+ if (! crng_reseed_cb)
+ return -ENODEV;
+
+ ret = reserve_random_bytes_cb();
+ if (ret)
+ return ret;
+
+ ret = crng_reseed_cb();
+
+ release_random_bytes_cb();
+
+ return ret;
+}
+
+#endif /* WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS */
+
/*********************************************************************
*
* Initialization and readiness waiting.
@@ -87,7 +341,15 @@ static enum {
CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
} crng_init __read_mostly = CRNG_EMPTY;
static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
+
#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ #define crng_ready_by_cb() (atomic_read(&random_bytes_cb_refcnt) && call_crng_ready_cb())
+ #define crng_ready_maybe_cb() (atomic_read(&random_bytes_cb_refcnt) ? (call_crng_ready_cb() || crng_ready()) : crng_ready())
+#else
+ #define crng_ready_maybe_cb() crng_ready()
+#endif
+
/* Various types of waiters for crng_init->CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
@@ -112,7 +374,7 @@ MODULE_PARM_DESC(ratelimit_disable, "Dis
*/
bool rng_is_initialized(void)
{
- return crng_ready();
+ return crng_ready_maybe_cb();
}
EXPORT_SYMBOL(rng_is_initialized);
@@ -136,11 +398,11 @@ static void try_to_generate_entropy(void
*/
int wait_for_random_bytes(void)
{
- while (!crng_ready()) {
+ while (!crng_ready_maybe_cb()) {
int ret;
try_to_generate_entropy();
- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready_maybe_cb(), HZ);
if (ret)
return ret > 0 ? 0 : ret;
}
@@ -160,7 +422,7 @@ int __cold execute_with_initialized_rng(
int ret = 0;
spin_lock_irqsave(&random_ready_notifier.lock, flags);
- if (crng_ready())
+ if (crng_ready_maybe_cb())
nb->notifier_call(nb, 0, NULL);
else
ret = raw_notifier_chain_register((struct raw_notifier_head *)&random_ready_notifier.head, nb);
@@ -169,7 +431,7 @@ int __cold execute_with_initialized_rng(
}
#define warn_unseeded_randomness() \
- if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready_maybe_cb()) \
printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
__func__, (void *)_RET_IP_, crng_init)
@@ -402,6 +664,14 @@ static void _get_random_bytes(void *buf,
if (!len)
return;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* If call__get_random_bytes_cb() doesn't succeed, flow falls through to
+ * the native implementation -- _get_random_bytes() returns void and must
+ * always succeed by one path or the other.
+ */
+ if (call__get_random_bytes_cb(buf, len) == 0)
+ return;
+#endif
+
first_block_len = min_t(size_t, 32, len);
crng_make_state(chacha_state, buf, first_block_len);
len -= first_block_len;
@@ -448,6 +718,18 @@ static ssize_t get_random_bytes_user(str
if (unlikely(!iov_iter_count(iter)))
return 0;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ {
+ ssize_t cb_ret = call_get_random_bytes_user_cb(iter);
+ /* If the callback returns -ECANCELED, that signals that iter is
+ * still intact, and flow can safely fall through to the native
+ * implementation.
+ */
+ if (cb_ret != -ECANCELED)
+ return cb_ret;
+ }
+#endif
+
/*
* Immediately overwrite the ChaCha key at index 4 with random
* bytes, in case userspace causes copy_to_iter() below to sleep
@@ -524,7 +806,7 @@ type get_random_ ##type(void) \
\
warn_unseeded_randomness(); \
\
- if (!crng_ready()) { \
+ if (!crng_ready_maybe_cb()) { \
_get_random_bytes(&ret, sizeof(ret)); \
return ret; \
} \
@@ -660,6 +942,11 @@ static void mix_pool_bytes(const void *b
{
unsigned long flags;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ (void)call_mix_pool_bytes_cb(buf, len);
+ /* fall through to mix into native pool too. */
+#endif
+
spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(buf, len);
spin_unlock_irqrestore(&input_pool.lock, flags);
@@ -719,7 +1006,11 @@ static void extract_entropy(void *buf, s
memzero_explicit(&block, sizeof(block));
}
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+#define credit_init_bits(bits) do { (void)call_credit_init_bits_cb(bits); if (!crng_ready()) _credit_init_bits(bits); } while (0)
+#else
#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+#endif
static void __cold _credit_init_bits(size_t bits)
{
@@ -1400,7 +1691,7 @@ SYSCALL_DEFINE3(getrandom, char __user *
if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
return -EINVAL;
- if (!crng_ready() && !(flags & GRND_INSECURE)) {
+ if (!crng_ready_maybe_cb() && !(flags & GRND_INSECURE)) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
ret = wait_for_random_bytes();
@@ -1416,6 +1707,10 @@ SYSCALL_DEFINE3(getrandom, char __user *
static __poll_t random_poll(struct file *file, poll_table *wait)
{
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ if (crng_ready_by_cb())
+ return EPOLLIN | EPOLLRDNORM;
+#endif
poll_wait(file, &crng_init_wait, wait);
return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}
@@ -1461,10 +1756,10 @@ static ssize_t urandom_read_iter(struct
* Opportunistically attempt to initialize the RNG on platforms that
* have fast cycle counters, but don't (for now) require it to succeed.
*/
- if (!crng_ready())
+ if (!crng_ready_maybe_cb())
try_to_generate_entropy();
- if (!crng_ready()) {
+ if (!crng_ready_maybe_cb()) {
if (!ratelimit_disable && maxwarn <= 0)
++urandom_warning.missed;
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
@@ -1481,7 +1776,7 @@ static ssize_t random_read_iter(struct k
{
int ret;
- if (!crng_ready() &&
+ if (!crng_ready_by_cb() &&
((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
(kiocb->ki_filp->f_flags & O_NONBLOCK)))
return -EAGAIN;
@@ -1546,6 +1841,14 @@ static long random_ioctl(struct file *f,
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ /* fall through to reseed native crng too. */
+ if (call_crng_reseed_cb() == 0) {
+ if (crng_ready())
+ crng_reseed(NULL);
+ return 0;
+ }
+#endif
if (!crng_ready())
return -ENODATA;
crng_reseed(NULL);
--- ./include/linux/random.h.dist 2025-05-27 15:20:04.394946820 -0500
+++ ./include/linux/random.h 2025-06-30 12:04:12.032296708 -0500
@@ -154,4 +154,37 @@ int random_online_cpu(unsigned int cpu);
extern const struct file_operations random_fops, urandom_fops;
#endif
+#ifndef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
+ #define WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS 1
+#endif
+
+typedef int (*_get_random_bytes_cb_t)(void *buf, size_t len);
+struct iov_iter;
+/* kernels >= 5.17.0 use get_random_bytes_user() */
+typedef ssize_t (*get_random_bytes_user_cb_t)(struct iov_iter *iter);
+/* kernels < 5.17.0 use extract_crng_user(), though some LTS kernels,
+ * e.g. 5.10.236, have the 5.17+ architecture backported.
+ */
+typedef ssize_t (*extract_crng_user_cb_t)(void __user *buf, size_t nbytes);
+typedef bool (*crng_ready_cb_t)(void);
+typedef int (*mix_pool_bytes_cb_t)(const void *buf, size_t len);
+typedef int (*credit_init_bits_cb_t)(size_t bits);
+typedef int (*crng_reseed_cb_t)(void);
+
+struct wolfssl_linuxkm_random_bytes_handlers {
+ _get_random_bytes_cb_t _get_random_bytes;
+ get_random_bytes_user_cb_t get_random_bytes_user;
+ extract_crng_user_cb_t extract_crng_user;
+ crng_ready_cb_t crng_ready;
+ mix_pool_bytes_cb_t mix_pool_bytes;
+ credit_init_bits_cb_t credit_init_bits;
+ crng_reseed_cb_t crng_reseed;
+};
+
+int wolfssl_linuxkm_register_random_bytes_handlers(
+ struct module *new_random_bytes_cb_owner,
+ const struct wolfssl_linuxkm_random_bytes_handlers *handlers);
+
+int wolfssl_linuxkm_unregister_random_bytes_handlers(void);
+
#endif /* _LINUX_RANDOM_H */
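From userspace, the installed handlers are exercised through the standard interfaces; a sketch of the observable behavior (not part of the commit, using only documented APIs):

#include <sys/random.h>
#include <sys/ioctl.h>
#include <linux/random.h>
#include <fcntl.h>
#include <unistd.h>

int demo(void)
{
    unsigned char buf[32];
    int fd, ret;

    /* Blocks until crng_ready_maybe_cb() holds, i.e. until either the
     * registered DRBG or the native crng reports ready.
     */
    if (getrandom(buf, sizeof buf, 0) != (ssize_t)sizeof buf)
        return -1;

    /* With handlers installed, RNDRESEEDCRNG (CAP_SYS_ADMIN required)
     * reseeds the callback DRBG first, then the native crng if it is ready.
     */
    fd = open("/dev/random", O_RDWR);
    if (fd < 0)
        return -1;
    ret = ioctl(fd, RNDRESEEDCRNG);
    close(fd);
    return ret;
}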

View File

@@ -0,0 +1,40 @@
#!/bin/bash
# This script is an internal tool that regenerates kernel patches for
# WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS, using full kernel sources staged
# for development.
if [[ ! -d 6.15 ]]; then
echo "6.15 not found -- wrong working dir?" >&2
exit 1
fi
for v in *; do
if [[ ! -d "$v" || "$v" == "src" ]]; then
continue
fi
if [[ ! "$v" =~ ^[0-9]+\.[0-9]+([.-].*)?$ ]]; then
echo "skipping ${v} (malformed version)"
continue
fi
if [[ ! -f "src/${v}/drivers/char/random.c.dist" ||
! -f "src/${v}/drivers/char/random.c" ||
! -f "src/${v}/include/linux/random.h.dist" ||
! -f "src/${v}/include/linux/random.h" ]]; then
echo "skipping ${v} (missing src files)"
continue
fi
pushd "src/$v" >/dev/null || break
out_f="../../${v}/WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS-${v//./v}.patch"
diff --minimal -up ./drivers/char/{random.c.dist,random.c} >| "$out_f"
if [[ $? != "1" ]]; then
echo "diff ${v}/src/drivers/char/{random.c.dist,random.c} exited with unexpected status." >&2
exit 1
fi
diff --minimal -up ./include/linux/{random.h.dist,random.h} >> "$out_f"
if [[ $? != "1" ]]; then
echo "diff ${v}/src/include/linux/{random.h.dist,random.h} exited with unexpected status." >&2
exit 1
fi
popd >/dev/null || exit $?
done

View File

@@ -41,30 +41,9 @@ struct wc_thread_fpu_count_ent {
};
struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_states = NULL;
#ifdef WOLFSSL_COMMERCIAL_LICENSE
#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
#error WOLFSSL_COMMERCIAL_LICENSE requires LINUXKM_FPU_STATES_FOLLOW_THREADS
#endif
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wnested-externs"
/* avoid dependence on "alternatives_patched" and "xfd_validate_state()". */
#undef CONFIG_X86_DEBUG_FPU
#include "../kernel/fpu/internal.h"
#include "../kernel/fpu/xstate.h"
#pragma GCC diagnostic pop
static union wc_linuxkm_fpu_savebuf {
byte buf[1024]; /* must be 64-byte-aligned */
struct fpstate fpstate;
} *wc_linuxkm_fpu_savebufs = NULL;
#endif /* WOLFSSL_COMMERCIAL_LICENSE */
#define WC_FPU_COUNT_MASK 0x7fffffffU
#define WC_FPU_SAVED_MASK 0x80000000U
#define WC_FPU_COUNT_MASK 0x3fffffffU
#define WC_FPU_INHIBITED_FLAG 0x40000000U
#define WC_FPU_ALREADY_FLAG 0x80000000U
WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
{
@@ -75,7 +54,7 @@ WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
#else
static int warned_for_repeat_alloc = 0;
if (! warned_for_repeat_alloc) {
pr_err("attempt at repeat allocation"
pr_err("BUG: attempt at repeat allocation"
" in allocate_wolfcrypt_linuxkm_fpu_states\n");
warned_for_repeat_alloc = 1;
}
@@ -83,21 +62,14 @@ WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
#endif
}
#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
if (nr_cpu_ids >= 16)
wc_linuxkm_fpu_states_n_tracked = nr_cpu_ids * 2;
else
wc_linuxkm_fpu_states_n_tracked = 32;
#else
wc_linuxkm_fpu_states_n_tracked = nr_cpu_ids;
#endif
wc_linuxkm_fpu_states =
(struct wc_thread_fpu_count_ent *)malloc(
wc_linuxkm_fpu_states_n_tracked * sizeof(wc_linuxkm_fpu_states[0]));
if (! wc_linuxkm_fpu_states) {
pr_err("allocation of %lu bytes for "
pr_err("ERROR: allocation of %lu bytes for "
"wc_linuxkm_fpu_states failed.\n",
nr_cpu_ids * sizeof(struct fpu_state *));
return MEMORY_E;
@@ -106,35 +78,6 @@ WARN_UNUSED_RESULT int allocate_wolfcrypt_linuxkm_fpu_states(void)
memset(wc_linuxkm_fpu_states, 0, wc_linuxkm_fpu_states_n_tracked
* sizeof(wc_linuxkm_fpu_states[0]));
#ifdef WOLFSSL_COMMERCIAL_LICENSE
wc_linuxkm_fpu_savebufs = (union wc_linuxkm_fpu_savebuf *)malloc(
wc_linuxkm_fpu_states_n_tracked * sizeof(*wc_linuxkm_fpu_savebufs));
if (! wc_linuxkm_fpu_savebufs) {
pr_err("allocation of %lu bytes for "
"wc_linuxkm_fpu_savebufs failed.\n",
WC_LINUXKM_ROUND_UP_P_OF_2(wc_linuxkm_fpu_states_n_tracked)
* sizeof(*wc_linuxkm_fpu_savebufs));
free(wc_linuxkm_fpu_states);
wc_linuxkm_fpu_states = NULL;
return MEMORY_E;
}
if ((uintptr_t)wc_linuxkm_fpu_savebufs
& (WC_LINUXKM_ROUND_UP_P_OF_2(sizeof(*wc_linuxkm_fpu_savebufs)) - 1))
{
pr_err("allocation of %lu bytes for "
"wc_linuxkm_fpu_savebufs allocated with wrong alignment 0x%lx.\n",
WC_LINUXKM_ROUND_UP_P_OF_2(wc_linuxkm_fpu_states_n_tracked)
* sizeof(*wc_linuxkm_fpu_savebufs),
(uintptr_t)wc_linuxkm_fpu_savebufs);
free(wc_linuxkm_fpu_savebufs);
wc_linuxkm_fpu_savebufs = NULL;
free(wc_linuxkm_fpu_states);
wc_linuxkm_fpu_states = NULL;
return MEMORY_E;
}
#endif
return 0;
}
@@ -142,11 +85,8 @@ void free_wolfcrypt_linuxkm_fpu_states(void) {
struct wc_thread_fpu_count_ent *i, *i_endptr;
pid_t i_pid;
if (wc_linuxkm_fpu_states == NULL) {
pr_err("free_wolfcrypt_linuxkm_fpu_states called"
" before allocate_wolfcrypt_linuxkm_fpu_states.\n");
if (wc_linuxkm_fpu_states == NULL)
return;
}
for (i = wc_linuxkm_fpu_states,
i_endptr = &wc_linuxkm_fpu_states[wc_linuxkm_fpu_states_n_tracked];
@@ -157,88 +97,16 @@ void free_wolfcrypt_linuxkm_fpu_states(void) {
if (i_pid == 0)
continue;
if (i->fpu_state != 0) {
pr_err("free_wolfcrypt_linuxkm_fpu_states called"
pr_err("ERROR: free_wolfcrypt_linuxkm_fpu_states called"
" with nonzero state 0x%x for pid %d.\n", i->fpu_state, i_pid);
i->fpu_state = 0;
}
}
#ifdef WOLFSSL_COMMERCIAL_LICENSE
free(wc_linuxkm_fpu_savebufs);
wc_linuxkm_fpu_savebufs = NULL;
#endif
free(wc_linuxkm_fpu_states);
wc_linuxkm_fpu_states = NULL;
}
#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
/* legacy thread-local storage facility for tracking recursive fpu
* pushing/popping
*/
static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc(
int create_p, int assume_fpu_began)
{
struct wc_thread_fpu_count_ent *i, *i_endptr, *i_empty;
pid_t my_pid = task_pid_nr(current), i_pid;
(void)assume_fpu_began;
{
static int _warned_on_null = 0;
if (wc_linuxkm_fpu_states == NULL)
{
#ifdef HAVE_FIPS
/* FIPS needs to use SHA256 for the core verify HMAC, before
* reaching the regular wolfCrypt_Init() logic. to break the
* dependency loop on intelasm builds, we allocate here.
* this is not thread-safe and doesn't need to be.
*/
if ((! create_p) || (allocate_wolfcrypt_linuxkm_fpu_states() != 0))
#endif
{
if (_warned_on_null == 0) {
pr_err("wc_linuxkm_fpu_state_assoc called by pid %d"
" before allocate_wolfcrypt_linuxkm_fpu_states.\n", my_pid);
_warned_on_null = 1;
}
return NULL;
}
}
}
i_endptr = &wc_linuxkm_fpu_states[wc_linuxkm_fpu_states_n_tracked];
for (;;) {
for (i = wc_linuxkm_fpu_states,
i_empty = NULL;
i < i_endptr;
++i)
{
i_pid = __atomic_load_n(&i->pid, __ATOMIC_CONSUME);
if (i_pid == my_pid)
return i;
if ((i_empty == NULL) && (i_pid == 0))
i_empty = i;
}
if ((i_empty == NULL) || (! create_p))
return NULL;
i_pid = 0;
if (__atomic_compare_exchange_n(
&(i_empty->pid),
&i_pid,
my_pid,
0 /* weak */,
__ATOMIC_SEQ_CST /* success_memmodel */,
__ATOMIC_SEQ_CST /* failure_memmodel */))
{
return i_empty;
}
}
}
#else /* !LINUXKM_FPU_STATES_FOLLOW_THREADS */
/* lock-free O(1)-lookup CPU-local storage facility for tracking recursive fpu
* pushing/popping.
*
@@ -265,7 +133,7 @@ static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc_unlikely(int c
#endif
{
if (_warned_on_null == 0) {
pr_err("wc_linuxkm_fpu_state_assoc called by pid %d"
pr_err("BUG: wc_linuxkm_fpu_state_assoc called by pid %d"
" before allocate_wolfcrypt_linuxkm_fpu_states.\n", my_pid);
_warned_on_null = 1;
}
@@ -280,7 +148,7 @@ static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc_unlikely(int c
if (create_p) {
static int _warned_on_redundant_create_p = 0;
if (_warned_on_redundant_create_p < 10) {
pr_err("wc_linuxkm_fpu_state_assoc called with create_p=1 by"
pr_err("BUG: wc_linuxkm_fpu_state_assoc called with create_p=1 by"
" pid %d on cpu %d with cpu slot already reserved by"
" said pid.\n", my_pid, my_cpu);
++_warned_on_redundant_create_p;
@@ -293,18 +161,28 @@ static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc_unlikely(int c
__atomic_store_n(&slot->pid, my_pid, __ATOMIC_RELEASE);
return slot;
} else {
/* if the slot is already occupied, that can be benign due to a
* migration, but it will require fixup by the thread that owns the
* slot, which will happen when it releases its lock, or sooner (see
* below).
/* if the slot is already occupied, that can be benign-ish due to an
* unwanted migration, or due to a process crashing in kernel mode.
* it will require fixup either here, or by the thread that owns the
* slot, which will happen when it releases its lock.
*/
static int _warned_on_mismatched_pid = 0;
if (_warned_on_mismatched_pid < 10) {
pr_warn("wc_linuxkm_fpu_state_assoc called by pid %d on cpu %d"
" but cpu slot already reserved by pid %d.\n",
my_pid, my_cpu, slot_pid);
++_warned_on_mismatched_pid;
if (find_get_pid(slot_pid) == NULL) {
if (__atomic_compare_exchange_n(&slot->pid, &slot_pid, my_pid, 0, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE)) {
pr_warn("WARNING: wc_linuxkm_fpu_state_assoc_unlikely fixed up orphaned slot owned by dead PID %d.", slot_pid);
return slot;
}
}
{
static int _warned_on_mismatched_pid = 0;
if (_warned_on_mismatched_pid < 10) {
pr_warn("WARNING: wc_linuxkm_fpu_state_assoc called by pid %d on cpu %d"
" but cpu slot already reserved by pid %d.\n",
my_pid, my_cpu, slot_pid);
++_warned_on_mismatched_pid;
}
}
return NULL;
}
} else {
@@ -342,11 +220,21 @@ static inline struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc(
* checking if the pid matches or,
* failing that, if create_p.
*/
pid_t my_pid = task_pid_nr(current), slot_pid;
pid_t my_pid, slot_pid;
struct wc_thread_fpu_count_ent *slot;
if (unlikely(wc_linuxkm_fpu_states == NULL))
return wc_linuxkm_fpu_state_assoc_unlikely(create_p);
if (unlikely(wc_linuxkm_fpu_states == NULL)) {
if (! assume_fpu_began) {
/* this was just a quick check for whether we're in a recursive
* save_vector_registers_x86(). we're not.
*/
return NULL;
}
else
return wc_linuxkm_fpu_state_assoc_unlikely(create_p);
}
my_pid = task_pid_nr(current);
slot = &wc_linuxkm_fpu_states[my_cpu];
slot_pid = __atomic_load_n(&slot->pid, __ATOMIC_CONSUME);
@@ -359,6 +247,10 @@ static inline struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc(
if (! assume_fpu_began) {
/* this was just a quick check for whether we're in a recursive
* save_vector_registers_x86(). we're not.
*
* if we're in a softirq context, we'll always wind up here, because
* processes with entries in wc_linuxkm_fpu_states[] always have
* softirqs inhibited.
*/
return NULL;
}
@@ -374,17 +266,6 @@ static inline struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc(
}
}
#endif /* !LINUXKM_FPU_STATES_FOLLOW_THREADS */
#ifdef WOLFSSL_COMMERCIAL_LICENSE
static struct fpstate *wc_linuxkm_fpstate_buf_from_fpu_state(
struct wc_thread_fpu_count_ent *state)
{
size_t i = (size_t)(state - wc_linuxkm_fpu_states) / sizeof(*state);
return &wc_linuxkm_fpu_savebufs[i].fpstate;
}
#endif
static void wc_linuxkm_fpu_state_release_unlikely(
struct wc_thread_fpu_count_ent *ent)
{
@@ -410,16 +291,26 @@ static inline void wc_linuxkm_fpu_state_release(
WARN_UNUSED_RESULT int can_save_vector_registers_x86(void)
{
/* First, check if we're already saved, per wc_linuxkm_fpu_states.
struct wc_thread_fpu_count_ent *pstate;
/* check for hard interrupt context (unusable current->pid) preemptively.
* if we're in a softirq context we'll catch that below with
* irq_fpu_usable().
*/
if (((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) != 0) || (task_pid_nr(current) == 0))
return 0;
/* Check if we're already saved, per wc_linuxkm_fpu_states.
*
* On kernel >= 6.15, irq_fpu_usable() dumps a backtrace to the kernel log
* if called while already saved, so it's crucial to preempt that call by
* checking wc_linuxkm_fpu_states.
*/
struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(0, 0);
pstate = wc_linuxkm_fpu_state_assoc(0, 0);
if ((pstate != NULL) && (pstate->fpu_state != 0U)) {
if (unlikely(pstate->fpu_state & WC_FPU_INHIBITED_FLAG))
return 0;
if (unlikely((pstate->fpu_state & WC_FPU_COUNT_MASK)
== WC_FPU_COUNT_MASK))
{
@@ -459,25 +350,34 @@ WARN_UNUSED_RESULT int can_save_vector_registers_x86(void)
#endif
}
WARN_UNUSED_RESULT int save_vector_registers_x86(void)
WARN_UNUSED_RESULT int save_vector_registers_x86(int inhibit_p)
{
#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(1, 0);
#else
struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(0, 0);
#endif
struct wc_thread_fpu_count_ent *pstate;
/* check for hard interrupt context (unusable current->pid) preemptively.
* if we're in a softirq context we'll catch that below with
* irq_fpu_usable().
*/
if (((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) != 0) || (task_pid_nr(current) == 0))
return WC_ACCEL_INHIBIT_E;
pstate = wc_linuxkm_fpu_state_assoc(0, 0);
/* allow for nested calls */
#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
if (pstate == NULL)
return MEMORY_E;
#endif
if (
#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
(pstate != NULL) &&
#endif
(pstate->fpu_state != 0U))
{
if (pstate && (pstate->fpu_state != 0U)) {
if (unlikely(pstate->fpu_state & WC_FPU_INHIBITED_FLAG)) {
if (inhibit_p) {
/* allow recursive inhibit calls as long as the whole stack of
* them is inhibiting.
*/
++pstate->fpu_state;
return 0;
}
else
return WC_ACCEL_INHIBIT_E;
}
if (unlikely(inhibit_p))
return BAD_STATE_E;
if (unlikely((pstate->fpu_state & WC_FPU_COUNT_MASK)
== WC_FPU_COUNT_MASK))
{
@@ -490,6 +390,37 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(void)
}
}
if (inhibit_p) {
if (in_softirq())
return WC_ACCEL_INHIBIT_E;
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
/* inhibit migration, which gums up the algorithm in
* kernel_fpu_{begin,end}().
*/
migrate_disable();
#endif
/* we need to inhibit softirqs to assure that we can support recursive
* calls safely, i.e. without mistaking a softirq context for a
* recursion.
*/
local_bh_disable();
pstate = wc_linuxkm_fpu_state_assoc(1, 1);
if (pstate == NULL) {
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
migrate_enable();
#endif
local_bh_enable();
return BAD_STATE_E;
}
pstate->fpu_state =
WC_FPU_INHIBITED_FLAG + 1U;
return 0;
}
if (irq_fpu_usable()
#if defined(TIF_NEED_FPU_LOAD) && \
(LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)) && \
@@ -514,12 +445,6 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(void)
* that corrupts the register state.
*/
#ifdef WOLFSSL_COMMERCIAL_LICENSE
struct fpstate *fpstate = wc_linuxkm_fpstate_buf_from_fpu_state(pstate);
fpregs_lock();
fpstate->xfeatures = ~0UL;
os_xsave(fpstate);
#else /* !WOLFSSL_COMMERCIAL_LICENSE */
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
/* inhibit migration, which gums up the algorithm in
@@ -528,31 +453,23 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(void)
migrate_disable();
#endif
kernel_fpu_begin();
#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
pstate = wc_linuxkm_fpu_state_assoc(1, 1);
if (pstate == NULL) {
kernel_fpu_end();
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
!defined(WOLFSSL_COMMERCIAL_LICENSE)
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
migrate_enable();
#endif
return BAD_STATE_E;
}
#endif
#endif /* !WOLFSSL_COMMERCIAL_LICENSE */
/* set msb to 0 to trigger kernel_fpu_end() at cleanup. */
pstate->fpu_state = 1U;
} else if (in_nmi() || (hardirq_count() > 0) || (softirq_count() > 0)) {
static int warned_fpu_forbidden = 0;
if (! warned_fpu_forbidden)
pr_err("save_vector_registers_x86 called from IRQ handler.\n");
#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
wc_linuxkm_fpu_state_release(pstate);
#endif
return BAD_STATE_E;
return WC_ACCEL_INHIBIT_E;
}
#if defined(TIF_NEED_FPU_LOAD) && \
(LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0)) && \
@@ -566,25 +483,22 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(void)
*/
preempt_disable();
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
!defined(WOLFSSL_COMMERCIAL_LICENSE)
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
migrate_disable();
#endif
#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
pstate = wc_linuxkm_fpu_state_assoc(1, 1);
if (pstate == NULL) {
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
!defined(WOLFSSL_COMMERCIAL_LICENSE)
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
migrate_enable();
#endif
preempt_enable();
return BAD_STATE_E;
}
#endif
/* set msb to 1 to inhibit kernel_fpu_end() at cleanup. */
/* set _ALREADY_ flag to 1 to inhibit kernel_fpu_end() at cleanup. */
pstate->fpu_state =
WC_FPU_SAVED_MASK + 1U;
WC_FPU_ALREADY_FLAG + 1U;
}
#endif /* TIF_NEED_FPU_LOAD && <5.17.0 && !5.10.180+ */
else {
@@ -594,10 +508,7 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(void)
" thread without previous FPU save.\n");
warned_fpu_forbidden = 1;
}
#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
wc_linuxkm_fpu_state_release(pstate);
#endif
return BAD_STATE_E;
return WC_ACCEL_INHIBIT_E;
}
return 0;
@@ -605,9 +516,17 @@ WARN_UNUSED_RESULT int save_vector_registers_x86(void)
void restore_vector_registers_x86(void)
{
struct wc_thread_fpu_count_ent *pstate = wc_linuxkm_fpu_state_assoc(0, 1);
struct wc_thread_fpu_count_ent *pstate;
if (in_nmi() || hardirq_count() || (task_pid_nr(current) == 0)) {
pr_warn("BUG: restore_vector_registers_x86() called from interrupt handler on CPU %d.",
raw_smp_processor_id());
return;
}
pstate = wc_linuxkm_fpu_state_assoc(0, 1);
if (unlikely(pstate == NULL)) {
pr_err("restore_vector_registers_x86 called by pid %d on CPU %d "
pr_warn("BUG: restore_vector_registers_x86() called by pid %d on CPU %d "
"with no saved state.\n", task_pid_nr(current),
raw_smp_processor_id());
return;
@@ -618,32 +537,21 @@ void restore_vector_registers_x86(void)
}
if (pstate->fpu_state == 0U) {
#ifdef WOLFSSL_COMMERCIAL_LICENSE
struct fpstate *fpstate = wc_linuxkm_fpstate_buf_from_fpu_state(pstate);
os_xrstor(fpstate, fpstate->xfeatures);
fpregs_unlock();
#else
#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
wc_linuxkm_fpu_state_release(pstate);
#endif
kernel_fpu_end();
#endif
} else {
} else if (unlikely(pstate->fpu_state & WC_FPU_INHIBITED_FLAG)) {
pstate->fpu_state = 0U;
wc_linuxkm_fpu_state_release(pstate);
local_bh_enable();
} else if (unlikely(pstate->fpu_state & WC_FPU_ALREADY_FLAG)) {
pstate->fpu_state = 0U;
#ifndef LINUXKM_FPU_STATES_FOLLOW_THREADS
wc_linuxkm_fpu_state_release(pstate);
#endif
preempt_enable();
}
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_COUNT) && \
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0)) && \
!defined(WOLFSSL_COMMERCIAL_LICENSE)
(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0))
migrate_enable();
#endif
#ifdef LINUXKM_FPU_STATES_FOLLOW_THREADS
wc_linuxkm_fpu_state_release(pstate);
#endif
return;
}
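For orientation, the expected call pattern against the reworked API: save_vector_registers_x86() now takes an inhibit_p argument, where 0 requests FPU/SIMD use and nonzero marks a region in which acceleration must be refused (WC_FPU_INHIBITED_FLAG, with softirqs disabled). A sketch of an ordinary caller (do_scalar_fallback_op() is hypothetical):

static int do_accelerated_op(void)
{
    int ret = save_vector_registers_x86(0);

    if (ret == WC_ACCEL_INHIBIT_E)
        return do_scalar_fallback_op(); /* acceleration currently inhibited */
    else if (ret != 0)
        return ret;                     /* e.g. BAD_STATE_E */

    /* ... SIMD work; nested save/restore pairs are allowed ... */

    restore_vector_registers_x86();
    return 0;
}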

View File

@@ -647,6 +647,9 @@ const char* wc_GetErrorString(int error)
case ASCON_AUTH_E:
return "ASCON Authentication check fail";
case WC_ACCEL_INHIBIT_E:
return "Crypto acceleration is currently inhibited";
case MAX_CODE_E:
case WC_SPAN1_MIN_CODE_E:
case MIN_CODE_E:

View File

@@ -189,8 +189,7 @@ This library contains implementation for the random number generator.
{
intel_flags = cpuid_get_flags();
}
#if (defined(HAVE_INTEL_RDSEED) || defined(HAVE_AMD_RDSEED)) && \
!defined(WOLFSSL_LINUXKM)
#if defined(HAVE_INTEL_RDSEED) || defined(HAVE_AMD_RDSEED)
static int wc_GenerateSeed_IntelRD(OS_Seed* os, byte* output, word32 sz);
#endif
#ifdef HAVE_INTEL_RDRAND
@@ -254,7 +253,12 @@ This library contains implementation for the random number generator.
#endif
#elif defined(HAVE_AMD_RDSEED)
/* This will yield a SEED_SZ of 16kb. Since nonceSz will be 0,
* we'll add an additional 8kb on top. */
* we'll add an additional 8kb on top.
*
* See "AMD RNG ESV Public Use Document". Version 0.7 of October 24,
* 2024 specifies 0.656 to 1.312 bits of entropy per 128 bit block of
* RDSEED output, depending on CPU family.
*/
#define ENTROPY_SCALE_FACTOR (512)
#elif defined(HAVE_INTEL_RDSEED) || defined(HAVE_INTEL_RDRAND)
/* The value of 2 applies to Intel's RDSEED which provides about
@@ -369,7 +373,7 @@ static int Hash_df(DRBG_internal* drbg, byte* out, word32 outSz, byte type,
#else
wc_Sha256 sha[1];
#endif
#ifdef WOLFSSL_SMALL_STACK
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_LINUXKM)
byte* digest;
#else
byte digest[WC_SHA256_DIGEST_SIZE];
@@ -379,7 +383,7 @@ static int Hash_df(DRBG_internal* drbg, byte* out, word32 outSz, byte type,
return DRBG_FAILURE;
}
#ifdef WOLFSSL_SMALL_STACK
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_LINUXKM)
digest = (byte*)XMALLOC(WC_SHA256_DIGEST_SIZE, drbg->heap,
DYNAMIC_TYPE_DIGEST);
if (digest == NULL)
@@ -440,7 +444,7 @@ static int Hash_df(DRBG_internal* drbg, byte* out, word32 outSz, byte type,
ForceZero(digest, WC_SHA256_DIGEST_SIZE);
#ifdef WOLFSSL_SMALL_STACK
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_LINUXKM)
XFREE(digest, drbg->heap, DYNAMIC_TYPE_DIGEST);
#endif
@@ -638,7 +642,11 @@ static int Hash_DRBG_Generate(DRBG_internal* drbg, byte* out, word32 outSz)
wc_Sha256 sha[1];
#endif
byte type;
#ifdef WORD64_AVAILABLE
word64 reseedCtr;
#else
word32 reseedCtr;
#endif
if (drbg == NULL) {
return DRBG_FAILURE;
@@ -688,7 +696,11 @@ static int Hash_DRBG_Generate(DRBG_internal* drbg, byte* out, word32 outSz)
array_add(drbg->V, sizeof(drbg->V), digest, WC_SHA256_DIGEST_SIZE);
array_add(drbg->V, sizeof(drbg->V), drbg->C, sizeof(drbg->C));
#ifdef LITTLE_ENDIAN_ORDER
#ifdef WORD64_AVAILABLE
reseedCtr = ByteReverseWord64(reseedCtr);
#else
reseedCtr = ByteReverseWord32(reseedCtr);
#endif
#endif
array_add(drbg->V, sizeof(drbg->V),
(byte*)&reseedCtr, sizeof(reseedCtr));
@@ -2573,7 +2585,6 @@ static WC_INLINE int IntelRDseed64_r(word64* rnd)
return -1;
}
#ifndef WOLFSSL_LINUXKM
/* return 0 on success */
static int wc_GenerateSeed_IntelRD(OS_Seed* os, byte* output, word32 sz)
{
@@ -2604,7 +2615,6 @@ static int wc_GenerateSeed_IntelRD(OS_Seed* os, byte* output, word32 sz)
return 0;
}
#endif
#endif /* HAVE_INTEL_RDSEED || HAVE_AMD_RDSEED */
@@ -3778,16 +3788,69 @@ int wc_GenerateSeed(OS_Seed* os, byte* output, word32 sz)
#endif /* end WOLFSSL_ESPIDF */
#elif defined(WOLFSSL_LINUXKM)
/* When registering the kernel default DRBG with a native/intrinsic entropy
* source, fallback to get_random_bytes() isn't allowed because we replace
* it with our DRBG.
*/
#if defined(HAVE_ENTROPY_MEMUSE) && \
defined(LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT)
int wc_GenerateSeed(OS_Seed* os, byte* output, word32 sz)
{
(void)os;
return wc_Entropy_Get(MAX_ENTROPY_BITS, output, sz);
}
#elif (defined(HAVE_INTEL_RDSEED) || defined(HAVE_AMD_RDSEED)) && \
defined(LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT)
int wc_GenerateSeed(OS_Seed* os, byte* output, word32 sz)
{
(void)os;
return wc_GenerateSeed_IntelRD(NULL, output, sz);
}
#else /* !((HAVE_ENTROPY_MEMUSE || HAVE_*_RDSEED) && LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT) */
#include <linux/random.h>
int wc_GenerateSeed(OS_Seed* os, byte* output, word32 sz)
{
(void)os;
int ret;
#ifdef HAVE_ENTROPY_MEMUSE
ret = wc_Entropy_Get(MAX_ENTROPY_BITS, output, sz);
if (ret == 0) {
return 0;
}
#ifdef ENTROPY_MEMUSE_FORCE_FAILURE
/* Don't fallback to /dev/urandom. */
return ret;
#endif
#endif
#if defined(HAVE_INTEL_RDSEED) || defined(HAVE_AMD_RDSEED)
if (IS_INTEL_RDSEED(intel_flags)) {
ret = wc_GenerateSeed_IntelRD(NULL, output, sz);
#ifndef FORCE_FAILURE_RDSEED
if (ret == 0)
#endif
{
return ret;
}
}
#endif /* HAVE_INTEL_RDSEED || HAVE_AMD_RDSEED */
(void)ret;
get_random_bytes(output, sz);
return 0;
}
#endif /* !((HAVE_ENTROPY_MEMUSE || HAVE_*_RDSEED) && LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT) */
#elif defined(WOLFSSL_RENESAS_TSIP)
int wc_GenerateSeed(OS_Seed* os, byte* output, word32 sz)
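To put the AMD RDSEED scale factor in perspective (arithmetic ours; the entropy figures are from the ESV document cited above): at the worst-case rate of 0.656 entropy bits per 128-bit block, one credited bit of entropy costs 128 / 0.656 ≈ 195 raw bits, so ENTROPY_SCALE_FACTOR = 512 raw bits per credited bit leaves a safety margin of roughly 512 / 195 ≈ 2.6x.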

View File

@@ -2210,32 +2210,7 @@ int wolfSSL_HwPkMutexUnLock(void)
}
#elif defined(WOLFSSL_LINUXKM)
/* Linux kernel mutex routines are voids, alas. */
int wc_InitMutex(wolfSSL_Mutex* m)
{
mutex_init(m);
return 0;
}
int wc_FreeMutex(wolfSSL_Mutex* m)
{
mutex_destroy(m);
return 0;
}
int wc_LockMutex(wolfSSL_Mutex* m)
{
mutex_lock(m);
return 0;
}
int wc_UnLockMutex(wolfSSL_Mutex* m)
{
mutex_unlock(m);
return 0;
}
/* defined as inlines in linuxkm/linuxkm_wc_port.h */
#elif defined(WOLFSSL_VXWORKS)
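The replacement inlines in linuxkm/linuxkm_wc_port.h presumably mirror the deleted wrappers; a sketch of their shape (the kernel's mutex primitives return void, so the wrappers can only ever report success):

static inline int wc_InitMutex(wolfSSL_Mutex* m)   { mutex_init(m);    return 0; }
static inline int wc_FreeMutex(wolfSSL_Mutex* m)   { mutex_destroy(m); return 0; }
static inline int wc_LockMutex(wolfSSL_Mutex* m)   { mutex_lock(m);    return 0; }
static inline int wc_UnLockMutex(wolfSSL_Mutex* m) { mutex_unlock(m);  return 0; }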

View File

@@ -304,11 +304,12 @@ enum wolfCrypt_ErrorCodes {
DEADLOCK_AVERTED_E = -1000, /* Deadlock averted -- retry the call */
ASCON_AUTH_E = -1001, /* ASCON Authentication check failure */
WC_ACCEL_INHIBIT_E = -1002, /* Crypto acceleration is currently inhibited */
WC_SPAN2_LAST_E = -1001, /* Update to indicate last used error code */
WC_SPAN2_LAST_E = -1002, /* Update to indicate last used error code */
WC_SPAN2_MIN_CODE_E = -1999, /* Last usable code in span 2 */
WC_LAST_E = -1001, /* the last code used either here or in
WC_LAST_E = -1002, /* the last code used either here or in
* error-ssl.h
*/

View File

@@ -164,7 +164,11 @@ struct OS_Seed {
#ifdef HAVE_HASHDRBG
struct DRBG_internal {
#ifdef WORD64_AVAILABLE
word64 reseedCtr;
#else
word32 reseedCtr;
#endif
byte V[DRBG_SEED_LEN];
byte C[DRBG_SEED_LEN];
void* heap;

View File

@@ -3625,9 +3625,6 @@ extern void uITRON4_free(void *p) ;
/* Linux Kernel Module */
#ifdef WOLFSSL_LINUXKM
#ifndef WOLFSSL_NO_GETPID
#define WOLFSSL_NO_GETPID
#endif /* WOLFSSL_NO_GETPID */
#ifdef HAVE_CONFIG_H
#include <config.h>
#undef HAVE_CONFIG_H
@@ -3681,6 +3678,9 @@ extern void uITRON4_free(void *p) ;
#undef WOLFSSL_HAVE_MAX
#undef WOLFSSL_HAVE_ASSERT_H
#define WOLFSSL_NO_ASSERT_H
#ifndef WOLFSSL_NO_GETPID
#define WOLFSSL_NO_GETPID
#endif /* WOLFSSL_NO_GETPID */
#ifndef SIZEOF_LONG
#define SIZEOF_LONG 8
#endif
@@ -3731,6 +3731,14 @@ extern void uITRON4_free(void *p) ;
#define WC_SANITIZE_ENABLE() kasan_enable_current()
#endif
#endif
#if !defined(WC_RESEED_INTERVAL) && defined(LINUXKM_LKCAPI_REGISTER)
/* If installing handlers, use the maximum reseed interval allowed by
* NIST SP 800-90A Rev. 1, to avoid unnecessary delays in DRBG
* generation.
*/
#define WC_RESEED_INTERVAL (((word64)1UL)<<48UL)
#endif
#endif
@@ -3989,12 +3997,6 @@ extern void uITRON4_free(void *p) ;
#undef HAVE_XCHACHA
#endif
#if !defined(WOLFSSL_SHA384) && !defined(WOLFSSL_SHA512) && defined(NO_AES) && \
!defined(WOLFSSL_SHA3)
#undef WOLFSSL_NO_WORD64_OPS
#define WOLFSSL_NO_WORD64_OPS
#endif
#if !defined(WOLFCRYPT_ONLY) && \
(!defined(WOLFSSL_NO_TLS12) || defined(HAVE_KEYING_MATERIAL))
#undef WOLFSSL_HAVE_PRF
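Note the tie-in with the DRBG_internal change earlier in this commit: 2^48 ≈ 2.8 x 10^14 reseed counts cannot be held in a 32-bit reseedCtr (max ≈ 4.3 x 10^9), hence the widening to word64 where available; 2^48 is also the largest reseed_interval NIST SP 800-90A Rev. 1 permits for Hash_DRBG.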

View File

@@ -1964,6 +1964,22 @@ WOLFSSL_API word32 CheckRunTimeSettings(void);
#ifndef RESTORE_VECTOR_REGISTERS
#define RESTORE_VECTOR_REGISTERS() WC_DO_NOTHING
#endif
#ifdef WOLFSSL_NO_ASM
/* We define fallback no-op definitions for these only if asm is disabled,
* otherwise the using code must detect that these macros are undefined and
* provide its own non-vector implementation paths.
*
* Currently these macros are only used in WOLFSSL_LINUXKM code paths, which
* are always compiled either with substantive definitions from
* linuxkm_wc_port.h, or with WOLFSSL_NO_ASM defined.
*/
#ifndef DISABLE_VECTOR_REGISTERS
#define DISABLE_VECTOR_REGISTERS() 0
#endif
#ifndef REENABLE_VECTOR_REGISTERS
#define REENABLE_VECTOR_REGISTERS() WC_DO_NOTHING
#endif
#endif
#ifndef WC_SANITIZE_DISABLE
#define WC_SANITIZE_DISABLE() WC_DO_NOTHING
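A sketch of the detection pattern the comment above prescribes for using code (do_work() and do_nonvector_work() are hypothetical):

#ifdef DISABLE_VECTOR_REGISTERS
    ret = DISABLE_VECTOR_REGISTERS();    /* 0 on success */
    if (ret == 0) {
        ret = do_work();                 /* vector/FPU use is inhibited here */
        REENABLE_VECTOR_REGISTERS();
    }
#else
    ret = do_nonvector_work();           /* caller-provided non-vector path */
#endif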