linuxkm/lkcapi_sha_glue.c:

* add drbg_init_from() and fork_default_rng(), and
* use the latter to define LKCAPI_INITRNG_FOR_SELFTEST() opportunistically (with fallback to plain wc_InitRng());

linuxkm/lkcapi_rsa_glue.c:
* add km_rsa_ctx_init_rng(),
* remove wc_InitRng() from km_rsa_ctx_init(),
* remove the WC_RSA_BLINDING gates around calls to wc_RsaSetRNG(), and
* call km_rsa_ctx_init_rng() before each call that needs an initialized RNG;

linuxkm/lkcapi_dh_glue.c and linuxkm/lkcapi_ecdh_glue.c: in km_ffdhe_init() and km_ecdh_init(), if linuxkm_lkcapi_registering_now, use LKCAPI_INITRNG_FOR_SELFTEST() to initialize ctx->rng;

linuxkm/lkcapi_glue.c: add notes that lkcapi_sha_glue inclusion and registrations must precede PK, and move declaration of linuxkm_lkcapi_registering_now to precede lkcapi glue inclusions.
This commit is contained in:
Daniel Pouzzner
2025-12-22 22:56:38 -06:00
parent 5030484bcf
commit b087533fdf
5 changed files with 279 additions and 54 deletions

View File

@@ -749,7 +749,10 @@ static int km_ffdhe_init(struct crypto_kpp *tfm, int name, word32 nbits)
ctx->name = name;
ctx->nbits = nbits;
err = wc_InitRng(&ctx->rng);
if (WOLFSSL_ATOMIC_LOAD(linuxkm_lkcapi_registering_now))
err = LKCAPI_INITRNG_FOR_SELFTEST(&ctx->rng);
else
err = wc_InitRng(&ctx->rng);
if (err) {
#ifdef WOLFKM_DEBUG_DH
pr_err("%s: init rng returned: %d\n", WOLFKM_DH_DRIVER, err);

View File

@@ -387,7 +387,10 @@ static int km_ecdh_init(struct crypto_kpp *tfm, int curve_id)
ctx->curve_len = (word32) ret;
}
ret = wc_InitRng(&ctx->rng);
if (WOLFSSL_ATOMIC_LOAD(linuxkm_lkcapi_registering_now))
ret = LKCAPI_INITRNG_FOR_SELFTEST(&ctx->rng);
else
ret = wc_InitRng(&ctx->rng);
if (ret) {
#ifdef WOLFKM_DEBUG_ECDH
pr_err("%s: init rng returned: %d\n", WOLFKM_ECDH_DRIVER, ret);

View File

@@ -212,8 +212,13 @@ WC_MAYBE_UNUSED static int check_shash_driver_masking(struct crypto_shash *tfm,
#endif
}
static wolfSSL_Atomic_Int linuxkm_lkcapi_registering_now = WOLFSSL_ATOMIC_INITIALIZER(0);
#include "lkcapi_aes_glue.c"
#include "lkcapi_sha_glue.c"
#include "lkcapi_sha_glue.c" /* must be included before the PK glue, to make the
* crypto_default_rng usable therein when
* LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT.
*/
#include "lkcapi_ecdsa_glue.c"
#include "lkcapi_ecdh_glue.c"
#include "lkcapi_rsa_glue.c"
@@ -311,7 +316,6 @@ static int linuxkm_lkcapi_sysfs_deinstall(void) {
return 0;
}
static wolfSSL_Atomic_Int linuxkm_lkcapi_registering_now = WOLFSSL_ATOMIC_INITIALIZER(0);
static int linuxkm_lkcapi_registered = 0;
static int linuxkm_lkcapi_n_registered = 0;
@@ -475,6 +479,9 @@ static int linuxkm_lkcapi_register(void)
REGISTER_ALG(ecbAesAlg, skcipher, linuxkm_test_aesecb);
#endif
/* SHA algs must be registered before PK algs, to make the crypto_default_rng
* available beforehand when LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT.
*/
#ifdef LINUXKM_LKCAPI_REGISTER_SHA1_HMAC
REGISTER_ALG(sha1_hmac_alg, shash, linuxkm_test_sha1_hmac);
#endif

View File

@@ -557,16 +557,6 @@ static int km_rsa_ctx_init(struct km_rsa_ctx * ctx, int hash_oid)
goto out;
}
ret = wc_InitRng(&ctx->rng);
if (ret) {
pr_err("%s: init rng returned: %d\n", WOLFKM_RSA_DRIVER, ret);
if (ret == WC_NO_ERR_TRACE(MEMORY_E))
ret = -ENOMEM;
else
ret = -EINVAL;
goto out;
}
ret = wc_InitRsaKey(ctx->key, NULL);
if (ret) {
pr_err("%s: init rsa key returned: %d\n", WOLFKM_RSA_DRIVER, ret);
@@ -574,13 +564,12 @@ static int km_rsa_ctx_init(struct km_rsa_ctx * ctx, int hash_oid)
goto out;
}
#ifdef WC_RSA_BLINDING
/* Note the initialization of ctx->rng is deferred unless/until needed. */
ret = wc_RsaSetRNG(ctx->key, &ctx->rng);
if (ret) {
ret = -EINVAL;
goto out;
}
#endif /* WC_RSA_BLINDING */
ctx->hash_oid = hash_oid;
@@ -638,6 +627,31 @@ out:
return ret;
}
/* Lazily initialize ctx->rng on first use.
 * During driver self-test registration the RNG is seeded via
 * LKCAPI_INITRNG_FOR_SELFTEST(); otherwise a plain wc_InitRng() is used.
 * Returns 0 on success (including when already initialized), -ENOMEM or
 * -EINVAL on failure, and -EINVAL if the RNG is in an unexpected state.
 */
static inline int km_rsa_ctx_init_rng(struct km_rsa_ctx * ctx) {
    int err;

    if (ctx->rng.status == WC_DRBG_OK)
        return 0;

    if (ctx->rng.status != WC_DRBG_NOT_INIT)
        return -EINVAL;

    if (WOLFSSL_ATOMIC_LOAD(linuxkm_lkcapi_registering_now))
        err = LKCAPI_INITRNG_FOR_SELFTEST(&ctx->rng);
    else
        err = wc_InitRng(&ctx->rng);

    if (err == 0)
        return 0;

    pr_err("%s: init rng returned: %d\n", WOLFKM_RSA_DRIVER, err);
    if (err == WC_NO_ERR_TRACE(MEMORY_E))
        return -ENOMEM;
    else
        return -EINVAL;
}
#if defined(LINUXKM_DIRECT_RSA)
/*
* RSA encrypt with public key.
@@ -704,8 +718,15 @@ static int km_direct_rsa_enc(struct akcipher_request *req)
scatterwalk_map_and_copy(dec, req->src, 0, req->src_len, 0);
/* note: matching behavior of kernel rsa-generic. */
/* note, currently WOLF_CRYPTO_CB is not supported for linuxkm, and the rng
 * arg to wc_RsaFunction() is not actually used for low level (no-pad)
* public key ops in the native implementation (it is a pure function of its
* input args).
*/
err = wc_RsaFunction(dec, req->src_len, enc, &out_len,
RSA_PUBLIC_ENCRYPT, ctx->key, &ctx->rng);
RSA_PUBLIC_ENCRYPT, ctx->key, NULL /* rng */);
if (unlikely(err || (out_len != ctx->key_len))) {
#ifdef WOLFKM_DEBUG_RSA
@@ -789,6 +810,10 @@ static int km_direct_rsa_dec(struct akcipher_request *req)
memset(dec, 0, req->dst_len);
scatterwalk_map_and_copy(enc, req->src, 0, req->src_len, 0);
err = km_rsa_ctx_init_rng(ctx);
if (err)
goto rsa_dec_out;
err = wc_RsaDirect(enc, ctx->key_len, dec, &out_len,
ctx->key, RSA_PRIVATE_DECRYPT, &ctx->rng);
@@ -848,12 +873,11 @@ static int km_rsa_set_priv(struct crypto_akcipher *tfm, const void *key,
return -ENOMEM;
}
#ifdef WC_RSA_BLINDING
/* Note the initialization of ctx->rng is deferred unless/until needed. */
err = wc_RsaSetRNG(ctx->key, &ctx->rng);
if (unlikely(err)) {
return -ENOMEM;
}
#endif /* WC_RSA_BLINDING */
}
err = wc_RsaPrivateKeyDecode(key, &idx, ctx->key, keylen);
@@ -907,6 +931,12 @@ static int km_rsa_set_pub(struct crypto_akcipher *tfm, const void *key,
if (unlikely(err)) {
return -ENOMEM;
}
/* Note the initialization of ctx->rng is deferred unless/until needed. */
err = wc_RsaSetRNG(ctx->key, &ctx->rng);
if (unlikely(err)) {
return -ENOMEM;
}
}
err = wc_RsaPublicKeyDecode(key, &idx, ctx->key, keylen);
@@ -1114,6 +1144,10 @@ static int km_pkcs1pad_sign(struct akcipher_request *req)
goto pkcs1pad_sign_out;
}
err = km_rsa_ctx_init_rng(ctx);
if (err)
goto pkcs1pad_sign_out;
/* sign encoded message. */
sig_len = wc_RsaSSL_Sign(msg, enc_len, sig,
ctx->key_len, ctx->key, &ctx->rng);
@@ -1355,6 +1389,10 @@ static int km_pkcs1_sign(struct crypto_sig *tfm,
goto pkcs1_sign_out;
}
err = km_rsa_ctx_init_rng(ctx);
if (err)
goto pkcs1_sign_out;
/* sign encoded message. */
sig_len = wc_RsaSSL_Sign(msg, enc_msg_len, sig,
ctx->key_len, ctx->key, &ctx->rng);
@@ -1522,12 +1560,11 @@ static int km_pkcs1_set_priv(struct crypto_sig *tfm, const void *key,
return -ENOMEM;
}
#ifdef WC_RSA_BLINDING
/* Note the initialization of ctx->rng is deferred unless/until needed. */
err = wc_RsaSetRNG(ctx->key, &ctx->rng);
if (unlikely(err)) {
return -ENOMEM;
}
#endif /* WC_RSA_BLINDING */
}
err = wc_RsaPrivateKeyDecode(key, &idx, ctx->key, keylen);
@@ -1667,6 +1704,10 @@ static int km_pkcs1pad_enc(struct akcipher_request *req)
memset(enc, 0, req->dst_len);
scatterwalk_map_and_copy(dec, req->src, 0, req->src_len, 0);
err = km_rsa_ctx_init_rng(ctx);
if (err)
goto pkcs1_enc_out;
err = wc_RsaPublicEncrypt(dec, req->src_len, enc, ctx->key_len,
ctx->key, &ctx->rng);
@@ -1741,6 +1782,12 @@ static int km_pkcs1pad_dec(struct akcipher_request *req)
memset(dec, 0, req->dst_len);
scatterwalk_map_and_copy(enc, req->src, 0, req->src_len, 0);
#ifdef WC_RSA_BLINDING
err = km_rsa_ctx_init_rng(ctx);
if (err)
goto pkcs1_dec_out;
#endif
dec_len = wc_RsaPrivateDecrypt(enc, ctx->key_len, dec, req->dst_len,
ctx->key);
@@ -2054,7 +2101,8 @@ static int linuxkm_test_rsa_driver(const char * driver, int nbits)
memset(&rng, 0, sizeof(rng));
memset(key, 0, sizeof(RsaKey));
ret = wc_InitRng(&rng);
ret = LKCAPI_INITRNG_FOR_SELFTEST(&rng);
if (ret) {
pr_err("error: init rng returned: %d\n", ret);
goto test_rsa_end;
@@ -2068,13 +2116,11 @@ static int linuxkm_test_rsa_driver(const char * driver, int nbits)
}
init_key = 1;
#ifdef WC_RSA_BLINDING
ret = wc_RsaSetRNG(key, &rng);
if (ret) {
pr_err("error: rsa set rng returned: %d\n", ret);
goto test_rsa_end;
}
#endif /* WC_RSA_BLINDING */
#ifdef HAVE_FIPS
for (;;) {
@@ -2425,7 +2471,7 @@ static int linuxkm_test_pkcs1pad_driver(const char * driver, int nbits,
memset(&rng, 0, sizeof(rng));
memset(key, 0, sizeof(RsaKey));
ret = wc_InitRng(&rng);
ret = LKCAPI_INITRNG_FOR_SELFTEST(&rng);
if (ret) {
pr_err("error: init rng returned: %d\n", ret);
goto test_pkcs1_end;
@@ -2440,14 +2486,12 @@ static int linuxkm_test_pkcs1pad_driver(const char * driver, int nbits,
}
init_key = 1;
#ifdef WC_RSA_BLINDING
ret = wc_RsaSetRNG(key, &rng);
if (ret) {
pr_err("error: rsa set rng returned: %d\n", ret);
test_rc = ret;
goto test_pkcs1_end;
}
#endif /* WC_RSA_BLINDING */
#ifdef HAVE_FIPS
for (;;) {
@@ -2935,7 +2979,7 @@ static int linuxkm_test_pkcs1_driver(const char * driver, int nbits,
memset(&rng, 0, sizeof(rng));
memset(key, 0, sizeof(RsaKey));
ret = wc_InitRng(&rng);
ret = LKCAPI_INITRNG_FOR_SELFTEST(&rng);
if (ret) {
pr_err("error: init rng returned: %d\n", ret);
goto test_pkcs1_end;
@@ -2950,14 +2994,12 @@ static int linuxkm_test_pkcs1_driver(const char * driver, int nbits,
}
init_key = 1;
#ifdef WC_RSA_BLINDING
ret = wc_RsaSetRNG(key, &rng);
if (ret) {
pr_err("error: rsa set rng returned: %d\n", ret);
test_rc = ret;
goto test_pkcs1_end;
}
#endif /* WC_RSA_BLINDING */
#ifdef HAVE_FIPS
for (;;) {

View File

@@ -998,7 +998,7 @@ static int wc_linuxkm_drbg_init_tfm(struct crypto_tfm *tfm)
int need_reenable_vec = 0;
int can_sleep = (preempt_count() == 0);
ctx->n_rngs = max(4, nr_cpu_ids);
ctx->n_rngs = nr_cpu_ids + 4;
ctx->rngs = (struct wc_rng_inst *)malloc(sizeof(*ctx->rngs) * ctx->n_rngs);
if (! ctx->rngs) {
ctx->n_rngs = 0;
@@ -1129,6 +1129,199 @@ static inline void put_drbg(struct wc_rng_inst *drbg) {
}
}
#if defined(LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT) && defined(HAVE_HASHDRBG)
/* Return the currently installed crypto_default_rng, after sanity-checking
 * that it is our registered wolfCrypt DRBG instance.  Returns NULL if the
 * default instance is not registered or the installed default is foreign.
 * Safe to call from interrupt context (no mutex taken).
 */
static inline struct crypto_rng *get_crypto_default_rng(void) {
    struct crypto_rng *current_crypto_default_rng = crypto_default_rng;
    if (unlikely(! wc_linuxkm_drbg_default_instance_registered)) {
        /* fixed: message previously named get_default_drbg_ctx() and lacked a
         * trailing newline.
         */
        pr_warn("BUG: get_crypto_default_rng() called without wc_linuxkm_drbg_default_instance_registered.\n");
        return NULL;
    }
    /* note we can't call crypto_get_default_rng(), because it uses a mutex
     * (not allowed in interrupt handlers).  we do however sanity-check the
     * cra_init function pointer, and these handlers are protected by
     * random_bytes_cb_refcnt in the patched drivers/char/random.c.
     */
    if (current_crypto_default_rng->base.__crt_alg->cra_init != wc_linuxkm_drbg_init_tfm) {
        pr_err("BUG: get_crypto_default_rng() found wrong crypto_default_rng \"%s\"\n", crypto_tfm_alg_driver_name(&current_crypto_default_rng->base));
        /* NOTE(review): this crypto_put_default_rng() is not paired with a
         * crypto_get_default_rng() in this function -- confirm the refcount
         * balancing is intentional (e.g. releasing a reference taken
         * elsewhere) rather than an unbalanced put.
         */
        crypto_put_default_rng();
        return NULL;
    }
    return current_crypto_default_rng;
}
/* Initialize dest_drbg (a zeroed struct DRBG_internal) using entropy drawn
 * from an already-running source_rng.  The source DRBG's internal secret
 * state is never copied; instead, fresh V and C working-state values are
 * generated from its output, so the two instances share no secret state.
 * Returns 0 on success, else a wolfCrypt error code.
 */
static int drbg_init_from(WC_RNG *source_rng, struct DRBG_internal* dest_drbg) {
    int ret;
    int need_vec_reenable;

    XMEMSET(dest_drbg, 0, sizeof(struct DRBG_internal));

    need_vec_reenable = (DISABLE_VECTOR_REGISTERS() == 0);

    /* Don't copy out the low level DRBG itself -- it contains sensitive secret
     * state. Instead, use it to generate fresh V and C values in a
     * non-intrusive way.
     */
    ret = wc_RNG_GenerateBlock(source_rng, dest_drbg->V, sizeof dest_drbg->V);
    if (ret != 0) {
        pr_err("drbg_init_from: wc_RNG_GenerateBlock for V returned %d\n", ret);
        goto out;
    }
    ret = wc_RNG_GenerateBlock(source_rng, dest_drbg->C, sizeof dest_drbg->C);
    if (ret != 0) {
        pr_err("drbg_init_from: wc_RNG_GenerateBlock for C returned %d\n", ret);
        goto out;
    }
    dest_drbg->heap = source_rng->heap;
#if defined(WOLFSSL_ASYNC_CRYPT) || defined(WOLF_CRYPTO_CB)
    dest_drbg->devId = source_rng->devId;
#endif
    ret = wc_InitSha256_ex(&dest_drbg->sha256, dest_drbg->heap,
#if defined(WOLFSSL_ASYNC_CRYPT) || defined(WOLF_CRYPTO_CB)
                           /* fixed: was source_rng->dev_id; the WC_RNG field
                            * is devId, matching the assignment above.
                            */
                           source_rng->devId
#else
                           INVALID_DEVID
#endif
        );
    if (ret != 0)
        goto out;

    /* A freshly seeded DRBG starts its reseed counter at 1. */
    dest_drbg->reseedCtr = 1;

    ret = 0;

out:

    if (need_vec_reenable)
        REENABLE_VECTOR_REGISTERS();

    return ret;
}
/* fork_default_rng() is a non-FIPS-compliant helper function to initialize an
* RNG for glue layer POSTs. Direct replacement for wc_InitRng(), and secure in
* principle, but not permissible to use as such in FIPS runtimes.
*/
/* fork_default_rng() is a non-FIPS-compliant helper function to initialize an
 * RNG for glue layer POSTs.  Direct replacement for wc_InitRng(), and secure
 * in principle, but not permissible to use as such in FIPS runtimes.
 * On any failure it releases all partially acquired resources and falls back
 * to a plain wc_InitRng() on forked_rng.
 */
static WC_MAYBE_UNUSED int fork_default_rng(WC_RNG *forked_rng) {
    struct crypto_rng *current_crypto_default_rng;
    struct wc_rng_inst *rng = NULL;
    struct DRBG_internal *drbg = NULL;
    struct DRBG_internal *drbg_scratch = NULL;
    byte *health_check_scratch = NULL;
    byte *newSeed_buf = NULL;
    int ret;

    if (forked_rng == NULL)
        return BAD_FUNC_ARG;

    XMEMSET(forked_rng, 0, sizeof *forked_rng);

    /* All buffers below are allocated from the NULL (default) heap; the
     * error path must free them against the same heap and DYNAMIC_TYPE.
     */
    health_check_scratch =
        (byte *)XMALLOC(RNG_HEALTH_TEST_CHECK_SIZE, NULL,
                        DYNAMIC_TYPE_TMP_BUFFER);
    if (health_check_scratch == NULL) {
        ret = MEMORY_E;
        goto out;
    }
    newSeed_buf = (byte*)XMALLOC(WC_DRBG_SEED_SZ +
                                 WC_DRBG_SEED_BLOCK_SZ,
                                 NULL,
                                 DYNAMIC_TYPE_SEED);
    if (newSeed_buf == NULL) {
        ret = MEMORY_E;
        goto out;
    }
    drbg = (struct DRBG_internal *)XMALLOC(sizeof *drbg, NULL,
                                           DYNAMIC_TYPE_RNG);
    if (drbg == NULL) {
        ret = MEMORY_E;
        goto out;
    }
    drbg_scratch =
        (struct DRBG_internal *)XMALLOC(sizeof *drbg_scratch, NULL,
                                        DYNAMIC_TYPE_RNG);
    if (drbg_scratch == NULL) {
        ret = MEMORY_E;
        goto out;
    }

    current_crypto_default_rng = get_crypto_default_rng();
    if (current_crypto_default_rng == NULL) {
        ret = BAD_STATE_E;
        goto out;
    }

    rng = get_drbg(current_crypto_default_rng);
    if (rng == NULL) {
        ret = BAD_STATE_E;
        goto out;
    }

    if (rng->rng.status != WC_DRBG_OK) {
        pr_err("fork_default_rng: rng->rng.status = %d\n", rng->rng.status);
        ret = RNG_FAILURE_E;
        goto out;
    }

    /* Clone the top-level WC_RNG, then point it at freshly allocated DRBG
     * state seeded (but never copied) from the default instance.
     */
    XMEMCPY(forked_rng, &rng->rng, sizeof *forked_rng);
    forked_rng->drbg = (struct DRBG *)drbg;
    forked_rng->drbg_scratch = drbg_scratch;
    forked_rng->health_check_scratch = health_check_scratch;
    forked_rng->newSeed_buf = newSeed_buf;

    ret = drbg_init_from(&rng->rng, (struct DRBG_internal*)forked_rng->drbg);
    if (ret != 0)
        goto out;
    ret = drbg_init_from(&rng->rng, (struct DRBG_internal*)forked_rng->drbg_scratch);
    if (ret != 0)
        goto out;

    put_drbg(rng);
    rng = NULL;

    /* Smoke-test the forked instance before declaring success. */
    {
        byte scratch[4];
        ret = wc_RNG_GenerateBlock(forked_rng, scratch, sizeof scratch);
        if (ret != 0)
            goto out;
    }

    ret = 0;

out:

    if (ret == 0)
        return ret;
    else {
        if (rng)
            put_drbg(rng);
        /* fixed: previous code freed these with rng->rng.heap, dereferencing
         * rng even when it was NULL (early allocation failure, or a failure
         * after the success-path put_drbg() cleared it), and used
         * DYNAMIC_TYPE_RNG for buffers allocated with other type tags.
         * Free against the NULL heap and type tags used at allocation.
         */
        XFREE(drbg, NULL, DYNAMIC_TYPE_RNG);
        XFREE(drbg_scratch, NULL, DYNAMIC_TYPE_RNG);
        XFREE(health_check_scratch, NULL, DYNAMIC_TYPE_TMP_BUFFER);
        XFREE(newSeed_buf, NULL, DYNAMIC_TYPE_SEED);
        /* Clear the now-dangling pointers copied into forked_rng before
         * handing it to the from-scratch fallback init.
         */
        XMEMSET(forked_rng, 0, sizeof *forked_rng);
        pr_warn("WARNING: fork_default_rng: ret=%d; falling through to wc_InitRng()\n", ret);
        return wc_InitRng(forked_rng);
    }
}
#define LKCAPI_INITRNG_FOR_SELFTEST(rng) fork_default_rng(rng)
#else /* !LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT || !HAVE_HASHDRBG */
#define LKCAPI_INITRNG_FOR_SELFTEST(rng) wc_InitRng(rng)
#endif /* !LINUXKM_LKCAPI_REGISTER_HASH_DRBG_DEFAULT || !HAVE_HASHDRBG */
static int wc_linuxkm_drbg_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
@@ -1322,29 +1515,6 @@ static int wc_linuxkm_drbg_loaded = 0;
#ifdef WOLFSSL_LINUXKM_HAVE_GET_RANDOM_CALLBACKS
static inline struct crypto_rng *get_crypto_default_rng(void) {
struct crypto_rng *current_crypto_default_rng = crypto_default_rng;
if (unlikely(! wc_linuxkm_drbg_default_instance_registered)) {
pr_warn("BUG: get_default_drbg_ctx() called without wc_linuxkm_drbg_default_instance_registered.");
return NULL;
}
/* note we can't call crypto_get_default_rng(), because it uses a mutex
* (not allowed in interrupt handlers). we do however sanity-check the
* cra_init function pointer, and these handlers are protected by
* random_bytes_cb_refcnt in the patched drivers/char/random.c.
*/
if (current_crypto_default_rng->base.__crt_alg->cra_init != wc_linuxkm_drbg_init_tfm) {
pr_err("BUG: get_default_drbg_ctx() found wrong crypto_default_rng \"%s\"\n", crypto_tfm_alg_driver_name(&current_crypto_default_rng->base));
crypto_put_default_rng();
return NULL;
}
return current_crypto_default_rng;
}
static inline struct wc_linuxkm_drbg_ctx *get_default_drbg_ctx(void) {
struct crypto_rng *current_crypto_default_rng = get_crypto_default_rng();
struct wc_linuxkm_drbg_ctx *ctx = (current_crypto_default_rng ? (struct wc_linuxkm_drbg_ctx *)crypto_rng_ctx(current_crypto_default_rng) : NULL);