Merge pull request #8567 from douzzer/20250317-linuxkm-AES-GCM

20250317-linuxkm-AES-GCM
This commit is contained in:
philljj
2025-03-19 12:48:10 -04:00
committed by GitHub
3 changed files with 256 additions and 137 deletions

View File

@@ -9069,15 +9069,13 @@ then
for lkcapi_alg in $(echo "$ENABLED_LINUXKM_LKCAPI_REGISTER" | tr ',' ' ') for lkcapi_alg in $(echo "$ENABLED_LINUXKM_LKCAPI_REGISTER" | tr ',' ' ')
do do
case "$lkcapi_alg" in case "$lkcapi_alg" in
all) test "$ENABLED_EXPERIMENTAL" = "yes" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: requires --enable-experimental.]) all) AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_ALL" ;;
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_ALL" ;;
'cbc(aes)') test "$ENABLED_AESCBC" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-CBC implementation not enabled.]) 'cbc(aes)') test "$ENABLED_AESCBC" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-CBC implementation not enabled.])
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESCBC" ;; AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESCBC" ;;
'cfb(aes)') test "$ENABLED_AESCFB" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-CFB implementation not enabled.]) 'cfb(aes)') test "$ENABLED_AESCFB" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-CFB implementation not enabled.])
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESCFB" ;; AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESCFB" ;;
'gcm(aes)') test "$ENABLED_AESGCM" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-GCM implementation not enabled.]) 'gcm(aes)') test "$ENABLED_AESGCM" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-GCM implementation not enabled.])
test "$ENABLED_AESGCM_STREAM" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: --enable-aesgcm-stream is required for LKCAPI.]) test "$ENABLED_AESGCM_STREAM" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: --enable-aesgcm-stream is required for LKCAPI.])
test "$ENABLED_EXPERIMENTAL" = "yes" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: requires --enable-experimental.])
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESGCM" ;; AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESGCM" ;;
'xts(aes)') test "$ENABLED_AESXTS" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-XTS implementation not enabled.]) 'xts(aes)') test "$ENABLED_AESXTS" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-XTS implementation not enabled.])
test "$ENABLED_AESXTS_STREAM" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: --enable-aesxts-stream is required for LKCAPI.]) test "$ENABLED_AESXTS_STREAM" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: --enable-aesxts-stream is required for LKCAPI.])

View File

@@ -115,9 +115,6 @@ static int linuxkm_test_aescfb(void);
#if defined(HAVE_AESGCM) && \ #if defined(HAVE_AESGCM) && \
(defined(LINUXKM_LKCAPI_REGISTER_ALL) || \ (defined(LINUXKM_LKCAPI_REGISTER_ALL) || \
defined(LINUXKM_LKCAPI_REGISTER_AESGCM)) defined(LINUXKM_LKCAPI_REGISTER_AESGCM))
#ifndef WOLFSSL_EXPERIMENTAL_SETTINGS
#error Experimental settings without WOLFSSL_EXPERIMENTAL_SETTINGS
#endif
static int linuxkm_test_aesgcm(void); static int linuxkm_test_aesgcm(void);
#endif #endif
#if defined(WOLFSSL_AES_XTS) && \ #if defined(WOLFSSL_AES_XTS) && \
@@ -280,7 +277,8 @@ static int km_AesCbcEncrypt(struct skcipher_request *req)
struct km_AesCtx * ctx = NULL; struct km_AesCtx * ctx = NULL;
struct skcipher_walk walk; struct skcipher_walk walk;
unsigned int nbytes = 0; unsigned int nbytes = 0;
int err = 0; int err;
Aes *aes_copy;
tfm = crypto_skcipher_reqtfm(req); tfm = crypto_skcipher_reqtfm(req);
ctx = crypto_skcipher_ctx(tfm); ctx = crypto_skcipher_ctx(tfm);
@@ -291,22 +289,30 @@ static int km_AesCbcEncrypt(struct skcipher_request *req)
return err; return err;
} }
err = wc_AesSetIV(ctx->aes_encrypt, walk.iv); /* Copy the cipher state to mitigate races on Aes.reg and Aes.tmp. */
aes_copy = (struct Aes *)malloc(sizeof(Aes));
if (aes_copy == NULL)
return -ENOMEM;
XMEMCPY(aes_copy, ctx->aes_encrypt, sizeof(Aes));
err = wc_AesSetIV(aes_copy, walk.iv);
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: wc_AesSetIV failed: %d\n", pr_err("%s: wc_AesSetIV failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL; err = -EINVAL;
goto out;
} }
while ((nbytes = walk.nbytes) != 0) { while ((nbytes = walk.nbytes) != 0) {
err = wc_AesCbcEncrypt(ctx->aes_encrypt, walk.dst.virt.addr, err = wc_AesCbcEncrypt(aes_copy, walk.dst.virt.addr,
walk.src.virt.addr, nbytes & (~(WC_AES_BLOCK_SIZE - 1))); walk.src.virt.addr, nbytes & (~(WC_AES_BLOCK_SIZE - 1)));
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: wc_AesCbcEncrypt failed for %u bytes: %d\n", pr_err("%s: wc_AesCbcEncrypt failed for %u bytes: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), nbytes, err); crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), nbytes, err);
return -EINVAL; err = -EINVAL;
goto out;
} }
nbytes &= WC_AES_BLOCK_SIZE - 1; nbytes &= WC_AES_BLOCK_SIZE - 1;
@@ -314,7 +320,11 @@ static int km_AesCbcEncrypt(struct skcipher_request *req)
} }
/* copy iv from wolfCrypt back to walk.iv */ /* copy iv from wolfCrypt back to walk.iv */
XMEMCPY(walk.iv, ctx->aes_encrypt->reg, WC_AES_BLOCK_SIZE); XMEMCPY(walk.iv, aes_copy->reg, WC_AES_BLOCK_SIZE);
out:
free(aes_copy);
return err; return err;
} }
@@ -325,7 +335,8 @@ static int km_AesCbcDecrypt(struct skcipher_request *req)
struct km_AesCtx * ctx = NULL; struct km_AesCtx * ctx = NULL;
struct skcipher_walk walk; struct skcipher_walk walk;
unsigned int nbytes = 0; unsigned int nbytes = 0;
int err = 0; int err;
Aes *aes_copy;
tfm = crypto_skcipher_reqtfm(req); tfm = crypto_skcipher_reqtfm(req);
ctx = crypto_skcipher_ctx(tfm); ctx = crypto_skcipher_ctx(tfm);
@@ -336,23 +347,31 @@ static int km_AesCbcDecrypt(struct skcipher_request *req)
return err; return err;
} }
err = wc_AesSetIV(ctx->aes_decrypt, walk.iv); /* Copy the cipher state to mitigate races on Aes.reg and Aes.tmp. */
aes_copy = (struct Aes *)malloc(sizeof(Aes));
if (aes_copy == NULL)
return -ENOMEM;
XMEMCPY(aes_copy, ctx->aes_decrypt, sizeof(Aes));
err = wc_AesSetIV(aes_copy, walk.iv);
if (unlikely(err)) { if (unlikely(err)) {
if (! disable_setkey_warnings) if (! disable_setkey_warnings)
pr_err("%s: wc_AesSetKey failed: %d\n", pr_err("%s: wc_AesSetKey failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL; err = -EINVAL;
goto out;
} }
while ((nbytes = walk.nbytes) != 0) { while ((nbytes = walk.nbytes) != 0) {
err = wc_AesCbcDecrypt(ctx->aes_decrypt, walk.dst.virt.addr, err = wc_AesCbcDecrypt(aes_copy, walk.dst.virt.addr,
walk.src.virt.addr, nbytes & (~(WC_AES_BLOCK_SIZE - 1))); walk.src.virt.addr, nbytes & (~(WC_AES_BLOCK_SIZE - 1)));
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: wc_AesCbcDecrypt failed for %u bytes: %d\n", pr_err("%s: wc_AesCbcDecrypt failed for %u bytes: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), nbytes, err); crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), nbytes, err);
return -EINVAL; err = -EINVAL;
goto out;
} }
nbytes &= WC_AES_BLOCK_SIZE - 1; nbytes &= WC_AES_BLOCK_SIZE - 1;
@@ -360,7 +379,11 @@ static int km_AesCbcDecrypt(struct skcipher_request *req)
} }
/* copy iv from wolfCrypt back to walk.iv */ /* copy iv from wolfCrypt back to walk.iv */
XMEMCPY(walk.iv, ctx->aes_decrypt->reg, WC_AES_BLOCK_SIZE); XMEMCPY(walk.iv, aes_copy->reg, WC_AES_BLOCK_SIZE);
out:
free(aes_copy);
return err; return err;
} }
@@ -409,7 +432,8 @@ static int km_AesCfbEncrypt(struct skcipher_request *req)
struct crypto_skcipher * tfm = NULL; struct crypto_skcipher * tfm = NULL;
struct km_AesCtx * ctx = NULL; struct km_AesCtx * ctx = NULL;
struct skcipher_walk walk; struct skcipher_walk walk;
int err = 0; int err;
Aes *aes_copy;
tfm = crypto_skcipher_reqtfm(req); tfm = crypto_skcipher_reqtfm(req);
ctx = crypto_skcipher_ctx(tfm); ctx = crypto_skcipher_ctx(tfm);
@@ -422,22 +446,30 @@ static int km_AesCfbEncrypt(struct skcipher_request *req)
return err; return err;
} }
err = wc_AesSetIV(ctx->aes_encrypt, walk.iv); /* Copy the cipher state to mitigate races on Aes.reg and Aes.tmp. */
aes_copy = (struct Aes *)malloc(sizeof(Aes));
if (aes_copy == NULL)
return -ENOMEM;
XMEMCPY(aes_copy, ctx->aes_encrypt, sizeof(Aes));
err = wc_AesSetIV(aes_copy, walk.iv);
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: wc_AesSetIV failed: %d\n", pr_err("%s: wc_AesSetIV failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL; err = -EINVAL;
goto out;
} }
while (walk.nbytes != 0) { while (walk.nbytes != 0) {
err = wc_AesCfbEncrypt(ctx->aes_encrypt, walk.dst.virt.addr, err = wc_AesCfbEncrypt(aes_copy, walk.dst.virt.addr,
walk.src.virt.addr, walk.nbytes); walk.src.virt.addr, walk.nbytes);
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: wc_AesCfbEncrypt failed %d\n", pr_err("%s: wc_AesCfbEncrypt failed %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL; err = -EINVAL;
goto out;
} }
err = skcipher_walk_done(&walk, 0); err = skcipher_walk_done(&walk, 0);
@@ -450,7 +482,11 @@ static int km_AesCfbEncrypt(struct skcipher_request *req)
} }
/* copy iv from wolfCrypt back to walk.iv */ /* copy iv from wolfCrypt back to walk.iv */
XMEMCPY(walk.iv, ctx->aes_encrypt->reg, WC_AES_BLOCK_SIZE); XMEMCPY(walk.iv, aes_copy->reg, WC_AES_BLOCK_SIZE);
out:
free(aes_copy);
return err; return err;
} }
@@ -460,7 +496,8 @@ static int km_AesCfbDecrypt(struct skcipher_request *req)
struct crypto_skcipher * tfm = NULL; struct crypto_skcipher * tfm = NULL;
struct km_AesCtx * ctx = NULL; struct km_AesCtx * ctx = NULL;
struct skcipher_walk walk; struct skcipher_walk walk;
int err = 0; int err;
Aes *aes_copy;
tfm = crypto_skcipher_reqtfm(req); tfm = crypto_skcipher_reqtfm(req);
ctx = crypto_skcipher_ctx(tfm); ctx = crypto_skcipher_ctx(tfm);
@@ -473,23 +510,34 @@ static int km_AesCfbDecrypt(struct skcipher_request *req)
return err; return err;
} }
err = wc_AesSetIV(ctx->aes_encrypt, walk.iv); /* Copy the cipher state to mitigate races on Aes.reg and Aes.tmp. */
aes_copy = (struct Aes *)malloc(sizeof(Aes));
if (aes_copy == NULL)
return -ENOMEM;
XMEMCPY(aes_copy, ctx->aes_encrypt, sizeof(Aes)); /* CFB uses the same
* schedule for encrypt
* and decrypt.
*/
err = wc_AesSetIV(aes_copy, walk.iv);
if (unlikely(err)) { if (unlikely(err)) {
if (! disable_setkey_warnings) if (! disable_setkey_warnings)
pr_err("%s: wc_AesSetIV failed: %d\n", pr_err("%s: wc_AesSetIV failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL; err = -EINVAL;
goto out;
} }
while (walk.nbytes != 0) { while (walk.nbytes != 0) {
err = wc_AesCfbDecrypt(ctx->aes_encrypt, walk.dst.virt.addr, err = wc_AesCfbDecrypt(aes_copy, walk.dst.virt.addr,
walk.src.virt.addr, walk.nbytes); walk.src.virt.addr, walk.nbytes);
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: wc_AesCfbDecrypt failed: %d\n", pr_err("%s: wc_AesCfbDecrypt failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL; err = -EINVAL;
goto out;
} }
err = skcipher_walk_done(&walk, 0); err = skcipher_walk_done(&walk, 0);
@@ -497,12 +545,16 @@ static int km_AesCfbDecrypt(struct skcipher_request *req)
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: skcipher_walk_done failed: %d\n", pr_err("%s: skcipher_walk_done failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return err; goto out;
} }
} }
/* copy iv from wolfCrypt back to walk.iv */ /* copy iv from wolfCrypt back to walk.iv */
XMEMCPY(walk.iv, ctx->aes_encrypt->reg, WC_AES_BLOCK_SIZE); XMEMCPY(walk.iv, aes_copy->reg, WC_AES_BLOCK_SIZE);
out:
free(aes_copy);
return err; return err;
} }
@@ -570,13 +622,23 @@ static int km_AesGcmSetKey(struct crypto_aead *tfm, const u8 *in_key,
static int km_AesGcmSetAuthsize(struct crypto_aead *tfm, unsigned int authsize) static int km_AesGcmSetAuthsize(struct crypto_aead *tfm, unsigned int authsize)
{ {
(void)tfm; (void)tfm;
if (authsize > WC_AES_BLOCK_SIZE ||
authsize < WOLFSSL_MIN_AUTH_TAG_SZ) { switch (authsize) {
pr_err("%s: invalid authsize: %d\n", case 4:
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), authsize); case 8:
return -EINVAL; case 12:
case 13:
case 14:
case 15:
case 16:
return 0;
} }
return 0;
#ifdef WOLFSSL_LINUXKM_VERBOSE_LKCAPI_DEBUG
pr_err("%s: invalid authsize: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), authsize);
#endif
return -EINVAL;
} }
/* /*
@@ -595,19 +657,14 @@ static int km_AesGcmEncrypt(struct aead_request *req)
struct km_AesCtx * ctx = NULL; struct km_AesCtx * ctx = NULL;
struct skcipher_walk walk; struct skcipher_walk walk;
struct scatter_walk assocSgWalk; struct scatter_walk assocSgWalk;
unsigned int nbytes = 0;
u8 authTag[WC_AES_BLOCK_SIZE]; u8 authTag[WC_AES_BLOCK_SIZE];
int err = 0; int err;
unsigned int assocLeft = 0;
unsigned int cryptLeft = 0;
u8 * assoc = NULL; u8 * assoc = NULL;
u8 * assocmem = NULL;
Aes *aes_copy;
tfm = crypto_aead_reqtfm(req); tfm = crypto_aead_reqtfm(req);
ctx = crypto_aead_ctx(tfm); ctx = crypto_aead_ctx(tfm);
assocLeft = req->assoclen;
cryptLeft = req->cryptlen;
scatterwalk_start(&assocSgWalk, req->src);
err = skcipher_walk_aead_encrypt(&walk, req, false); err = skcipher_walk_aead_encrypt(&walk, req, false);
if (unlikely(err)) { if (unlikely(err)) {
@@ -616,70 +673,94 @@ static int km_AesGcmEncrypt(struct aead_request *req)
return -EINVAL; return -EINVAL;
} }
err = wc_AesGcmInit(ctx->aes_encrypt, NULL /*key*/, 0 /*keylen*/, walk.iv, /* Copy the cipher state to mitigate races on Aes.reg, Aes.tmp, and
WC_AES_BLOCK_SIZE); * aes->streamData.
*/
aes_copy = (struct Aes *)malloc(sizeof(Aes));
if (aes_copy == NULL)
return -ENOMEM;
XMEMCPY(aes_copy, ctx->aes_encrypt, sizeof(Aes));
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_AESNI)
aes_copy->streamData = NULL;
#endif
err = wc_AesGcmInit(aes_copy, NULL /*key*/, 0 /*keylen*/, walk.iv,
GCM_NONCE_MID_SZ);
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: wc_AesGcmInit failed: %d\n", pr_err("%s: wc_AesGcmInit failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL; err = -EINVAL;
goto out;
} }
assoc = scatterwalk_map(&assocSgWalk); if (req->src->length >= req->assoclen && req->src->length) {
if (unlikely(IS_ERR(assoc))) { scatterwalk_start(&assocSgWalk, req->src);
pr_err("%s: scatterwalk_map failed: %ld\n", assoc = scatterwalk_map(&assocSgWalk);
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), if (unlikely(IS_ERR(assoc))) {
PTR_ERR(assoc)); pr_err("%s: scatterwalk_map failed: %ld\n",
return err; crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)),
PTR_ERR(assoc));
goto out;
}
}
else {
/* assoc can be any length, so if it's noncontiguous, we have to copy it
* to a contiguous heap allocation.
*/
assocmem = malloc(req->assoclen);
if (unlikely(assocmem == NULL)) {
err = -ENOMEM;
goto out;
}
assoc = assocmem;
scatterwalk_map_and_copy(assoc, req->src, 0, req->assoclen, 0);
} }
err = wc_AesGcmEncryptUpdate(ctx->aes_encrypt, NULL, NULL, 0, err = wc_AesGcmEncryptUpdate(aes_copy, NULL, NULL, 0,
assoc, assocLeft); assoc, req->assoclen);
assocLeft -= assocLeft;
scatterwalk_unmap(assoc); if (assocmem)
assoc = NULL; free(assocmem);
else
scatterwalk_unmap(assoc);
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: wc_AesGcmEncryptUpdate failed: %d\n", pr_err("%s: wc_AesGcmEncryptUpdate failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL; err = -EINVAL;
goto out;
} }
while ((nbytes = walk.nbytes) != 0) { while (walk.nbytes) {
int n = nbytes; err = wc_AesGcmEncryptUpdate(
aes_copy,
if (likely(cryptLeft && nbytes)) { walk.dst.virt.addr,
n = cryptLeft < nbytes ? cryptLeft : nbytes; walk.src.virt.addr,
walk.nbytes,
err = wc_AesGcmEncryptUpdate( NULL, 0);
ctx->aes_encrypt,
walk.dst.virt.addr,
walk.src.virt.addr,
cryptLeft,
NULL, 0);
nbytes -= n;
cryptLeft -= n;
}
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: wc_AesGcmEncryptUpdate failed: %d\n", pr_err("%s: wc_AesGcmEncryptUpdate failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL; err = -EINVAL;
goto out;
} }
err = skcipher_walk_done(&walk, nbytes); err = skcipher_walk_done(&walk, 0);
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: skcipher_walk_done failed: %d\n", pr_err("%s: skcipher_walk_done failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return err; goto out;
} }
} }
err = wc_AesGcmEncryptFinal(ctx->aes_encrypt, authTag, tfm->authsize); err = wc_AesGcmEncryptFinal(aes_copy, authTag, tfm->authsize);
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: wc_AesGcmEncryptFinal failed with return code %d\n", pr_err("%s: wc_AesGcmEncryptFinal failed with return code %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL; err = -EINVAL;
goto out;
} }
/* Now copy the auth tag into request scatterlist. */ /* Now copy the auth tag into request scatterlist. */
@@ -687,6 +768,13 @@ static int km_AesGcmEncrypt(struct aead_request *req)
req->assoclen + req->cryptlen, req->assoclen + req->cryptlen,
tfm->authsize, 1); tfm->authsize, 1);
out:
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_AESNI)
free(aes_copy->streamData);
#endif
free(aes_copy);
return err; return err;
} }
@@ -696,25 +784,20 @@ static int km_AesGcmDecrypt(struct aead_request *req)
struct km_AesCtx * ctx = NULL; struct km_AesCtx * ctx = NULL;
struct skcipher_walk walk; struct skcipher_walk walk;
struct scatter_walk assocSgWalk; struct scatter_walk assocSgWalk;
unsigned int nbytes = 0;
u8 origAuthTag[WC_AES_BLOCK_SIZE]; u8 origAuthTag[WC_AES_BLOCK_SIZE];
int err = 0; int err;
unsigned int assocLeft = 0;
unsigned int cryptLeft = 0;
u8 * assoc = NULL; u8 * assoc = NULL;
u8 * assocmem = NULL;
Aes *aes_copy;
tfm = crypto_aead_reqtfm(req); tfm = crypto_aead_reqtfm(req);
ctx = crypto_aead_ctx(tfm); ctx = crypto_aead_ctx(tfm);
assocLeft = req->assoclen;
cryptLeft = req->cryptlen - tfm->authsize;
/* Copy out original auth tag from req->src. */ /* Copy out original auth tag from req->src. */
scatterwalk_map_and_copy(origAuthTag, req->src, scatterwalk_map_and_copy(origAuthTag, req->src,
req->assoclen + req->cryptlen - tfm->authsize, req->assoclen + req->cryptlen - tfm->authsize,
tfm->authsize, 0); tfm->authsize, 0);
scatterwalk_start(&assocSgWalk, req->src);
err = skcipher_walk_aead_decrypt(&walk, req, false); err = skcipher_walk_aead_decrypt(&walk, req, false);
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: skcipher_walk_aead_decrypt failed: %d\n", pr_err("%s: skcipher_walk_aead_decrypt failed: %d\n",
@@ -722,78 +805,114 @@ static int km_AesGcmDecrypt(struct aead_request *req)
return err; return err;
} }
err = wc_AesGcmInit(ctx->aes_encrypt, NULL /*key*/, 0 /*keylen*/, walk.iv, /* Copy the cipher state to mitigate races on Aes.reg, Aes.tmp, and
WC_AES_BLOCK_SIZE); * aes->streamData.
*/
aes_copy = (struct Aes *)malloc(sizeof(Aes));
if (aes_copy == NULL)
return -ENOMEM;
XMEMCPY(aes_copy, ctx->aes_encrypt, sizeof(Aes)); /* GCM uses the same
* schedule for encrypt
* and decrypt.
*/
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_AESNI)
aes_copy->streamData = NULL;
#endif
err = wc_AesGcmInit(aes_copy, NULL /*key*/, 0 /*keylen*/, walk.iv,
GCM_NONCE_MID_SZ);
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: wc_AesGcmInit failed: %d\n", pr_err("%s: wc_AesGcmInit failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL; err = -EINVAL;
goto out;
} }
assoc = scatterwalk_map(&assocSgWalk); if (req->src->length >= req->assoclen && req->src->length) {
if (unlikely(IS_ERR(assoc))) { scatterwalk_start(&assocSgWalk, req->src);
pr_err("%s: scatterwalk_map failed: %ld\n", assoc = scatterwalk_map(&assocSgWalk);
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), if (unlikely(IS_ERR(assoc))) {
PTR_ERR(assoc)); pr_err("%s: scatterwalk_map failed: %ld\n",
return err; crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)),
PTR_ERR(assoc));
goto out;
}
}
else {
/* assoc can be any length, so if it's noncontiguous, we have to copy it
* to a contiguous heap allocation.
*/
assocmem = malloc(req->assoclen);
if (unlikely(assocmem == NULL)) {
err = -ENOMEM;
goto out;
}
assoc = assocmem;
scatterwalk_map_and_copy(assoc, req->src, 0, req->assoclen, 0);
} }
err = wc_AesGcmDecryptUpdate(ctx->aes_encrypt, NULL, NULL, 0, err = wc_AesGcmDecryptUpdate(aes_copy, NULL, NULL, 0,
assoc, assocLeft); assoc, req->assoclen);
assocLeft -= assocLeft;
scatterwalk_unmap(assoc); if (assocmem)
assoc = NULL; free(assocmem);
else
scatterwalk_unmap(assoc);
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: wc_AesGcmDecryptUpdate failed: %d\n", pr_err("%s: wc_AesGcmDecryptUpdate failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL; err = -EINVAL;
goto out;
} }
while ((nbytes = walk.nbytes) != 0) { while (walk.nbytes) {
int n = nbytes; err = wc_AesGcmDecryptUpdate(
aes_copy,
if (likely(cryptLeft && nbytes)) { walk.dst.virt.addr,
n = cryptLeft < nbytes ? cryptLeft : nbytes; walk.src.virt.addr,
walk.nbytes,
err = wc_AesGcmDecryptUpdate( NULL, 0);
ctx->aes_encrypt,
walk.dst.virt.addr,
walk.src.virt.addr,
cryptLeft,
NULL, 0);
nbytes -= n;
cryptLeft -= n;
}
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: wc_AesGcmDecryptUpdate failed: %d\n", pr_err("%s: wc_AesGcmDecryptUpdate failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL; err = -EINVAL;
goto out;
} }
err = skcipher_walk_done(&walk, nbytes); err = skcipher_walk_done(&walk, 0);
if (unlikely(err)) { if (unlikely(err)) {
pr_err("%s: skcipher_walk_done failed: %d\n", pr_err("%s: skcipher_walk_done failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return err; goto out;
} }
} }
err = wc_AesGcmDecryptFinal(ctx->aes_encrypt, origAuthTag, tfm->authsize); err = wc_AesGcmDecryptFinal(aes_copy, origAuthTag, tfm->authsize);
if (unlikely(err)) { if (unlikely(err)) {
#ifdef WOLFSSL_LINUXKM_VERBOSE_LKCAPI_DEBUG
pr_err("%s: wc_AesGcmDecryptFinal failed with return code %d\n", pr_err("%s: wc_AesGcmDecryptFinal failed with return code %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err); crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
#endif
if (err == WC_NO_ERR_TRACE(AES_GCM_AUTH_E)) { if (err == WC_NO_ERR_TRACE(AES_GCM_AUTH_E)) {
return -EBADMSG; err = -EBADMSG;
goto out;
} }
else { else {
return -EINVAL; err = -EINVAL;
goto out;
} }
} }
out:
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_AESNI)
free(aes_copy->streamData);
#endif
free(aes_copy);
return err; return err;
} }
@@ -810,7 +929,7 @@ static struct aead_alg gcmAesAead = {
.setauthsize = km_AesGcmSetAuthsize, .setauthsize = km_AesGcmSetAuthsize,
.encrypt = km_AesGcmEncrypt, .encrypt = km_AesGcmEncrypt,
.decrypt = km_AesGcmDecrypt, .decrypt = km_AesGcmDecrypt,
.ivsize = WC_AES_BLOCK_SIZE, .ivsize = GCM_NONCE_MID_SZ,
.maxauthsize = WC_AES_BLOCK_SIZE, .maxauthsize = WC_AES_BLOCK_SIZE,
.chunksize = WC_AES_BLOCK_SIZE, .chunksize = WC_AES_BLOCK_SIZE,
}; };
@@ -892,7 +1011,7 @@ static int km_AesXtsSetKey(struct crypto_skcipher *tfm, const u8 *in_key,
static int km_AesXtsEncrypt(struct skcipher_request *req) static int km_AesXtsEncrypt(struct skcipher_request *req)
{ {
int err = 0; int err;
struct crypto_skcipher * tfm = NULL; struct crypto_skcipher * tfm = NULL;
struct km_AesXtsCtx * ctx = NULL; struct km_AesXtsCtx * ctx = NULL;
struct skcipher_walk walk; struct skcipher_walk walk;
@@ -1024,7 +1143,7 @@ static int km_AesXtsEncrypt(struct skcipher_request *req)
static int km_AesXtsDecrypt(struct skcipher_request *req) static int km_AesXtsDecrypt(struct skcipher_request *req)
{ {
int err = 0; int err;
struct crypto_skcipher * tfm = NULL; struct crypto_skcipher * tfm = NULL;
struct km_AesXtsCtx * ctx = NULL; struct km_AesXtsCtx * ctx = NULL;
struct skcipher_walk walk; struct skcipher_walk walk;
@@ -1626,17 +1745,17 @@ static int linuxkm_test_aesgcm(void)
0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef, 0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
0xab, 0xad, 0xda, 0xd2 0xab, 0xad, 0xda, 0xd2
}; };
static const byte ivstr[] = "1234567890abcdef"; static const byte ivstr[] = "1234567890ab";
static const byte c_vector[] = static const byte c_vector[] =
{ {
0x0c,0x97,0x05,0x3c,0xef,0x5c,0x63,0x6b, 0x80,0xb9,0x00,0xdc,0x03,0xb8,0x0e,0xaa,
0x15,0xe4,0x00,0x63,0xf8,0x8c,0xd0,0x95, 0x98,0x09,0x75,0x01,0x40,0x09,0xb0,0xc3,
0x27,0x81,0x90,0x9c,0x9f,0xe6,0x98,0xe9 0x7a,0xed,0x2c,0x2e,0x4d,0xe5,0xca,0x80
}; };
static const byte KAT_authTag[] = static const byte KAT_authTag[] =
{ {
0xc9,0xd5,0x7a,0x77,0xac,0x28,0xc2,0xe7, 0x8d,0xf5,0x76,0xae,0x53,0x20,0x5d,0x9c,
0xe4,0x28,0x90,0xaa,0x09,0xab,0xf9,0x7c 0x01,0x64,0xcd,0xf2,0xec,0x7a,0x13,0x03
}; };
byte enc[sizeof(p_vector)]; byte enc[sizeof(p_vector)];
byte authTag[WC_AES_BLOCK_SIZE]; byte authTag[WC_AES_BLOCK_SIZE];
@@ -1665,7 +1784,7 @@ static int linuxkm_test_aesgcm(void)
aes_inited = 1; aes_inited = 1;
ret = wc_AesGcmInit(aes, key32, sizeof(key32)/sizeof(byte), ivstr, ret = wc_AesGcmInit(aes, key32, sizeof(key32)/sizeof(byte), ivstr,
WC_AES_BLOCK_SIZE); GCM_NONCE_MID_SZ);
if (ret) { if (ret) {
pr_err("error: wc_AesGcmInit failed with return code %d.\n", ret); pr_err("error: wc_AesGcmInit failed with return code %d.\n", ret);
goto test_gcm_end; goto test_gcm_end;
@@ -1705,7 +1824,7 @@ static int linuxkm_test_aesgcm(void)
} }
ret = wc_AesGcmInit(aes, key32, sizeof(key32)/sizeof(byte), ivstr, ret = wc_AesGcmInit(aes, key32, sizeof(key32)/sizeof(byte), ivstr,
WC_AES_BLOCK_SIZE); GCM_NONCE_MID_SZ);
if (ret) { if (ret) {
pr_err("error: wc_AesGcmInit failed with return code %d.\n", ret); pr_err("error: wc_AesGcmInit failed with return code %d.\n", ret);
goto test_gcm_end; goto test_gcm_end;
@@ -1747,7 +1866,7 @@ static int linuxkm_test_aesgcm(void)
goto test_gcm_end; goto test_gcm_end;
} }
memset(iv, 0, WC_AES_BLOCK_SIZE); memset(iv, 0, WC_AES_BLOCK_SIZE);
memcpy(iv, ivstr, WC_AES_BLOCK_SIZE); memcpy(iv, ivstr, GCM_NONCE_MID_SZ);
enc2 = malloc(decryptLen); enc2 = malloc(decryptLen);
if (IS_ERR(enc2)) { if (IS_ERR(enc2)) {

View File

@@ -3641,6 +3641,8 @@ extern void uITRON4_free(void *p) ;
#ifndef NO_OLD_SSL_NAMES #ifndef NO_OLD_SSL_NAMES
#define NO_OLD_SSL_NAMES #define NO_OLD_SSL_NAMES
#endif #endif
#undef WOLFSSL_MIN_AUTH_TAG_SZ
#define WOLFSSL_MIN_AUTH_TAG_SZ 4
#endif #endif