Merge pull request #8567 from douzzer/20250317-linuxkm-AES-GCM

20250317-linuxkm-AES-GCM
This commit is contained in:
philljj
2025-03-19 12:48:10 -04:00
committed by GitHub
3 changed files with 256 additions and 137 deletions

View File

@@ -9069,15 +9069,13 @@ then
for lkcapi_alg in $(echo "$ENABLED_LINUXKM_LKCAPI_REGISTER" | tr ',' ' ')
do
case "$lkcapi_alg" in
all) test "$ENABLED_EXPERIMENTAL" = "yes" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: requires --enable-experimental.])
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_ALL" ;;
all) AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_ALL" ;;
'cbc(aes)') test "$ENABLED_AESCBC" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-CBC implementation not enabled.])
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESCBC" ;;
'cfb(aes)') test "$ENABLED_AESCFB" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-CFB implementation not enabled.])
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESCFB" ;;
'gcm(aes)') test "$ENABLED_AESGCM" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-GCM implementation not enabled.])
test "$ENABLED_AESGCM_STREAM" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: --enable-aesgcm-stream is required for LKCAPI.])
test "$ENABLED_EXPERIMENTAL" = "yes" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: requires --enable-experimental.])
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESGCM" ;;
'xts(aes)') test "$ENABLED_AESXTS" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-XTS implementation not enabled.])
test "$ENABLED_AESXTS_STREAM" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: --enable-aesxts-stream is required for LKCAPI.])

View File

@@ -115,9 +115,6 @@ static int linuxkm_test_aescfb(void);
#if defined(HAVE_AESGCM) && \
(defined(LINUXKM_LKCAPI_REGISTER_ALL) || \
defined(LINUXKM_LKCAPI_REGISTER_AESGCM))
#ifndef WOLFSSL_EXPERIMENTAL_SETTINGS
#error Experimental settings without WOLFSSL_EXPERIMENTAL_SETTINGS
#endif
static int linuxkm_test_aesgcm(void);
#endif
#if defined(WOLFSSL_AES_XTS) && \
@@ -280,7 +277,8 @@ static int km_AesCbcEncrypt(struct skcipher_request *req)
struct km_AesCtx * ctx = NULL;
struct skcipher_walk walk;
unsigned int nbytes = 0;
int err = 0;
int err;
Aes *aes_copy;
tfm = crypto_skcipher_reqtfm(req);
ctx = crypto_skcipher_ctx(tfm);
@@ -291,22 +289,30 @@ static int km_AesCbcEncrypt(struct skcipher_request *req)
return err;
}
err = wc_AesSetIV(ctx->aes_encrypt, walk.iv);
/* Copy the cipher state to mitigate races on Aes.reg and Aes.tmp. */
aes_copy = (struct Aes *)malloc(sizeof(Aes));
if (aes_copy == NULL)
return -ENOMEM;
XMEMCPY(aes_copy, ctx->aes_encrypt, sizeof(Aes));
err = wc_AesSetIV(aes_copy, walk.iv);
if (unlikely(err)) {
pr_err("%s: wc_AesSetIV failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL;
err = -EINVAL;
goto out;
}
while ((nbytes = walk.nbytes) != 0) {
err = wc_AesCbcEncrypt(ctx->aes_encrypt, walk.dst.virt.addr,
err = wc_AesCbcEncrypt(aes_copy, walk.dst.virt.addr,
walk.src.virt.addr, nbytes & (~(WC_AES_BLOCK_SIZE - 1)));
if (unlikely(err)) {
pr_err("%s: wc_AesCbcEncrypt failed for %u bytes: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), nbytes, err);
return -EINVAL;
err = -EINVAL;
goto out;
}
nbytes &= WC_AES_BLOCK_SIZE - 1;
@@ -314,7 +320,11 @@ static int km_AesCbcEncrypt(struct skcipher_request *req)
}
/* copy iv from wolfCrypt back to walk.iv */
XMEMCPY(walk.iv, ctx->aes_encrypt->reg, WC_AES_BLOCK_SIZE);
XMEMCPY(walk.iv, aes_copy->reg, WC_AES_BLOCK_SIZE);
out:
free(aes_copy);
return err;
}
@@ -325,7 +335,8 @@ static int km_AesCbcDecrypt(struct skcipher_request *req)
struct km_AesCtx * ctx = NULL;
struct skcipher_walk walk;
unsigned int nbytes = 0;
int err = 0;
int err;
Aes *aes_copy;
tfm = crypto_skcipher_reqtfm(req);
ctx = crypto_skcipher_ctx(tfm);
@@ -336,23 +347,31 @@ static int km_AesCbcDecrypt(struct skcipher_request *req)
return err;
}
err = wc_AesSetIV(ctx->aes_decrypt, walk.iv);
/* Copy the cipher state to mitigate races on Aes.reg and Aes.tmp. */
aes_copy = (struct Aes *)malloc(sizeof(Aes));
if (aes_copy == NULL)
return -ENOMEM;
XMEMCPY(aes_copy, ctx->aes_decrypt, sizeof(Aes));
err = wc_AesSetIV(aes_copy, walk.iv);
if (unlikely(err)) {
if (! disable_setkey_warnings)
pr_err("%s: wc_AesSetKey failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL;
err = -EINVAL;
goto out;
}
while ((nbytes = walk.nbytes) != 0) {
err = wc_AesCbcDecrypt(ctx->aes_decrypt, walk.dst.virt.addr,
err = wc_AesCbcDecrypt(aes_copy, walk.dst.virt.addr,
walk.src.virt.addr, nbytes & (~(WC_AES_BLOCK_SIZE - 1)));
if (unlikely(err)) {
pr_err("%s: wc_AesCbcDecrypt failed for %u bytes: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), nbytes, err);
return -EINVAL;
err = -EINVAL;
goto out;
}
nbytes &= WC_AES_BLOCK_SIZE - 1;
@@ -360,7 +379,11 @@ static int km_AesCbcDecrypt(struct skcipher_request *req)
}
/* copy iv from wolfCrypt back to walk.iv */
XMEMCPY(walk.iv, ctx->aes_decrypt->reg, WC_AES_BLOCK_SIZE);
XMEMCPY(walk.iv, aes_copy->reg, WC_AES_BLOCK_SIZE);
out:
free(aes_copy);
return err;
}
@@ -409,7 +432,8 @@ static int km_AesCfbEncrypt(struct skcipher_request *req)
struct crypto_skcipher * tfm = NULL;
struct km_AesCtx * ctx = NULL;
struct skcipher_walk walk;
int err = 0;
int err;
Aes *aes_copy;
tfm = crypto_skcipher_reqtfm(req);
ctx = crypto_skcipher_ctx(tfm);
@@ -422,22 +446,30 @@ static int km_AesCfbEncrypt(struct skcipher_request *req)
return err;
}
err = wc_AesSetIV(ctx->aes_encrypt, walk.iv);
/* Copy the cipher state to mitigate races on Aes.reg and Aes.tmp. */
aes_copy = (struct Aes *)malloc(sizeof(Aes));
if (aes_copy == NULL)
return -ENOMEM;
XMEMCPY(aes_copy, ctx->aes_encrypt, sizeof(Aes));
err = wc_AesSetIV(aes_copy, walk.iv);
if (unlikely(err)) {
pr_err("%s: wc_AesSetIV failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL;
err = -EINVAL;
goto out;
}
while (walk.nbytes != 0) {
err = wc_AesCfbEncrypt(ctx->aes_encrypt, walk.dst.virt.addr,
err = wc_AesCfbEncrypt(aes_copy, walk.dst.virt.addr,
walk.src.virt.addr, walk.nbytes);
if (unlikely(err)) {
pr_err("%s: wc_AesCfbEncrypt failed %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL;
err = -EINVAL;
goto out;
}
err = skcipher_walk_done(&walk, 0);
@@ -450,7 +482,11 @@ static int km_AesCfbEncrypt(struct skcipher_request *req)
}
/* copy iv from wolfCrypt back to walk.iv */
XMEMCPY(walk.iv, ctx->aes_encrypt->reg, WC_AES_BLOCK_SIZE);
XMEMCPY(walk.iv, aes_copy->reg, WC_AES_BLOCK_SIZE);
out:
free(aes_copy);
return err;
}
@@ -460,7 +496,8 @@ static int km_AesCfbDecrypt(struct skcipher_request *req)
struct crypto_skcipher * tfm = NULL;
struct km_AesCtx * ctx = NULL;
struct skcipher_walk walk;
int err = 0;
int err;
Aes *aes_copy;
tfm = crypto_skcipher_reqtfm(req);
ctx = crypto_skcipher_ctx(tfm);
@@ -473,23 +510,34 @@ static int km_AesCfbDecrypt(struct skcipher_request *req)
return err;
}
err = wc_AesSetIV(ctx->aes_encrypt, walk.iv);
/* Copy the cipher state to mitigate races on Aes.reg and Aes.tmp. */
aes_copy = (struct Aes *)malloc(sizeof(Aes));
if (aes_copy == NULL)
return -ENOMEM;
XMEMCPY(aes_copy, ctx->aes_encrypt, sizeof(Aes)); /* CFB uses the same
* schedule for encrypt
* and decrypt.
*/
err = wc_AesSetIV(aes_copy, walk.iv);
if (unlikely(err)) {
if (! disable_setkey_warnings)
pr_err("%s: wc_AesSetIV failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL;
err = -EINVAL;
goto out;
}
while (walk.nbytes != 0) {
err = wc_AesCfbDecrypt(ctx->aes_encrypt, walk.dst.virt.addr,
err = wc_AesCfbDecrypt(aes_copy, walk.dst.virt.addr,
walk.src.virt.addr, walk.nbytes);
if (unlikely(err)) {
pr_err("%s: wc_AesCfbDecrypt failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL;
err = -EINVAL;
goto out;
}
err = skcipher_walk_done(&walk, 0);
@@ -497,12 +545,16 @@ static int km_AesCfbDecrypt(struct skcipher_request *req)
if (unlikely(err)) {
pr_err("%s: skcipher_walk_done failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return err;
goto out;
}
}
/* copy iv from wolfCrypt back to walk.iv */
XMEMCPY(walk.iv, ctx->aes_encrypt->reg, WC_AES_BLOCK_SIZE);
XMEMCPY(walk.iv, aes_copy->reg, WC_AES_BLOCK_SIZE);
out:
free(aes_copy);
return err;
}
@@ -570,14 +622,24 @@ static int km_AesGcmSetKey(struct crypto_aead *tfm, const u8 *in_key,
static int km_AesGcmSetAuthsize(struct crypto_aead *tfm, unsigned int authsize)
{
(void)tfm;
if (authsize > WC_AES_BLOCK_SIZE ||
authsize < WOLFSSL_MIN_AUTH_TAG_SZ) {
switch (authsize) {
case 4:
case 8:
case 12:
case 13:
case 14:
case 15:
case 16:
return 0;
}
#ifdef WOLFSSL_LINUXKM_VERBOSE_LKCAPI_DEBUG
pr_err("%s: invalid authsize: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), authsize);
#endif
return -EINVAL;
}
return 0;
}
/*
* aead ciphers receive data in scatterlists in following order:
@@ -595,19 +657,14 @@ static int km_AesGcmEncrypt(struct aead_request *req)
struct km_AesCtx * ctx = NULL;
struct skcipher_walk walk;
struct scatter_walk assocSgWalk;
unsigned int nbytes = 0;
u8 authTag[WC_AES_BLOCK_SIZE];
int err = 0;
unsigned int assocLeft = 0;
unsigned int cryptLeft = 0;
int err;
u8 * assoc = NULL;
u8 * assocmem = NULL;
Aes *aes_copy;
tfm = crypto_aead_reqtfm(req);
ctx = crypto_aead_ctx(tfm);
assocLeft = req->assoclen;
cryptLeft = req->cryptlen;
scatterwalk_start(&assocSgWalk, req->src);
err = skcipher_walk_aead_encrypt(&walk, req, false);
if (unlikely(err)) {
@@ -616,70 +673,94 @@ static int km_AesGcmEncrypt(struct aead_request *req)
return -EINVAL;
}
err = wc_AesGcmInit(ctx->aes_encrypt, NULL /*key*/, 0 /*keylen*/, walk.iv,
WC_AES_BLOCK_SIZE);
/* Copy the cipher state to mitigate races on Aes.reg, Aes.tmp, and
* aes->streamData.
*/
aes_copy = (struct Aes *)malloc(sizeof(Aes));
if (aes_copy == NULL)
return -ENOMEM;
XMEMCPY(aes_copy, ctx->aes_encrypt, sizeof(Aes));
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_AESNI)
aes_copy->streamData = NULL;
#endif
err = wc_AesGcmInit(aes_copy, NULL /*key*/, 0 /*keylen*/, walk.iv,
GCM_NONCE_MID_SZ);
if (unlikely(err)) {
pr_err("%s: wc_AesGcmInit failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL;
err = -EINVAL;
goto out;
}
if (req->src->length >= req->assoclen && req->src->length) {
scatterwalk_start(&assocSgWalk, req->src);
assoc = scatterwalk_map(&assocSgWalk);
if (unlikely(IS_ERR(assoc))) {
pr_err("%s: scatterwalk_map failed: %ld\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)),
PTR_ERR(assoc));
return err;
goto out;
}
}
else {
/* assoc can be any length, so if it's noncontiguous, we have to copy it
* to a contiguous heap allocation.
*/
assocmem = malloc(req->assoclen);
if (unlikely(assocmem == NULL)) {
err = -ENOMEM;
goto out;
}
assoc = assocmem;
scatterwalk_map_and_copy(assoc, req->src, 0, req->assoclen, 0);
}
err = wc_AesGcmEncryptUpdate(ctx->aes_encrypt, NULL, NULL, 0,
assoc, assocLeft);
assocLeft -= assocLeft;
err = wc_AesGcmEncryptUpdate(aes_copy, NULL, NULL, 0,
assoc, req->assoclen);
if (assocmem)
free(assocmem);
else
scatterwalk_unmap(assoc);
assoc = NULL;
if (unlikely(err)) {
pr_err("%s: wc_AesGcmEncryptUpdate failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL;
err = -EINVAL;
goto out;
}
while ((nbytes = walk.nbytes) != 0) {
int n = nbytes;
if (likely(cryptLeft && nbytes)) {
n = cryptLeft < nbytes ? cryptLeft : nbytes;
while (walk.nbytes) {
err = wc_AesGcmEncryptUpdate(
ctx->aes_encrypt,
aes_copy,
walk.dst.virt.addr,
walk.src.virt.addr,
cryptLeft,
walk.nbytes,
NULL, 0);
nbytes -= n;
cryptLeft -= n;
}
if (unlikely(err)) {
pr_err("%s: wc_AesGcmEncryptUpdate failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL;
err = -EINVAL;
goto out;
}
err = skcipher_walk_done(&walk, nbytes);
err = skcipher_walk_done(&walk, 0);
if (unlikely(err)) {
pr_err("%s: skcipher_walk_done failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return err;
goto out;
}
}
err = wc_AesGcmEncryptFinal(ctx->aes_encrypt, authTag, tfm->authsize);
err = wc_AesGcmEncryptFinal(aes_copy, authTag, tfm->authsize);
if (unlikely(err)) {
pr_err("%s: wc_AesGcmEncryptFinal failed with return code %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL;
err = -EINVAL;
goto out;
}
/* Now copy the auth tag into request scatterlist. */
@@ -687,6 +768,13 @@ static int km_AesGcmEncrypt(struct aead_request *req)
req->assoclen + req->cryptlen,
tfm->authsize, 1);
out:
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_AESNI)
free(aes_copy->streamData);
#endif
free(aes_copy);
return err;
}
@@ -696,25 +784,20 @@ static int km_AesGcmDecrypt(struct aead_request *req)
struct km_AesCtx * ctx = NULL;
struct skcipher_walk walk;
struct scatter_walk assocSgWalk;
unsigned int nbytes = 0;
u8 origAuthTag[WC_AES_BLOCK_SIZE];
int err = 0;
unsigned int assocLeft = 0;
unsigned int cryptLeft = 0;
int err;
u8 * assoc = NULL;
u8 * assocmem = NULL;
Aes *aes_copy;
tfm = crypto_aead_reqtfm(req);
ctx = crypto_aead_ctx(tfm);
assocLeft = req->assoclen;
cryptLeft = req->cryptlen - tfm->authsize;
/* Copy out original auth tag from req->src. */
scatterwalk_map_and_copy(origAuthTag, req->src,
req->assoclen + req->cryptlen - tfm->authsize,
tfm->authsize, 0);
scatterwalk_start(&assocSgWalk, req->src);
err = skcipher_walk_aead_decrypt(&walk, req, false);
if (unlikely(err)) {
pr_err("%s: skcipher_walk_aead_decrypt failed: %d\n",
@@ -722,78 +805,114 @@ static int km_AesGcmDecrypt(struct aead_request *req)
return err;
}
err = wc_AesGcmInit(ctx->aes_encrypt, NULL /*key*/, 0 /*keylen*/, walk.iv,
WC_AES_BLOCK_SIZE);
/* Copy the cipher state to mitigate races on Aes.reg, Aes.tmp, and
* aes->streamData.
*/
aes_copy = (struct Aes *)malloc(sizeof(Aes));
if (aes_copy == NULL)
return -ENOMEM;
XMEMCPY(aes_copy, ctx->aes_encrypt, sizeof(Aes)); /* GCM uses the same
* schedule for encrypt
* and decrypt.
*/
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_AESNI)
aes_copy->streamData = NULL;
#endif
err = wc_AesGcmInit(aes_copy, NULL /*key*/, 0 /*keylen*/, walk.iv,
GCM_NONCE_MID_SZ);
if (unlikely(err)) {
pr_err("%s: wc_AesGcmInit failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL;
err = -EINVAL;
goto out;
}
if (req->src->length >= req->assoclen && req->src->length) {
scatterwalk_start(&assocSgWalk, req->src);
assoc = scatterwalk_map(&assocSgWalk);
if (unlikely(IS_ERR(assoc))) {
pr_err("%s: scatterwalk_map failed: %ld\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)),
PTR_ERR(assoc));
return err;
goto out;
}
}
else {
/* assoc can be any length, so if it's noncontiguous, we have to copy it
* to a contiguous heap allocation.
*/
assocmem = malloc(req->assoclen);
if (unlikely(assocmem == NULL)) {
err = -ENOMEM;
goto out;
}
assoc = assocmem;
scatterwalk_map_and_copy(assoc, req->src, 0, req->assoclen, 0);
}
err = wc_AesGcmDecryptUpdate(ctx->aes_encrypt, NULL, NULL, 0,
assoc, assocLeft);
assocLeft -= assocLeft;
err = wc_AesGcmDecryptUpdate(aes_copy, NULL, NULL, 0,
assoc, req->assoclen);
if (assocmem)
free(assocmem);
else
scatterwalk_unmap(assoc);
assoc = NULL;
if (unlikely(err)) {
pr_err("%s: wc_AesGcmDecryptUpdate failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL;
err = -EINVAL;
goto out;
}
while ((nbytes = walk.nbytes) != 0) {
int n = nbytes;
if (likely(cryptLeft && nbytes)) {
n = cryptLeft < nbytes ? cryptLeft : nbytes;
while (walk.nbytes) {
err = wc_AesGcmDecryptUpdate(
ctx->aes_encrypt,
aes_copy,
walk.dst.virt.addr,
walk.src.virt.addr,
cryptLeft,
walk.nbytes,
NULL, 0);
nbytes -= n;
cryptLeft -= n;
}
if (unlikely(err)) {
pr_err("%s: wc_AesGcmDecryptUpdate failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return -EINVAL;
err = -EINVAL;
goto out;
}
err = skcipher_walk_done(&walk, nbytes);
err = skcipher_walk_done(&walk, 0);
if (unlikely(err)) {
pr_err("%s: skcipher_walk_done failed: %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
return err;
goto out;
}
}
err = wc_AesGcmDecryptFinal(ctx->aes_encrypt, origAuthTag, tfm->authsize);
err = wc_AesGcmDecryptFinal(aes_copy, origAuthTag, tfm->authsize);
if (unlikely(err)) {
#ifdef WOLFSSL_LINUXKM_VERBOSE_LKCAPI_DEBUG
pr_err("%s: wc_AesGcmDecryptFinal failed with return code %d\n",
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)), err);
#endif
if (err == WC_NO_ERR_TRACE(AES_GCM_AUTH_E)) {
return -EBADMSG;
err = -EBADMSG;
goto out;
}
else {
return -EINVAL;
err = -EINVAL;
goto out;
}
}
out:
#if defined(WOLFSSL_SMALL_STACK) && !defined(WOLFSSL_AESNI)
free(aes_copy->streamData);
#endif
free(aes_copy);
return err;
}
@@ -810,7 +929,7 @@ static struct aead_alg gcmAesAead = {
.setauthsize = km_AesGcmSetAuthsize,
.encrypt = km_AesGcmEncrypt,
.decrypt = km_AesGcmDecrypt,
.ivsize = WC_AES_BLOCK_SIZE,
.ivsize = GCM_NONCE_MID_SZ,
.maxauthsize = WC_AES_BLOCK_SIZE,
.chunksize = WC_AES_BLOCK_SIZE,
};
@@ -892,7 +1011,7 @@ static int km_AesXtsSetKey(struct crypto_skcipher *tfm, const u8 *in_key,
static int km_AesXtsEncrypt(struct skcipher_request *req)
{
int err = 0;
int err;
struct crypto_skcipher * tfm = NULL;
struct km_AesXtsCtx * ctx = NULL;
struct skcipher_walk walk;
@@ -1024,7 +1143,7 @@ static int km_AesXtsEncrypt(struct skcipher_request *req)
static int km_AesXtsDecrypt(struct skcipher_request *req)
{
int err = 0;
int err;
struct crypto_skcipher * tfm = NULL;
struct km_AesXtsCtx * ctx = NULL;
struct skcipher_walk walk;
@@ -1626,17 +1745,17 @@ static int linuxkm_test_aesgcm(void)
0xfe, 0xed, 0xfa, 0xce, 0xde, 0xad, 0xbe, 0xef,
0xab, 0xad, 0xda, 0xd2
};
static const byte ivstr[] = "1234567890abcdef";
static const byte ivstr[] = "1234567890ab";
static const byte c_vector[] =
{
0x0c,0x97,0x05,0x3c,0xef,0x5c,0x63,0x6b,
0x15,0xe4,0x00,0x63,0xf8,0x8c,0xd0,0x95,
0x27,0x81,0x90,0x9c,0x9f,0xe6,0x98,0xe9
0x80,0xb9,0x00,0xdc,0x03,0xb8,0x0e,0xaa,
0x98,0x09,0x75,0x01,0x40,0x09,0xb0,0xc3,
0x7a,0xed,0x2c,0x2e,0x4d,0xe5,0xca,0x80
};
static const byte KAT_authTag[] =
{
0xc9,0xd5,0x7a,0x77,0xac,0x28,0xc2,0xe7,
0xe4,0x28,0x90,0xaa,0x09,0xab,0xf9,0x7c
0x8d,0xf5,0x76,0xae,0x53,0x20,0x5d,0x9c,
0x01,0x64,0xcd,0xf2,0xec,0x7a,0x13,0x03
};
byte enc[sizeof(p_vector)];
byte authTag[WC_AES_BLOCK_SIZE];
@@ -1665,7 +1784,7 @@ static int linuxkm_test_aesgcm(void)
aes_inited = 1;
ret = wc_AesGcmInit(aes, key32, sizeof(key32)/sizeof(byte), ivstr,
WC_AES_BLOCK_SIZE);
GCM_NONCE_MID_SZ);
if (ret) {
pr_err("error: wc_AesGcmInit failed with return code %d.\n", ret);
goto test_gcm_end;
@@ -1705,7 +1824,7 @@ static int linuxkm_test_aesgcm(void)
}
ret = wc_AesGcmInit(aes, key32, sizeof(key32)/sizeof(byte), ivstr,
WC_AES_BLOCK_SIZE);
GCM_NONCE_MID_SZ);
if (ret) {
pr_err("error: wc_AesGcmInit failed with return code %d.\n", ret);
goto test_gcm_end;
@@ -1747,7 +1866,7 @@ static int linuxkm_test_aesgcm(void)
goto test_gcm_end;
}
memset(iv, 0, WC_AES_BLOCK_SIZE);
memcpy(iv, ivstr, WC_AES_BLOCK_SIZE);
memcpy(iv, ivstr, GCM_NONCE_MID_SZ);
enc2 = malloc(decryptLen);
if (IS_ERR(enc2)) {

View File

@@ -3641,6 +3641,8 @@ extern void uITRON4_free(void *p) ;
#ifndef NO_OLD_SSL_NAMES
#define NO_OLD_SSL_NAMES
#endif
#undef WOLFSSL_MIN_AUTH_TAG_SZ
#define WOLFSSL_MIN_AUTH_TAG_SZ 4
#endif