add WOLFSSL_AESXTS_STREAM, --enable-aesxts-stream, wc_AesXtsEncryptStart(), wc_AesXtsDecryptStart(), wc_AesXtsEncryptUpdate(), and wc_AesXtsDecryptUpdate(), and update linuxkm/lkcapi_glue.c to use the streaming API when needed. also add support for 2*192-bit AES-XTS keys, needed by the Linux kernel.

Daniel Pouzzner
2024-05-11 02:36:27 -05:00
parent 28bd4ebeea
commit 70d7b6e48b
5 changed files with 784 additions and 54 deletions
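For orientation, here is a minimal sketch of the call sequence the new streaming API is meant for (the helper name, chunk size, and error handling are illustrative assumptions, not part of this commit): derive the tweak once with wc_AesXtsEncryptStart(), feed block-multiple chunks to wc_AesXtsEncryptUpdate(), and keep the last full block plus any partial tail for the final call so ciphertext stealing is applied exactly once.

#include <wolfssl/wolfcrypt/aes.h>
#include <wolfssl/wolfcrypt/error-crypt.h>

/* Illustrative streaming encrypt driver (hypothetical helper, not in the
 * commit). xaes must already be set up, e.g. with wc_AesXtsInit() and
 * wc_AesXtsSetKeyNoInit(); iv is the 16-byte tweak. */
static int xts_stream_encrypt(XtsAes *xaes, byte *out, const byte *in,
                              word32 sz, const byte *iv)
{
    byte tweak_block[AES_BLOCK_SIZE];
    word32 tail = sz % AES_BLOCK_SIZE;
    /* every call except the last must be a block multiple; the last call
     * gets the final full block plus any partial block. */
    word32 last = (tail != 0) ? (AES_BLOCK_SIZE + tail) : AES_BLOCK_SIZE;
    word32 off = 0;
    int ret;

    if (sz < AES_BLOCK_SIZE)
        return BAD_FUNC_ARG;

    ret = wc_AesXtsEncryptStart(xaes, iv, AES_BLOCK_SIZE, tweak_block);

    while ((ret == 0) && (sz - off > last)) {
        word32 chunk = 4096; /* arbitrary block-multiple chunk size */
        if (chunk > sz - off - last)
            chunk = sz - off - last;
        ret = wc_AesXtsEncryptUpdate(xaes, out + off, in + off, chunk,
                                     tweak_block);
        off += chunk;
    }
    if (ret == 0)
        ret = wc_AesXtsEncryptUpdate(xaes, out + off, in + off, sz - off,
                                     tweak_block);
    return ret;
}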

configure.ac

@@ -939,6 +939,7 @@ then
if test "$ENABLED_FIPS" = "no" || test "$HAVE_FIPS_VERSION" -ge 6 || test "$FIPS_VERSION" = "v5-dev"; then
test "$enable_aesxts" = "" && enable_aesxts=yes
test "$enable_aesxts_stream" = "" && test "$enable_aesxts" = "yes" && enable_aesxts_stream=yes
test "$enable_aessiv" = "" && enable_aessiv=yes
fi
@@ -1078,6 +1079,7 @@ then
if test "$ENABLED_FIPS" = "no" || test "$HAVE_FIPS_VERSION" -ge 6 || test "$FIPS_VERSION" = "v5-dev"; then
test "$enable_aesxts" = "" && enable_aesxts=yes
test "$enable_aesxts_stream" = "" && test "$enable_aesxts" = "yes" && enable_aesxts_stream=yes
test "$enable_aessiv" = "" && enable_aessiv=yes
fi
@@ -4847,6 +4849,11 @@ AC_ARG_ENABLE([aesxts],
[ ENABLED_AESXTS=$enableval ],
[ ENABLED_AESXTS=no ]
)
AC_ARG_ENABLE([aesxts-stream],
[AS_HELP_STRING([--enable-aesxts-stream],[Enable wolfSSL AES-XTS support with streaming APIs (default: disabled)])],
[ ENABLED_AESXTS_STREAM=$enableval ],
[ ENABLED_AESXTS_STREAM=$ENABLED_AESXTS ]
)
# legacy old option name, for compatibility:
AC_ARG_ENABLE([xts],
@@ -5070,6 +5077,11 @@ AS_CASE([$FIPS_VERSION],
AS_IF([test "x$ENABLED_AESXTS" = "xyes" && test "x$ENABLED_AESNI" = "xyes"],
[AM_CCASFLAGS="$AM_CCASFLAGS -DWOLFSSL_AES_XTS"])
AS_IF([test "x$ENABLED_AESXTS_STREAM" = "xno"],
[ENABLED_AESXTS_STREAM="yes"; AM_CFLAGS="$AM_CFLAGS -DWOLFSSL_AESXTS_STREAM"])
AS_IF([test "x$ENABLED_AESXTS_STREAM" = "xyes" && test "x$ENABLED_AESNI" = "xyes"],
[AM_CCASFLAGS="$AM_CCASFLAGS -DWOLFSSL_AESXTS_STREAM"])
AS_IF([(test "$ENABLED_AESCCM" = "yes" && test "$HAVE_AESCCM_PORT" != "yes") ||
(test "$ENABLED_AESCTR" = "yes" && test "$HAVE_AESCTR_PORT" != "yes") ||
(test "$ENABLED_AESGCM" = "yes" && test "$HAVE_AESGCM_PORT" != "yes") ||
@@ -8360,6 +8372,7 @@ then
test "$ENABLED_AESGCM_STREAM" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: --enable-aesgcm-stream is required for LKCAPI.])
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESGCM" ;;
'xts(aes)') test "$ENABLED_AESXTS" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: AES-XTS implementation not enabled.])
test "$ENABLED_AESXTS_STREAM" != "no" || AC_MSG_ERROR([linuxkm-lkcapi-register ${lkcapi_alg}: --enable-aesxts-stream is required for LKCAPI.])
AM_CFLAGS="$AM_CFLAGS -DLINUXKM_LKCAPI_REGISTER_AESXTS" ;;
*) AC_MSG_ERROR([Unsupported LKCAPI algorithm "$lkcapi_alg".]) ;;
esac
@@ -9009,6 +9022,17 @@ then
fi
fi
if test "$ENABLED_AESXTS_STREAM" != "no"
then
if test "$ENABLED_AESXTS" = "no"
then
AC_MSG_ERROR([AES-XTS streaming enabled but AES-XTS is disabled])
else
AM_CFLAGS="$AM_CFLAGS -DWOLFSSL_AESXTS_STREAM"
AM_CCASFLAGS="$AM_CCASFLAGS -DWOLFSSL_AESXTS_STREAM"
fi
fi
if test "$ENABLED_IOTSAFE" != "no"
then
AM_CFLAGS="$AM_CFLAGS -DWOLFSSL_IOTSAFE"
@@ -9769,6 +9793,7 @@ echo " * AES-CTR: $ENABLED_AESCTR"
echo " * AES-CFB: $ENABLED_AESCFB"
echo " * AES-OFB: $ENABLED_AESOFB"
echo " * AES-XTS: $ENABLED_AESXTS"
echo " * AES-XTS streaming: $ENABLED_AESXTS_STREAM"
echo " * AES-SIV: $ENABLED_AESSIV"
echo " * AES-EAX: $ENABLED_AESEAX"
echo " * AES Bitspliced: $ENABLED_AESBS"

linuxkm/lkcapi_glue.c

@@ -790,6 +790,10 @@ static int gcmAesAead_loaded = 0;
(defined(LINUXKM_LKCAPI_REGISTER_ALL) || \
defined(LINUXKM_LKCAPI_REGISTER_AESXTS))
#ifndef WOLFSSL_AESXTS_STREAM
#error LKCAPI registration of AES-XTS requires WOLFSSL_AESXTS_STREAM (--enable-aesxts-stream).
#endif
struct km_AesXtsCtx {
XtsAes *aesXts; /* allocated in km_AesXtsInitCommon() to assure alignment
* for AESNI.
@@ -835,6 +839,16 @@ static int km_AesXtsSetKey(struct crypto_skcipher *tfm, const u8 *in_key,
int err;
struct km_AesXtsCtx * ctx = crypto_skcipher_ctx(tfm);
/* filter bad keysizes here, to avoid console noise from
* CONFIG_CRYPTO_MANAGER_EXTRA_TESTS.
*/
if ((key_len != (AES_128_KEY_SIZE*2)) &&
(key_len != (AES_192_KEY_SIZE*2)) &&
(key_len != (AES_256_KEY_SIZE*2)))
{
return -EINVAL;
}
err = wc_AesXtsSetKeyNoInit(ctx->aesXts, in_key, key_len,
AES_ENCRYPTION_AND_DECRYPTION);
@@ -852,7 +866,6 @@ static int km_AesXtsSetKey(struct crypto_skcipher *tfm, const u8 *in_key,
static int km_AesXtsEncrypt(struct skcipher_request *req)
{
int err = 0;
struct crypto_skcipher * tfm = NULL;
struct km_AesXtsCtx * ctx = NULL;
struct skcipher_walk walk;
@@ -861,6 +874,9 @@ static int km_AesXtsEncrypt(struct skcipher_request *req)
tfm = crypto_skcipher_reqtfm(req);
ctx = crypto_skcipher_ctx(tfm);
if (req->cryptlen < AES_BLOCK_SIZE)
return -EINVAL;
err = skcipher_walk_virt(&walk, req, false);
if (unlikely(err)) {
@@ -869,10 +885,9 @@ static int km_AesXtsEncrypt(struct skcipher_request *req)
return err;
}
if (walk.nbytes == walk.total) {
err = wc_AesXtsEncrypt(ctx->aesXts, walk.dst.virt.addr,
walk.src.virt.addr, walk.nbytes, walk.iv, walk.ivsize);
if (unlikely(err)) {
pr_err("%s: wc_AesXtsEncrypt failed: %d\n",
@@ -880,12 +895,91 @@ static int km_AesXtsEncrypt(struct skcipher_request *req)
return -EINVAL;
}
err = skcipher_walk_done(&walk, 0);
} else {
int tail = req->cryptlen % AES_BLOCK_SIZE;
struct skcipher_request subreq;
byte tweak_block[AES_BLOCK_SIZE];
if (tail > 0) {
int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
skcipher_walk_abort(&walk);
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq,
skcipher_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(&subreq, req->src, req->dst,
blocks * AES_BLOCK_SIZE, req->iv);
req = &subreq;
err = skcipher_walk_virt(&walk, req, false);
if (!walk.nbytes)
return err;
} else {
tail = 0;
}
err = wc_AesXtsEncryptStart(ctx->aesXts, walk.iv, walk.ivsize,
tweak_block);
if (unlikely(err)) {
pr_err("%s: skcipher_walk_done failed: %d\n",
pr_err("%s: wc_AesXtsEncryptStart failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return err;
return -EINVAL;
}
while ((nbytes = walk.nbytes) != 0) {
if (nbytes < walk.total)
nbytes &= ~(AES_BLOCK_SIZE - 1);
err = wc_AesXtsEncryptUpdate(ctx->aesXts, walk.dst.virt.addr,
walk.src.virt.addr, nbytes,
tweak_block);
if (unlikely(err)) {
pr_err("%s: wc_AesXtsEncryptUpdate failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL;
}
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
if (unlikely(err)) {
pr_err("%s: skcipher_walk_done failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return err;
}
}
if (unlikely(tail > 0 && !err)) {
struct scatterlist sg_src[2], sg_dst[2];
struct scatterlist *src, *dst;
dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
if (req->dst != req->src)
dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
req->iv);
err = skcipher_walk_virt(&walk, &subreq, false);
if (err)
return err;
err = wc_AesXtsEncryptUpdate(ctx->aesXts, walk.dst.virt.addr,
walk.src.virt.addr, walk.nbytes,
tweak_block);
if (unlikely(err)) {
pr_err("%s: wc_AesXtsEncryptUpdate failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL;
}
err = skcipher_walk_done(&walk, 0);
}
}
@@ -903,6 +997,9 @@ static int km_AesXtsDecrypt(struct skcipher_request *req)
tfm = crypto_skcipher_reqtfm(req);
ctx = crypto_skcipher_ctx(tfm);
if (req->cryptlen < AES_BLOCK_SIZE)
return -EINVAL;
err = skcipher_walk_virt(&walk, req, false);
if (unlikely(err)) {
@@ -911,26 +1008,106 @@ static int km_AesXtsDecrypt(struct skcipher_request *req)
return err;
}
if (walk.nbytes == walk.total) {
err = wc_AesXtsDecrypt(ctx->aesXts,
walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes, walk.iv, walk.ivsize);
if (unlikely(err)) {
pr_err("%s: wc_AesCbcDecrypt failed: %d\n",
pr_err("%s: wc_AesXtsDecrypt failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL;
}
err = skcipher_walk_done(&walk, 0);
} else {
int tail = req->cryptlen % AES_BLOCK_SIZE;
struct skcipher_request subreq;
byte tweak_block[AES_BLOCK_SIZE];
if (unlikely(tail > 0)) {
int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
skcipher_walk_abort(&walk);
skcipher_request_set_tfm(&subreq, tfm);
skcipher_request_set_callback(&subreq,
skcipher_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(&subreq, req->src, req->dst,
blocks * AES_BLOCK_SIZE, req->iv);
req = &subreq;
err = skcipher_walk_virt(&walk, req, false);
if (!walk.nbytes)
return err;
} else {
tail = 0;
}
err = wc_AesXtsDecryptStart(ctx->aesXts, walk.iv, walk.ivsize,
tweak_block);
if (unlikely(err)) {
pr_err("%s: skcipher_walk_done failed: %d\n",
pr_err("%s: wc_AesXtsDecryptStart failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return err;
return -EINVAL;
}
while ((nbytes = walk.nbytes) != 0) {
if (nbytes < walk.total)
nbytes &= ~(AES_BLOCK_SIZE - 1);
err = wc_AesXtsDecryptUpdate(ctx->aesXts, walk.dst.virt.addr,
walk.src.virt.addr, nbytes,
tweak_block);
if (unlikely(err)) {
pr_err("%s: wc_AesXtsDecryptUpdate failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL;
}
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
if (unlikely(err)) {
pr_err("%s: skcipher_walk_done failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return err;
}
}
if (unlikely(tail > 0 && !err)) {
struct scatterlist sg_src[2], sg_dst[2];
struct scatterlist *src, *dst;
dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
if (req->dst != req->src)
dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
req->iv);
err = skcipher_walk_virt(&walk, &subreq, false);
if (err)
return err;
err = wc_AesXtsDecryptUpdate(ctx->aesXts, walk.dst.virt.addr,
walk.src.virt.addr, walk.nbytes,
tweak_block);
if (unlikely(err)) {
pr_err("%s: wc_AesXtsDecryptUpdate failed: %d\n",
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)), err);
return -EINVAL;
}
err = skcipher_walk_done(&walk, 0);
}
}
return err;
}
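To make the tail handling in km_AesXtsEncrypt()/km_AesXtsDecrypt() above concrete, the split performed when req->cryptlen is not a block multiple can be sketched as follows (hypothetical helper for illustration only): the bulk subrequest covers everything except the last full block plus the partial tail, and that remainder goes to a single final wc_AesXts*Update() call so the ciphertext-stealing step sees the last two blocks together.

/* Worked example of the DIV_ROUND_UP(cryptlen, AES_BLOCK_SIZE) - 2 split
 * used above (illustrative, not part of the commit). For cryptlen = 100:
 * tail = 4, blocks = 7 - 2 = 5, bulk = 80 bytes, final = 16 + 4 = 20 bytes. */
static void xts_tail_split(unsigned int cryptlen,
                           unsigned int *bulk_len, unsigned int *final_len)
{
    const unsigned int bs = 16; /* AES_BLOCK_SIZE */
    unsigned int tail = cryptlen % bs;

    if (tail == 0) {
        /* block-multiple request: no split needed */
        *bulk_len  = cryptlen;
        *final_len = 0;
    }
    else {
        unsigned int blocks = ((cryptlen + bs - 1) / bs) - 2; /* DIV_ROUND_UP - 2 */
        *bulk_len  = blocks * bs;
        *final_len = bs + tail; /* last full block + partial block */
    }
}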

View File

@@ -189,8 +189,7 @@ static struct wc_thread_fpu_count_ent *wc_linuxkm_fpu_state_assoc(int create_p)
* dependency loop on intelasm builds, we allocate here.
* this is not thread-safe and doesn't need to be.
*/
if ((! create_p) || (allocate_wolfcrypt_linuxkm_fpu_states() != 0))
#endif
{
if (_warned_on_null == 0) {

wolfcrypt/src/aes.c

@@ -12336,12 +12336,16 @@ int wc_AesXtsSetKeyNoInit(XtsAes* aes, const byte* key, word32 len, int dir)
return BAD_FUNC_ARG;
}
if ((len != (AES_128_KEY_SIZE*2)) &&
(len != (AES_192_KEY_SIZE*2)) &&
(len != (AES_256_KEY_SIZE*2)))
{
WOLFSSL_MSG("Unsupported key size");
return WC_KEY_SIZE_E;
}
keySz = len/2;
#ifdef HAVE_FIPS
if (XMEMCMP(key, key + keySz, keySz) == 0) {
WOLFSSL_MSG("FIPS AES-XTS main and tweak keys must differ");
@@ -12590,7 +12594,6 @@ static WARN_UNUSED_RESULT int _AesXtsHelper(
}
#endif /* HAVE_AES_ECB */
/* AES with XTS mode. (XTS) XEX encryption with Tweak and cipher text Stealing.
*
* xaes AES keys to use for block encrypt/decrypt
@@ -12605,25 +12608,20 @@ static WARN_UNUSED_RESULT int _AesXtsHelper(
static int AesXtsEncrypt_sw(XtsAes* xaes, byte* out, const byte* in, word32 sz,
const byte* i)
{
int ret;
word32 blocks = (sz / AES_BLOCK_SIZE);
Aes *aes = &xaes->aes;
byte tweak_block[AES_BLOCK_SIZE];
ret = wc_AesEncryptDirect(&xaes->tweak, tweak_block, i);
if (ret != 0)
return ret;
#ifdef HAVE_AES_ECB
/* encrypt all of buffer at once when possible */
if (in != out) { /* can not handle inline */
XMEMCPY(out, tweak_block, AES_BLOCK_SIZE);
if ((ret = _AesXtsHelper(&xaes->aes, out, in, sz, AES_ENCRYPTION)) != 0)
return ret;
}
#endif
@@ -12639,23 +12637,23 @@ static int AesXtsEncrypt_sw(XtsAes* xaes, byte* out, const byte* in, word32 sz,
byte buf[AES_BLOCK_SIZE];
XMEMCPY(buf, in, AES_BLOCK_SIZE);
xorbuf(buf, tweak_block, AES_BLOCK_SIZE);
ret = wc_AesEncryptDirect(aes, out, buf);
if (ret != 0)
return ret;
}
xorbuf(out, tweak_block, AES_BLOCK_SIZE);
/* multiply by shift left and propagate carry */
for (j = 0; j < AES_BLOCK_SIZE; j++) {
byte tmpC;
tmpC = (tweak_block[j] >> 7) & 0x01;
tweak_block[j] = (byte)((tweak_block[j] << 1) + carry);
carry = tmpC;
}
if (carry) {
tweak_block[0] ^= GF_XTS;
}
in += AES_BLOCK_SIZE;
@@ -12684,15 +12682,123 @@ static int AesXtsEncrypt_sw(XtsAes* xaes, byte* out, const byte* in, word32 sz,
XMEMCPY(out, buf2, sz);
}
xorbuf(buf, tweak_block, AES_BLOCK_SIZE);
ret = wc_AesEncryptDirect(aes, out - AES_BLOCK_SIZE, buf);
if (ret == 0)
xorbuf(out - AES_BLOCK_SIZE, tweak_block, AES_BLOCK_SIZE);
}
return ret;
}
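The "multiply by shift left and propagate carry" loops above (and in the streaming variants below) implement doubling of the tweak in GF(2^128); a stand-alone equivalent of that single step, written here only for illustration, looks like this:

/* Double the XTS tweak: treat tweak_block as a 128-bit little-endian
 * value, shift left by one bit, and fold any carry out of the top bit
 * back in with the XTS polynomial constant (GF_XTS, 0x87). Illustrative
 * rewrite of the in-line loops, not part of the commit. */
static void xts_double_tweak(byte tweak_block[AES_BLOCK_SIZE])
{
    byte carry = 0;
    word32 j;

    for (j = 0; j < AES_BLOCK_SIZE; j++) {
        byte msb = (byte)((tweak_block[j] >> 7) & 0x01);
        tweak_block[j] = (byte)((tweak_block[j] << 1) + carry);
        carry = msb;
    }
    if (carry)
        tweak_block[0] ^= GF_XTS;
}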
#ifdef WOLFSSL_AESXTS_STREAM
/* streaming AES-XTS. (XTS) XEX encryption with Tweak and cipher text Stealing.
*
* xaes AES keys to use for block encrypt/decrypt
* i value to use for tweak
*
* returns 0 on success
*/
static int AesXtsEncryptStart_sw(XtsAes* xaes, const byte* i, byte *tweak_block) {
return wc_AesEncryptDirect(&xaes->tweak, tweak_block, i);
}
/* streaming AES-XTS. (XTS) XEX encryption with Tweak and cipher text Stealing.
*
* xaes AES keys to use for block encrypt/decrypt
* out output buffer to hold cipher text
* in input plain text buffer to encrypt
* sz size of both out and in buffers
*
* returns 0 on success
*/
/* Software AES - XTS Encrypt */
static int AesXtsEncryptUpdate_sw(XtsAes* xaes, byte* out, const byte* in, word32 sz,
byte *tweak_block)
{
int ret = 0;
word32 blocks = (sz / AES_BLOCK_SIZE);
Aes *aes = &xaes->aes;
#if 0
#ifdef HAVE_AES_ECB
/* encrypt all of buffer at once when possible */
if (in != out) { /* can not handle inline */
XMEMCPY(out, tweak_block, AES_BLOCK_SIZE);
if ((ret = _AesXtsHelper(aes, out, in, sz, AES_ENCRYPTION)) != 0)
return ret;
}
#endif
#endif
while (blocks > 0) {
word32 j;
byte carry = 0;
#if 0 && defined(HAVE_AES_ECB)
if (in == out)
#endif
{ /* check for if inline */
byte buf[AES_BLOCK_SIZE];
XMEMCPY(buf, in, AES_BLOCK_SIZE);
xorbuf(buf, tweak_block, AES_BLOCK_SIZE);
ret = wc_AesEncryptDirect(aes, out, buf);
if (ret != 0)
return ret;
}
xorbuf(out, tweak_block, AES_BLOCK_SIZE);
/* multiply by shift left and propagate carry */
for (j = 0; j < AES_BLOCK_SIZE; j++) {
byte tmpC;
tmpC = (tweak_block[j] >> 7) & 0x01;
tweak_block[j] = (byte)((tweak_block[j] << 1) + carry);
carry = tmpC;
}
if (carry) {
tweak_block[0] ^= GF_XTS;
}
in += AES_BLOCK_SIZE;
out += AES_BLOCK_SIZE;
sz -= AES_BLOCK_SIZE;
blocks--;
}
/* stealing operation of XTS to handle left overs */
if (sz > 0) {
byte buf[AES_BLOCK_SIZE];
XMEMCPY(buf, out - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
if (sz >= AES_BLOCK_SIZE) { /* extra sanity check before copy */
return BUFFER_E;
}
if (in != out) {
XMEMCPY(out, buf, sz);
XMEMCPY(buf, in, sz);
}
else {
byte buf2[AES_BLOCK_SIZE];
XMEMCPY(buf2, buf, sz);
XMEMCPY(buf, in, sz);
XMEMCPY(out, buf2, sz);
}
xorbuf(buf, tweak_block, AES_BLOCK_SIZE);
ret = wc_AesEncryptDirect(aes, out - AES_BLOCK_SIZE, buf);
if (ret == 0)
xorbuf(out - AES_BLOCK_SIZE, tweak_block, AES_BLOCK_SIZE);
}
return ret;
}
#endif /* WOLFSSL_AESXTS_STREAM */
/* AES with XTS mode. (XTS) XEX encryption with Tweak and cipher text Stealing.
*
* xaes AES keys to use for block encrypt/decrypt
@@ -12773,6 +12879,136 @@ int wc_AesXtsEncrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz,
return ret;
}
#ifdef WOLFSSL_AESXTS_STREAM
int wc_AesXtsEncryptStart(XtsAes* xaes, const byte* i, word32 iSz,
byte *tweak_block)
{
int ret;
Aes *aes;
if ((xaes == NULL) || (tweak_block == NULL)) {
return BAD_FUNC_ARG;
}
if (iSz < AES_BLOCK_SIZE) {
return BAD_FUNC_ARG;
}
aes = &xaes->aes;
if (aes->keylen == 0) {
WOLFSSL_MSG("wc_AesXtsEncrypt called with unset encryption key.");
return BAD_FUNC_ARG;
}
{
#if 0 && defined(WOLFSSL_AESNI)
if (aes->use_aesni) {
SAVE_VECTOR_REGISTERS(return _svr_ret;);
#if defined(HAVE_INTEL_AVX1)
if (IS_INTEL_AVX1(intel_flags)) {
AES_XTS_encrypt_start_avx1(i,
(const byte*)xaes->tweak.key,
tweak_block,
(int)xaes->tweak.rounds);
ret = 0;
}
else
#endif
{
AES_XTS_encrypt_start_aesni(i,
(const byte*)xaes->tweak.key,
tweak_block,
(int)xaes->tweak.rounds);
ret = 0;
}
RESTORE_VECTOR_REGISTERS();
}
else
#endif /* 0 && defined(WOLFSSL_AESNI) */
{
ret = AesXtsEncryptStart_sw(xaes, i, tweak_block);
}
}
return ret;
}
/* AES with XTS mode. (XTS) XEX encryption with Tweak and cipher text Stealing.
*
* xaes AES keys to use for block encrypt/decrypt
* out output buffer to hold cipher text
* in input plain text buffer to encrypt
* sz size of both out and in buffers
* tweak_block buffer holding the running tweak, initialized by
* wc_AesXtsEncryptStart() and updated by each call.
*
* returns 0 on success
*/
int wc_AesXtsEncryptUpdate(XtsAes* xaes, byte* out, const byte* in, word32 sz,
byte *tweak_block)
{
int ret;
#if 0 && defined(WOLFSSL_AESNI)
Aes *aes;
#endif
if (xaes == NULL || out == NULL || in == NULL || tweak_block == NULL) {
return BAD_FUNC_ARG;
}
#if 0 && defined(WOLFSSL_AESNI)
aes = &xaes->aes;
#endif
if (sz < AES_BLOCK_SIZE) {
WOLFSSL_MSG("Plain text input too small for encryption");
return BAD_FUNC_ARG;
}
{
#if 0 && defined(WOLFSSL_AESNI)
if (aes->use_aesni) {
SAVE_VECTOR_REGISTERS(return _svr_ret;);
#if defined(HAVE_INTEL_AVX1)
if (IS_INTEL_AVX1(intel_flags)) {
AES_XTS_encrypt_update_avx1(in, out, sz,
(const byte*)aes->key,
tweak_block,
(int)aes->rounds);
ret = 0;
}
else
#endif
{
AES_XTS_encrypt_update_aesni(in, out, sz,
(const byte*)aes->key,
tweak_block,
(int)aes->rounds);
ret = 0;
}
RESTORE_VECTOR_REGISTERS();
}
else
#endif /* 0 && defined(WOLFSSL_AESNI) */
{
ret = AesXtsEncryptUpdate_sw(xaes, out, in, sz, tweak_block);
}
}
return ret;
}
#endif /* WOLFSSL_AESXTS_STREAM */
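The intended contract of the pair above is that wc_AesXtsEncryptStart() followed by a sequence of wc_AesXtsEncryptUpdate() calls yields the same ciphertext as a single wc_AesXtsEncrypt() call. A minimal self-check sketch (test scaffolding, not part of the commit; assumes a test translation unit including wolfssl/wolfcrypt/aes.h and error-crypt.h, and an XtsAes whose key is already set):

/* Split the input after the first block and verify the streamed
 * ciphertext matches the one-shot result. Requires sz >= 2*AES_BLOCK_SIZE. */
static int xts_stream_matches_oneshot(XtsAes *xaes, const byte *in, word32 sz,
                                      const byte iv[AES_BLOCK_SIZE])
{
    byte oneshot[256];
    byte streamed[256];
    byte tweak_block[AES_BLOCK_SIZE];
    int ret;

    if ((sz < 2 * AES_BLOCK_SIZE) || (sz > (word32)sizeof(oneshot)))
        return BAD_FUNC_ARG;

    ret = wc_AesXtsEncrypt(xaes, oneshot, in, sz, iv, AES_BLOCK_SIZE);
    if (ret == 0)
        ret = wc_AesXtsEncryptStart(xaes, iv, AES_BLOCK_SIZE, tweak_block);
    if (ret == 0)
        ret = wc_AesXtsEncryptUpdate(xaes, streamed, in, AES_BLOCK_SIZE,
                                     tweak_block);
    if (ret == 0)
        ret = wc_AesXtsEncryptUpdate(xaes, streamed + AES_BLOCK_SIZE,
                                     in + AES_BLOCK_SIZE,
                                     sz - AES_BLOCK_SIZE, tweak_block);
    if ((ret == 0) && (XMEMCMP(oneshot, streamed, sz) != 0))
        ret = -1; /* mismatch */
    return ret;
}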
/* Same process as encryption but use aes_decrypt key.
*
* xaes AES keys to use for block encrypt/decrypt
@@ -12794,16 +13030,12 @@ static int AesXtsDecrypt_sw(XtsAes* xaes, byte* out, const byte* in, word32 sz,
#else
Aes *aes = &xaes->aes;
#endif
word32 j;
byte carry = 0;
byte tweak_block[AES_BLOCK_SIZE];
byte stl = (sz % AES_BLOCK_SIZE);
ret = wc_AesEncryptDirect(&xaes->tweak, tweak_block, i);
if (ret != 0)
return ret;
@@ -12816,7 +13048,7 @@ static int AesXtsDecrypt_sw(XtsAes* xaes, byte* out, const byte* in, word32 sz,
#ifdef HAVE_AES_ECB
/* decrypt all of buffer at once when possible */
if (in != out) { /* can not handle inline */
XMEMCPY(out, tweak_block, AES_BLOCK_SIZE);
if ((ret = _AesXtsHelper(aes, out, in, sz, AES_DECRYPTION)) != 0)
return ret;
}
@@ -12830,23 +13062,23 @@ static int AesXtsDecrypt_sw(XtsAes* xaes, byte* out, const byte* in, word32 sz,
byte buf[AES_BLOCK_SIZE];
XMEMCPY(buf, in, AES_BLOCK_SIZE);
xorbuf(buf, tweak_block, AES_BLOCK_SIZE);
ret = wc_AesDecryptDirect(aes, out, buf);
if (ret != 0)
return ret;
}
xorbuf(out, tweak_block, AES_BLOCK_SIZE);
/* multiply by shift left and propagate carry */
for (j = 0; j < AES_BLOCK_SIZE; j++) {
byte tmpC;
tmpC = (tweak_block[j] >> 7) & 0x01;
tweak_block[j] = (byte)((tweak_block[j] << 1) + carry);
carry = tmpC;
}
if (carry) {
tweak_block[0] ^= GF_XTS;
}
carry = 0;
@@ -12865,8 +13097,8 @@ static int AesXtsDecrypt_sw(XtsAes* xaes, byte* out, const byte* in, word32 sz,
for (j = 0; j < AES_BLOCK_SIZE; j++) {
byte tmpC;
tmpC = (tweak_block[j] >> 7) & 0x01;
tmp2[j] = (byte)((tweak_block[j] << 1) + carry);
carry = tmpC;
}
if (carry) {
@@ -12894,17 +13126,152 @@ static int AesXtsDecrypt_sw(XtsAes* xaes, byte* out, const byte* in, word32 sz,
XMEMCPY(buf, in, sz);
XMEMCPY(out, tmp2, sz);
xorbuf(buf, tweak_block, AES_BLOCK_SIZE);
ret = wc_AesDecryptDirect(aes, tmp2, buf);
if (ret != 0)
return ret;
xorbuf(tmp2, tweak_block, AES_BLOCK_SIZE);
XMEMCPY(out - AES_BLOCK_SIZE, tmp2, AES_BLOCK_SIZE);
}
return ret;
}
#ifdef WOLFSSL_AESXTS_STREAM
static int AesXtsDecryptStart_sw(XtsAes* xaes, const byte* i,
byte *tweak_block)
{
return wc_AesEncryptDirect(&xaes->tweak, tweak_block, i);
}
/* Same process as encryption but use aes_decrypt key.
*
* xaes AES keys to use for block encrypt/decrypt
* out output buffer to hold plain text
* in input cipher text buffer to decrypt
* sz size of both out and in buffers
* i value to use for tweak
*
* returns 0 on success
*/
/* Software AES - XTS Decrypt */
static int AesXtsDecryptUpdate_sw(XtsAes* xaes, byte* out, const byte* in,
word32 sz, byte *tweak_block)
{
int ret = 0;
word32 blocks = (sz / AES_BLOCK_SIZE);
#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS
Aes *aes = &xaes->aes_decrypt;
#else
Aes *aes = &xaes->aes;
#endif
word32 j;
byte carry = 0;
byte stl = (sz % AES_BLOCK_SIZE);
/* if Stealing then break out of loop one block early to handle special
* case */
if (stl > 0) {
blocks--;
}
#if 0
#ifdef HAVE_AES_ECB
/* decrypt all of buffer at once when possible */
if (in != out) { /* can not handle inline */
XMEMCPY(out, tweak_block, AES_BLOCK_SIZE);
if ((ret = _AesXtsHelper(aes, out, in, sz, AES_DECRYPTION)) != 0)
return ret;
}
#endif
#endif /* 0 */
while (blocks > 0) {
#if 0 && defined(HAVE_AES_ECB)
if (in == out)
#endif
{ /* check for if inline */
byte buf[AES_BLOCK_SIZE];
XMEMCPY(buf, in, AES_BLOCK_SIZE);
xorbuf(buf, tweak_block, AES_BLOCK_SIZE);
ret = wc_AesDecryptDirect(aes, out, buf);
if (ret != 0)
return ret;
}
xorbuf(out, tweak_block, AES_BLOCK_SIZE);
/* multiply by shift left and propagate carry */
for (j = 0; j < AES_BLOCK_SIZE; j++) {
byte tmpC;
tmpC = (tweak_block[j] >> 7) & 0x01;
tweak_block[j] = (byte)((tweak_block[j] << 1) + carry);
carry = tmpC;
}
if (carry) {
tweak_block[0] ^= GF_XTS;
}
carry = 0;
in += AES_BLOCK_SIZE;
out += AES_BLOCK_SIZE;
sz -= AES_BLOCK_SIZE;
blocks--;
}
/* stealing operation of XTS to handle left overs */
if (sz >= AES_BLOCK_SIZE) {
byte buf[AES_BLOCK_SIZE];
byte tmp2[AES_BLOCK_SIZE];
/* multiply by shift left and propagate carry */
for (j = 0; j < AES_BLOCK_SIZE; j++) {
byte tmpC;
tmpC = (tweak_block[j] >> 7) & 0x01;
tmp2[j] = (byte)((tweak_block[j] << 1) + carry);
carry = tmpC;
}
if (carry) {
tmp2[0] ^= GF_XTS;
}
XMEMCPY(buf, in, AES_BLOCK_SIZE);
xorbuf(buf, tmp2, AES_BLOCK_SIZE);
ret = wc_AesDecryptDirect(aes, out, buf);
if (ret != 0)
return ret;
xorbuf(out, tmp2, AES_BLOCK_SIZE);
/* tmp2 holds partial | last */
XMEMCPY(tmp2, out, AES_BLOCK_SIZE);
in += AES_BLOCK_SIZE;
out += AES_BLOCK_SIZE;
sz -= AES_BLOCK_SIZE;
/* Make buffer with end of cipher text | last */
XMEMCPY(buf, tmp2, AES_BLOCK_SIZE);
if (sz >= AES_BLOCK_SIZE) { /* extra sanity check before copy */
return BUFFER_E;
}
XMEMCPY(buf, in, sz);
XMEMCPY(out, tmp2, sz);
xorbuf(buf, tweak_block, AES_BLOCK_SIZE);
ret = wc_AesDecryptDirect(aes, tmp2, buf);
if (ret != 0)
return ret;
xorbuf(tmp2, tweak_block, AES_BLOCK_SIZE);
XMEMCPY(out - AES_BLOCK_SIZE, tmp2, AES_BLOCK_SIZE);
}
return ret;
}
#endif /* WOLFSSL_AESXTS_STREAM */
/* Same process as encryption but Aes key is AES_DECRYPTION type.
*
* xaes AES keys to use for block encrypt/decrypt
@@ -12987,6 +13354,152 @@ int wc_AesXtsDecrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz,
return ret;
}
}
#ifdef WOLFSSL_AESXTS_STREAM
/* Same process as encryption but Aes key is AES_DECRYPTION type.
*
* xaes AES keys to use for block encrypt/decrypt
* i value to use for tweak
* iSz size of i buffer, should always be AES_BLOCK_SIZE but having this input
* adds a sanity check on how the user calls the function.
* tweak_block buffer to receive the initial tweak, for use with
* wc_AesXtsDecryptUpdate().
*
* returns 0 on success
*/
int wc_AesXtsDecryptStart(XtsAes* xaes, const byte* i, word32 iSz,
byte *tweak_block)
{
int ret;
Aes *aes;
if (xaes == NULL) {
return BAD_FUNC_ARG;
}
#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS
aes = &xaes->aes_decrypt;
#else
aes = &xaes->aes;
#endif
if (aes->keylen == 0) {
WOLFSSL_MSG("wc_AesXtsDecrypt called with unset decryption key.");
return BAD_FUNC_ARG;
}
if (iSz < AES_BLOCK_SIZE) {
return BAD_FUNC_ARG;
}
{
#if 0 && defined(WOLFSSL_AESNI)
if (aes->use_aesni) {
SAVE_VECTOR_REGISTERS(return _svr_ret;);
#if defined(HAVE_INTEL_AVX1)
if (IS_INTEL_AVX1(intel_flags)) {
AES_XTS_decrypt_start_avx1(i,
(const byte*)xaes->tweak.key,
tweak_block,
(int)xaes->tweak.rounds);
ret = 0;
}
else
#endif
{
AES_XTS_decrypt_start_aesni(i,
(const byte*)xaes->tweak.key,
tweak_block,
(int)xaes->tweak.rounds);
ret = 0;
}
RESTORE_VECTOR_REGISTERS();
}
else
#endif /* 0 && defined(WOLFSSL_AESNI) */
{
ret = AesXtsDecryptStart_sw(xaes, i, tweak_block);
}
}
return ret;
}
/* Same process as encryption but Aes key is AES_DECRYPTION type.
*
* xaes AES keys to use for block encrypt/decrypt
* out output buffer to hold plain text
* in input cipher text buffer to decrypt
* sz size of both out and in buffers
* tweak_block buffer holding the running tweak, initialized by
* wc_AesXtsDecryptStart() and updated by each call.
*
* returns 0 on success
*/
int wc_AesXtsDecryptUpdate(XtsAes* xaes, byte* out, const byte* in, word32 sz,
byte *tweak_block)
{
int ret;
#if 0 && defined(WOLFSSL_AESNI)
Aes *aes;
#endif
if (xaes == NULL || out == NULL || in == NULL) {
return BAD_FUNC_ARG;
}
#if 0 && defined(WOLFSSL_AESNI)
#ifdef WC_AES_XTS_SUPPORT_SIMULTANEOUS_ENC_AND_DEC_KEYS
aes = &xaes->aes_decrypt;
#else
aes = &xaes->aes;
#endif
#endif
if (sz < AES_BLOCK_SIZE) {
WOLFSSL_MSG("Cipher text input too small for decryption");
return BAD_FUNC_ARG;
}
{
#if 0 && defined(WOLFSSL_AESNI)
if (aes->use_aesni) {
SAVE_VECTOR_REGISTERS(return _svr_ret;);
#if defined(HAVE_INTEL_AVX1)
if (IS_INTEL_AVX1(intel_flags)) {
AES_XTS_decrypt_update_avx1(in, out, sz,
(const byte*)aes->key,
tweak_block,
(int)aes->rounds);
ret = 0;
}
else
#endif
{
AES_XTS_decrypt_update_aesni(in, out, sz,
(const byte*)aes->key,
tweak_block,
(int)aes->rounds);
ret = 0;
}
RESTORE_VECTOR_REGISTERS();
}
else
#endif /* 0 && defined(WOLFSSL_AESNI) */
{
ret = AesXtsDecryptUpdate_sw(xaes, out, in, sz, tweak_block);
}
}
return ret;
}
#endif /* WOLFSSL_AESXTS_STREAM */
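Likewise for the decrypt side, a quick round-trip sketch (test scaffolding, not part of the commit; assumes the key was set with AES_ENCRYPTION_AND_DECRYPTION, as the LKCAPI glue does):

/* One-shot encrypt, then decrypt via the streaming API and compare with
 * the original plaintext. Illustrative only; requires sz >= 2*AES_BLOCK_SIZE. */
static int xts_stream_decrypt_roundtrip(XtsAes *xaes, const byte *in,
                                        word32 sz,
                                        const byte iv[AES_BLOCK_SIZE])
{
    byte ct[256];
    byte pt[256];
    byte tweak_block[AES_BLOCK_SIZE];
    int ret;

    if ((sz < 2 * AES_BLOCK_SIZE) || (sz > (word32)sizeof(ct)))
        return BAD_FUNC_ARG;

    ret = wc_AesXtsEncrypt(xaes, ct, in, sz, iv, AES_BLOCK_SIZE);
    if (ret == 0)
        ret = wc_AesXtsDecryptStart(xaes, iv, AES_BLOCK_SIZE, tweak_block);
    if (ret == 0)
        ret = wc_AesXtsDecryptUpdate(xaes, pt, ct, AES_BLOCK_SIZE,
                                     tweak_block);
    if (ret == 0)
        ret = wc_AesXtsDecryptUpdate(xaes, pt + AES_BLOCK_SIZE,
                                     ct + AES_BLOCK_SIZE,
                                     sz - AES_BLOCK_SIZE, tweak_block);
    if ((ret == 0) && (XMEMCMP(pt, in, sz) != 0))
        ret = -1; /* mismatch */
    return ret;
}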
#endif /* !WOLFSSL_ARMASM || WOLFSSL_ARMASM_NO_HW_CRYPTO */
/* Same as wc_AesXtsEncryptSector but the sector gets incremented by one every

wolfssl/wolfcrypt/aes.h

@@ -669,6 +669,22 @@ WOLFSSL_API int wc_AesXtsDecryptConsecutiveSectors(XtsAes* aes,
byte* out, const byte* in, word32 sz, word64 sector,
word32 sectorSz);
#ifdef WOLFSSL_AESXTS_STREAM
WOLFSSL_API int wc_AesXtsEncryptStart(XtsAes* aes, const byte* i, word32 iSz,
byte *tweak_block);
WOLFSSL_API int wc_AesXtsDecryptStart(XtsAes* aes, const byte* i, word32 iSz,
byte *tweak_block);
WOLFSSL_API int wc_AesXtsEncryptUpdate(XtsAes* aes, byte* out,
const byte* in, word32 sz, byte *tweak_block);
WOLFSSL_API int wc_AesXtsDecryptUpdate(XtsAes* aes, byte* out,
const byte* in, word32 sz, byte *tweak_block);
#endif /* WOLFSSL_AESXTS_STREAM */
WOLFSSL_API int wc_AesXtsFree(XtsAes* aes);
#endif