mirror of https://github.com/wolfSSL/wolfssl.git (synced 2025-07-30 02:37:28 +02:00)

Merge pull request #5698 from SparkiDev/aes_arm32_no_hw
@@ -1996,6 +1996,7 @@ AC_ARG_ENABLE([aescbc],
if test "$ENABLED_AESCBC" = "no"
then
    AM_CFLAGS="$AM_CFLAGS -DNO_AES_CBC"
AM_CCASFLAGS="$AM_CCASFLAGS -DHAVE_AES_CBC"
|
||||
fi

# AES-CBC length checks (checks that input lengths are multiples of block size)
@@ -2047,6 +2048,7 @@ AC_ARG_ENABLE([aesccm],
if test "$ENABLED_AESCCM" = "yes" || test "$ENABLED_WOLFENGINE" = "yes"
then
    AM_CFLAGS="$AM_CFLAGS -DHAVE_AESCCM"
    AM_CCASFLAGS="$AM_CCASFLAGS -DHAVE_AESCCM"
fi

# AES-SIV (RFC 5297)
@@ -4185,6 +4187,8 @@ AS_CASE([$FIPS_VERSION],

    AS_IF([test "$ENABLED_AESGCM" = "no" && (test "$FIPS_VERSION" != "dev" || test "$enable_aesgcm" != "no")],
          [ENABLED_AESGCM="yes"; AM_CFLAGS="$AM_CFLAGS -DHAVE_AESGCM"])
    AS_IF([test "$ENABLED_AESGCM" = "no" && (test "$FIPS_VERSION" != "dev" || test "$enable_aesgcm" != "no")],
          [ENABLED_AESGCM="yes"; AM_CCASFLAGS="$AM_CCASFLAGS -DHAVE_AESGCM"])

    # AES-GCM streaming isn't part of the current FIPS suite.
    AS_IF([test "$ENABLED_AESGCM_STREAM" = "yes" && (test "$FIPS_VERSION" != "dev" || test "$enable_aesgcm_stream" != "yes")],
@@ -4242,6 +4246,9 @@ AS_CASE([$FIPS_VERSION],
    AS_IF([test "x$ENABLED_AESCTR" != "xyes"],
          [ENABLED_AESCTR="yes"
           AM_CFLAGS="$AM_CFLAGS -DWOLFSSL_AES_COUNTER"])
    AS_IF([test "x$ENABLED_AESCTR" != "xyes"],
          [ENABLED_AESCTR="yes"
           AM_CCASFLAGS="$AM_CCASFLAGS -DWOLFSSL_AES_COUNTER"])
    AS_IF([test "x$ENABLED_CMAC" != "xyes"],
          [ENABLED_CMAC="yes"
           AM_CFLAGS="$AM_CFLAGS -DWOLFSSL_CMAC"])
@@ -191,8 +191,11 @@ endif

if BUILD_AES
src_libwolfssl_la_SOURCES += wolfcrypt/src/aes.c
if BUILD_ARMASM_CRYPTO
if BUILD_ARMASM
src_libwolfssl_la_SOURCES += wolfcrypt/src/port/arm/armv8-aes.c
if !BUILD_ARMASM_CRYPTO
src_libwolfssl_la_SOURCES += wolfcrypt/src/port/arm/armv8-32-aes-asm.S
endif
endif
endif

@@ -401,13 +404,16 @@ endif
if !BUILD_FIPS_CURRENT
if BUILD_AES
src_libwolfssl_la_SOURCES += wolfcrypt/src/aes.c
if BUILD_ARMASM_CRYPTO
if BUILD_ARMASM
src_libwolfssl_la_SOURCES += wolfcrypt/src/port/arm/armv8-aes.c
endif
if !BUILD_ARMASM_CRYPTO
src_libwolfssl_la_SOURCES += wolfcrypt/src/port/arm/armv8-32-aes-asm.S
endif !BUILD_ARMASM_CRYPTO
endif BUILD_ARMASM
if BUILD_AFALG
src_libwolfssl_la_SOURCES += wolfcrypt/src/port/af_alg/afalg_aes.c
endif
endif
endif BUILD_AES
endif !BUILD_FIPS_CURRENT

if !BUILD_FIPS_CURRENT

@@ -306,7 +306,7 @@ block cipher mechanism that uses n-bit binary string parameter key with 128-bits
    #include <wolfcrypt/src/misc.c>
#endif

#if !defined(WOLFSSL_ARMASM) || defined(WOLFSSL_ARMASM_NO_HW_CRYPTO)
#ifndef WOLFSSL_ARMASM

#ifdef WOLFSSL_IMX6_CAAM_BLOB
/* case of possibly not using hardware acceleration for AES but using key
@@ -4224,7 +4224,7 @@ int wc_AesSetIV(Aes* aes, const byte* iv)

    return 0;
}
#endif
#endif /* HAVE_AES_DECRYPT */

#endif /* AES-CBC block */
#endif /* HAVE_AES_CBC */
@@ -4601,7 +4601,7 @@ static WC_INLINE void IncCtr(byte* ctr, word32 ctrSz)

#endif

#if defined(WOLFSSL_ARMASM) && !defined(WOLFSSL_ARMASM_NO_HW_CRYPTO)
#ifdef WOLFSSL_ARMASM
    /* implementation is located in wolfcrypt/src/port/arm/armv8-aes.c */

#elif defined(WOLFSSL_AFALG)
@@ -9933,7 +9933,7 @@ int wc_AesCcmCheckTagSize(int sz)
    return 0;
}

#if defined(WOLFSSL_ARMASM) && !defined(WOLFSSL_ARMASM_NO_HW_CRYPTO)
#ifdef WOLFSSL_ARMASM
    /* implementation located in wolfcrypt/src/port/arm/armv8-aes.c */

#elif defined(HAVE_COLDFIRE_SEC)
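The guard changes in these aes.c hunks decide which AES code gets built. A condensed, illustrative sketch of the resulting selection (not part of the patch; the branch comments paraphrase the hunks above and the armv8-aes.c changes below):

/* Illustrative sketch only: how WOLFSSL_ARMASM and the new
 * WOLFSSL_ARMASM_NO_HW_CRYPTO define select an AES implementation. */
#include <wolfssl/wolfcrypt/settings.h>

#if !defined(WOLFSSL_ARMASM) || defined(WOLFSSL_ARMASM_NO_HW_CRYPTO)
    /* generic C paths in wolfcrypt/src/aes.c stay compiled when there is no
     * ARM assembly, or when the ARM assembly build has no crypto hardware */
#endif

#if defined(WOLFSSL_ARMASM) && !defined(WOLFSSL_ARMASM_NO_HW_CRYPTO)
    /* AES-CTR / AES-CCM: "implementation is located in
     * wolfcrypt/src/port/arm/armv8-aes.c" (ARMv8 Crypto Extensions) */
#else
    /* with WOLFSSL_ARMASM_NO_HW_CRYPTO, armv8-aes.c falls through to its new
     * software branch, wrapping the routines in armv8-32-aes-asm.S */
#endif
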
wolfcrypt/src/port/arm/armv8-32-aes-asm.S (new file, 3594 lines)
File diff suppressed because it is too large
@@ -32,8 +32,8 @@

#include <wolfssl/wolfcrypt/settings.h>

#if !defined(NO_AES) && defined(WOLFSSL_ARMASM) && \
    !defined(WOLFSSL_ARMASM_NO_HW_CRYPTO)
#if !defined(NO_AES) && defined(WOLFSSL_ARMASM)
#ifndef WOLFSSL_ARMASM_NO_HW_CRYPTO

#ifdef HAVE_FIPS
#undef HAVE_FIPS
@@ -5458,4 +5458,847 @@ int wc_AesGcmSetKey(Aes* aes, const byte* key, word32 len)
}
#endif /* HAVE_AES_DECRYPT */
#endif /* WOLFSSL_AES_DIRECT */
#else
#include <wolfssl/wolfcrypt/logging.h>
#include <wolfssl/wolfcrypt/error-crypt.h>
#include <wolfssl/wolfcrypt/aes.h>
#ifdef NO_INLINE
    #include <wolfssl/wolfcrypt/misc.h>
#else
    #define WOLFSSL_MISC_INCLUDED
    #include <wolfcrypt/src/misc.c>
#endif

extern void AES_set_encrypt_key(const unsigned char* key, word32 len,
    unsigned char* ks);
extern void AES_invert_key(unsigned char* ks, word32 rounds);
extern void AES_ECB_encrypt(const unsigned char* in, unsigned char* out,
    unsigned long len, const unsigned char* ks, int nr);
extern void AES_ECB_decrypt(const unsigned char* in, unsigned char* out,
    unsigned long len, const unsigned char* ks, int nr);
extern void AES_CBC_encrypt(const unsigned char* in, unsigned char* out,
    unsigned long len, const unsigned char* ks, int nr, unsigned char* iv);
extern void AES_CBC_decrypt(const unsigned char* in, unsigned char* out,
    unsigned long len, const unsigned char* ks, int nr, unsigned char* iv);
extern void AES_CTR_encrypt(const unsigned char* in, unsigned char* out,
    unsigned long len, const unsigned char* ks, int nr, unsigned char* ctr);
extern void GCM_gmult_len(byte* x, const byte m[32][AES_BLOCK_SIZE],
    const unsigned char* data, unsigned long len);
extern void AES_GCM_encrypt(const unsigned char* in, unsigned char* out,
    unsigned long len, const unsigned char* ks, int nr, unsigned char* ctr);

int wc_AesSetKey(Aes* aes, const byte* userKey, word32 keylen,
    const byte* iv, int dir)
{
#if defined(AES_MAX_KEY_SIZE)
    const word32 max_key_len = (AES_MAX_KEY_SIZE / 8);
#endif

    if (((keylen != 16) && (keylen != 24) && (keylen != 32)) ||
            (aes == NULL) || (userKey == NULL)) {
        return BAD_FUNC_ARG;
    }

#if defined(AES_MAX_KEY_SIZE)
    /* Check key length */
    if (keylen > max_key_len) {
        return BAD_FUNC_ARG;
    }
#endif

#ifdef WOLFSSL_AES_COUNTER
    aes->left = 0;
#endif /* WOLFSSL_AES_COUNTER */

    aes->keylen = keylen;
    aes->rounds = keylen/4 + 6;

    AES_set_encrypt_key(userKey, keylen * 8, (byte*)aes->key);
#ifdef HAVE_AES_DECRYPT
    if (dir == AES_DECRYPTION) {
        AES_invert_key((byte*)aes->key, aes->rounds);
    }
#else
    (void)dir;
#endif

    return wc_AesSetIV(aes, iv);
}

#if defined(WOLFSSL_AES_DIRECT) || defined(WOLFSSL_AES_COUNTER)
int wc_AesSetKeyDirect(Aes* aes, const byte* userKey, word32 keylen,
    const byte* iv, int dir)
{
    return wc_AesSetKey(aes, userKey, keylen, iv, dir);
}
#endif

/* wc_AesSetIV is shared between software and hardware */
int wc_AesSetIV(Aes* aes, const byte* iv)
{
    if (aes == NULL)
        return BAD_FUNC_ARG;

    if (iv)
        XMEMCPY(aes->reg, iv, AES_BLOCK_SIZE);
    else
        XMEMSET(aes->reg, 0, AES_BLOCK_SIZE);

    return 0;
}

#if defined(HAVE_AESCCM) || defined(WOLFSSL_AES_DIRECT)
static int wc_AesEncrypt(Aes* aes, const byte* inBlock, byte* outBlock)
{
    if (aes->rounds != 10 && aes->rounds != 12 && aes->rounds != 14) {
        WOLFSSL_ERROR_VERBOSE(KEYUSAGE_E);
        return KEYUSAGE_E;
    }

    AES_ECB_encrypt(inBlock, outBlock, AES_BLOCK_SIZE,
        (const unsigned char*)aes->key, aes->rounds);
    return 0;
}
#endif

#if defined(HAVE_AES_DECRYPT) && defined(WOLFSSL_AES_DIRECT)
static int wc_AesDecrypt(Aes* aes, const byte* inBlock, byte* outBlock)
{
    if (aes->rounds != 10 && aes->rounds != 12 && aes->rounds != 14) {
        WOLFSSL_ERROR_VERBOSE(KEYUSAGE_E);
        return KEYUSAGE_E;
    }

    AES_ECB_decrypt(inBlock, outBlock, AES_BLOCK_SIZE,
        (const unsigned char*)aes->key, aes->rounds);
    return 0;
}
#endif

/* AES-DIRECT */
#if defined(WOLFSSL_AES_DIRECT)
    /* Allow direct access to one block encrypt */
    int wc_AesEncryptDirect(Aes* aes, byte* out, const byte* in)
    {
        if (aes == NULL || out == NULL || in == NULL) {
            WOLFSSL_MSG("Invalid input to wc_AesEncryptDirect");
            return BAD_FUNC_ARG;
        }
        return wc_AesEncrypt(aes, in, out);
    }

    #ifdef HAVE_AES_DECRYPT
        /* Allow direct access to one block decrypt */
        int wc_AesDecryptDirect(Aes* aes, byte* out, const byte* in)
        {
            if (aes == NULL || out == NULL || in == NULL) {
                WOLFSSL_MSG("Invalid input to wc_AesDecryptDirect");
                return BAD_FUNC_ARG;
            }
            return wc_AesDecrypt(aes, in, out);
        }
    #endif /* HAVE_AES_DECRYPT */
#endif /* WOLFSSL_AES_DIRECT */

#ifdef HAVE_AES_CBC
int wc_AesCbcEncrypt(Aes* aes, byte* out, const byte* in, word32 sz)
{
    if (aes == NULL || out == NULL || in == NULL) {
        return BAD_FUNC_ARG;
    }

    if (aes->rounds != 10 && aes->rounds != 12 && aes->rounds != 14) {
        WOLFSSL_ERROR_VERBOSE(KEYUSAGE_E);
        return KEYUSAGE_E;
    }

    if (sz == 0) {
        return 0;
    }

    AES_CBC_encrypt(in, out, sz, (const unsigned char*)aes->key, aes->rounds,
        (unsigned char*)aes->reg);

    return 0;
}

#ifdef HAVE_AES_DECRYPT
int wc_AesCbcDecrypt(Aes* aes, byte* out, const byte* in, word32 sz)
{
    if (aes == NULL || out == NULL || in == NULL) {
        return BAD_FUNC_ARG;
    }

    if (aes->rounds != 10 && aes->rounds != 12 && aes->rounds != 14) {
        WOLFSSL_ERROR_VERBOSE(KEYUSAGE_E);
        return KEYUSAGE_E;
    }

    if (sz == 0) {
        return 0;
    }

    AES_CBC_decrypt(in, out, sz, (const unsigned char*)aes->key, aes->rounds,
        (unsigned char*)aes->reg);

    return 0;
}
#endif
#endif

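A minimal usage sketch for the key setup and CBC wrappers above (not part of the patch; key, IV and message values are illustrative and assume AES-CBC is enabled in the build):

/* Illustrative only: one-block CBC encrypt through the wrappers above. */
#include <wolfssl/wolfcrypt/aes.h>

int cbc_example(void)
{
    Aes enc;
    byte key[16] = { 0 };                 /* 16-, 24- or 32-byte key */
    byte iv[AES_BLOCK_SIZE]  = { 0 };
    byte msg[AES_BLOCK_SIZE] = { 0 };     /* CBC input length must be a multiple of 16 */
    byte out[AES_BLOCK_SIZE];
    int ret;

    ret = wc_AesInit(&enc, NULL, INVALID_DEVID);
    if (ret == 0)
        ret = wc_AesSetKey(&enc, key, sizeof(key), iv, AES_ENCRYPTION);
    if (ret == 0)
        ret = wc_AesCbcEncrypt(&enc, out, msg, sizeof(msg));
    wc_AesFree(&enc);
    return ret;
}
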
#ifdef WOLFSSL_AES_COUNTER
int wc_AesCtrEncrypt(Aes* aes, byte* out, const byte* in, word32 sz)
{
    byte* tmp;
    word32 numBlocks;

    if (aes == NULL || out == NULL || in == NULL) {
        return BAD_FUNC_ARG;
    }

    if (aes->rounds != 10 && aes->rounds != 12 && aes->rounds != 14) {
        WOLFSSL_ERROR_VERBOSE(KEYUSAGE_E);
        return KEYUSAGE_E;
    }

    tmp = (byte*)aes->tmp + AES_BLOCK_SIZE - aes->left;
    /* consume any unused bytes left in aes->tmp */
    while ((aes->left != 0) && (sz != 0)) {
        *(out++) = *(in++) ^ *(tmp++);
        aes->left--;
        sz--;
    }

    /* do as many block size ops as possible */
    numBlocks = sz / AES_BLOCK_SIZE;
    if (numBlocks > 0) {
        AES_CTR_encrypt(in, out, numBlocks * AES_BLOCK_SIZE, (byte*)aes->key,
            aes->rounds, (byte*)aes->reg);

        sz -= numBlocks * AES_BLOCK_SIZE;
        out += numBlocks * AES_BLOCK_SIZE;
        in += numBlocks * AES_BLOCK_SIZE;
    }

    /* handle non block size remaining */
    if (sz) {
        byte zeros[AES_BLOCK_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 0,
                                       0, 0, 0, 0, 0, 0, 0, 0 };

        AES_CTR_encrypt(zeros, (byte*)aes->tmp, AES_BLOCK_SIZE, (byte*)aes->key,
            aes->rounds, (byte*)aes->reg);

        aes->left = AES_BLOCK_SIZE;
        tmp = (byte*)aes->tmp;

        while (sz--) {
            *(out++) = *(in++) ^ *(tmp++);
            aes->left--;
        }
    }
    return 0;
}
#endif

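The CTR wrapper above caches unused keystream bytes in aes->tmp so a stream can continue across calls without wasting keystream. A small sketch (not part of the patch; sizes chosen for illustration, aes assumed already keyed for counter mode):

/* Illustrative only: 20 bytes then 12 bytes over one CTR stream. */
#include <wolfssl/wolfcrypt/aes.h>

int ctr_split_example(Aes* aes, const byte msg[32], byte out[32])
{
    int ret = wc_AesCtrEncrypt(aes, out, msg, 20);           /* leaves aes->left == 12 */
    if (ret == 0)
        ret = wc_AesCtrEncrypt(aes, out + 20, msg + 20, 12); /* drains the cached keystream */
    return ret;
}
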
#ifdef HAVE_AESCCM
/* Software version of AES-CCM from wolfcrypt/src/aes.c
 * Gets some speed up from hardware acceleration of wc_AesEncrypt */

static void roll_x(Aes* aes, const byte* in, word32 inSz, byte* out)
{
    /* process the bulk of the data */
    while (inSz >= AES_BLOCK_SIZE) {
        xorbuf(out, in, AES_BLOCK_SIZE);
        in += AES_BLOCK_SIZE;
        inSz -= AES_BLOCK_SIZE;

        wc_AesEncrypt(aes, out, out);
    }

    /* process remainder of the data */
    if (inSz > 0) {
        xorbuf(out, in, inSz);
        wc_AesEncrypt(aes, out, out);
    }
}


static void roll_auth(Aes* aes, const byte* in, word32 inSz, byte* out)
{
    word32 authLenSz;
    word32 remainder;

    /* encode the length in */
    if (inSz <= 0xFEFF) {
        authLenSz = 2;
        out[0] ^= ((inSz & 0xFF00) >> 8);
        out[1] ^=  (inSz & 0x00FF);
    }
    else if (inSz <= 0xFFFFFFFF) {
        authLenSz = 6;
        out[0] ^= 0xFF; out[1] ^= 0xFE;
        out[2] ^= ((inSz & 0xFF000000) >> 24);
        out[3] ^= ((inSz & 0x00FF0000) >> 16);
        out[4] ^= ((inSz & 0x0000FF00) >>  8);
        out[5] ^=  (inSz & 0x000000FF);
    }
    /* Note, the protocol handles auth data up to 2^64, but we are
     * using 32-bit sizes right now, so the bigger data isn't handled
     * else if (inSz <= 0xFFFFFFFFFFFFFFFF) {} */
    else
        return;

    /* start fill out the rest of the first block */
    remainder = AES_BLOCK_SIZE - authLenSz;
    if (inSz >= remainder) {
        /* plenty of bulk data to fill the remainder of this block */
        xorbuf(out + authLenSz, in, remainder);
        inSz -= remainder;
        in += remainder;
    }
    else {
        /* not enough bulk data, copy what is available, and pad zero */
        xorbuf(out + authLenSz, in, inSz);
        inSz = 0;
    }
    wc_AesEncrypt(aes, out, out);

    if (inSz > 0)
        roll_x(aes, in, inSz, out);
}


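Worked example for the length encoding in roll_auth above (values chosen for illustration): an AAD length of 18 bytes (0x0012, at most 0xFEFF) is XORed into the first two bytes of the block as 0x00 0x12 with authLenSz = 2, leaving 14 bytes of the first block for AAD; a length of 0x12345 uses the six-byte form 0xFF 0xFE 0x00 0x01 0x23 0x45 with authLenSz = 6, matching the CCM auth-data encoding of RFC 3610.
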
static WC_INLINE void AesCcmCtrInc(byte* B, word32 lenSz)
{
    word32 i;

    for (i = 0; i < lenSz; i++) {
        if (++B[AES_BLOCK_SIZE - 1 - i] != 0) return;
    }
}


/* return 0 on success */
int wc_AesCcmEncrypt(Aes* aes, byte* out, const byte* in, word32 inSz,
                   const byte* nonce, word32 nonceSz,
                   byte* authTag, word32 authTagSz,
                   const byte* authIn, word32 authInSz)
{
    byte A[AES_BLOCK_SIZE];
    byte B[AES_BLOCK_SIZE];
    byte lenSz;
    word32 i;
    byte mask = 0xFF;
    word32 wordSz = (word32)sizeof(word32);

    /* sanity check on arguments */
    if (aes == NULL || out == NULL || in == NULL || nonce == NULL
            || authTag == NULL || nonceSz < 7 || nonceSz > 13)
        return BAD_FUNC_ARG;

    if (wc_AesCcmCheckTagSize(authTagSz) != 0) {
        return BAD_FUNC_ARG;
    }

    XMEMCPY(B+1, nonce, nonceSz);
    lenSz = AES_BLOCK_SIZE - 1 - (byte)nonceSz;
    B[0] = (authInSz > 0 ? 64 : 0)
         + (8 * (((byte)authTagSz - 2) / 2))
         + (lenSz - 1);
    for (i = 0; i < lenSz; i++) {
        if (mask && i >= wordSz)
            mask = 0x00;
        B[AES_BLOCK_SIZE - 1 - i] = (inSz >> ((8 * i) & mask)) & mask;
    }

    wc_AesEncrypt(aes, B, A);

    if (authInSz > 0)
        roll_auth(aes, authIn, authInSz, A);
    if (inSz > 0)
        roll_x(aes, in, inSz, A);
    XMEMCPY(authTag, A, authTagSz);

    B[0] = lenSz - 1;
    for (i = 0; i < lenSz; i++)
        B[AES_BLOCK_SIZE - 1 - i] = 0;
    wc_AesEncrypt(aes, B, A);
    xorbuf(authTag, A, authTagSz);

    B[15] = 1;
    while (inSz >= AES_BLOCK_SIZE) {
        wc_AesEncrypt(aes, B, A);
        xorbuf(A, in, AES_BLOCK_SIZE);
        XMEMCPY(out, A, AES_BLOCK_SIZE);

        AesCcmCtrInc(B, lenSz);
        inSz -= AES_BLOCK_SIZE;
        in += AES_BLOCK_SIZE;
        out += AES_BLOCK_SIZE;
    }
    if (inSz > 0) {
        wc_AesEncrypt(aes, B, A);
        xorbuf(A, in, inSz);
        XMEMCPY(out, A, inSz);
    }

    ForceZero(A, AES_BLOCK_SIZE);
    ForceZero(B, AES_BLOCK_SIZE);

    return 0;
}

#ifdef HAVE_AES_DECRYPT
int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz,
                   const byte* nonce, word32 nonceSz,
                   const byte* authTag, word32 authTagSz,
                   const byte* authIn, word32 authInSz)
{
    byte A[AES_BLOCK_SIZE];
    byte B[AES_BLOCK_SIZE];
    byte* o;
    byte lenSz;
    word32 i, oSz;
    int result = 0;
    byte mask = 0xFF;
    word32 wordSz = (word32)sizeof(word32);

    /* sanity check on arguments */
    if (aes == NULL || out == NULL || in == NULL || nonce == NULL
            || authTag == NULL || nonceSz < 7 || nonceSz > 13)
        return BAD_FUNC_ARG;

    if (wc_AesCcmCheckTagSize(authTagSz) != 0) {
        return BAD_FUNC_ARG;
    }

    o = out;
    oSz = inSz;
    XMEMCPY(B+1, nonce, nonceSz);
    lenSz = AES_BLOCK_SIZE - 1 - (byte)nonceSz;

    B[0] = lenSz - 1;
    for (i = 0; i < lenSz; i++)
        B[AES_BLOCK_SIZE - 1 - i] = 0;
    B[15] = 1;

    while (oSz >= AES_BLOCK_SIZE) {
        wc_AesEncrypt(aes, B, A);
        xorbuf(A, in, AES_BLOCK_SIZE);
        XMEMCPY(o, A, AES_BLOCK_SIZE);

        AesCcmCtrInc(B, lenSz);
        oSz -= AES_BLOCK_SIZE;
        in += AES_BLOCK_SIZE;
        o += AES_BLOCK_SIZE;
    }
    if (inSz > 0) {
        wc_AesEncrypt(aes, B, A);
        xorbuf(A, in, oSz);
        XMEMCPY(o, A, oSz);
    }

    for (i = 0; i < lenSz; i++)
        B[AES_BLOCK_SIZE - 1 - i] = 0;
    wc_AesEncrypt(aes, B, A);

    o = out;
    oSz = inSz;

    B[0] = (authInSz > 0 ? 64 : 0)
         + (8 * (((byte)authTagSz - 2) / 2))
         + (lenSz - 1);
    for (i = 0; i < lenSz; i++) {
        if (mask && i >= wordSz)
            mask = 0x00;
        B[AES_BLOCK_SIZE - 1 - i] = (inSz >> ((8 * i) & mask)) & mask;
    }

    wc_AesEncrypt(aes, B, A);

    if (authInSz > 0)
        roll_auth(aes, authIn, authInSz, A);
    if (inSz > 0)
        roll_x(aes, o, oSz, A);

    B[0] = lenSz - 1;
    for (i = 0; i < lenSz; i++)
        B[AES_BLOCK_SIZE - 1 - i] = 0;
    wc_AesEncrypt(aes, B, B);
    xorbuf(A, B, authTagSz);

    if (ConstantCompare(A, authTag, authTagSz) != 0) {
        /* If the authTag check fails, don't keep the decrypted data.
         * Unfortunately, you need the decrypted data to calculate the
         * check value. */
        XMEMSET(out, 0, inSz);
        result = AES_CCM_AUTH_E;
    }

    ForceZero(A, AES_BLOCK_SIZE);
    ForceZero(B, AES_BLOCK_SIZE);
    o = NULL;

    return result;
}
#endif /* HAVE_AES_DECRYPT */
#endif /* HAVE_AESCCM */

#ifdef HAVE_AESGCM
static WC_INLINE void RIGHTSHIFTX(byte* x)
{
    int i;
    int carryIn = 0;
    byte borrow = (0x00 - (x[15] & 0x01)) & 0xE1;

    for (i = 0; i < AES_BLOCK_SIZE; i++) {
        int carryOut = (x[i] & 0x01) << 7;
        x[i] = (byte) ((x[i] >> 1) | carryIn);
        carryIn = carryOut;
    }
    x[0] ^= borrow;
}

static void GenerateM0(Aes* aes)
{
    int i;
    byte (*m)[AES_BLOCK_SIZE] = aes->M0;

    /* 0 times -> 0x0 */
    XMEMSET(m[0x0], 0, AES_BLOCK_SIZE);
    /* 1 times -> 0x8 */
    XMEMCPY(m[0x8], aes->H, AES_BLOCK_SIZE);
    /* 2 times -> 0x4 */
    XMEMCPY(m[0x4], m[0x8], AES_BLOCK_SIZE);
    RIGHTSHIFTX(m[0x4]);
    /* 4 times -> 0x2 */
    XMEMCPY(m[0x2], m[0x4], AES_BLOCK_SIZE);
    RIGHTSHIFTX(m[0x2]);
    /* 8 times -> 0x1 */
    XMEMCPY(m[0x1], m[0x2], AES_BLOCK_SIZE);
    RIGHTSHIFTX(m[0x1]);

    /* 0x3 */
    XMEMCPY(m[0x3], m[0x2], AES_BLOCK_SIZE);
    xorbuf (m[0x3], m[0x1], AES_BLOCK_SIZE);

    /* 0x5 -> 0x7 */
    XMEMCPY(m[0x5], m[0x4], AES_BLOCK_SIZE);
    xorbuf (m[0x5], m[0x1], AES_BLOCK_SIZE);
    XMEMCPY(m[0x6], m[0x4], AES_BLOCK_SIZE);
    xorbuf (m[0x6], m[0x2], AES_BLOCK_SIZE);
    XMEMCPY(m[0x7], m[0x4], AES_BLOCK_SIZE);
    xorbuf (m[0x7], m[0x3], AES_BLOCK_SIZE);

    /* 0x9 -> 0xf */
    XMEMCPY(m[0x9], m[0x8], AES_BLOCK_SIZE);
    xorbuf (m[0x9], m[0x1], AES_BLOCK_SIZE);
    XMEMCPY(m[0xa], m[0x8], AES_BLOCK_SIZE);
    xorbuf (m[0xa], m[0x2], AES_BLOCK_SIZE);
    XMEMCPY(m[0xb], m[0x8], AES_BLOCK_SIZE);
    xorbuf (m[0xb], m[0x3], AES_BLOCK_SIZE);
    XMEMCPY(m[0xc], m[0x8], AES_BLOCK_SIZE);
    xorbuf (m[0xc], m[0x4], AES_BLOCK_SIZE);
    XMEMCPY(m[0xd], m[0x8], AES_BLOCK_SIZE);
    xorbuf (m[0xd], m[0x5], AES_BLOCK_SIZE);
    XMEMCPY(m[0xe], m[0x8], AES_BLOCK_SIZE);
    xorbuf (m[0xe], m[0x6], AES_BLOCK_SIZE);
    XMEMCPY(m[0xf], m[0x8], AES_BLOCK_SIZE);
    xorbuf (m[0xf], m[0x7], AES_BLOCK_SIZE);

    for (i = 0; i < 16; i++) {
        word32* m32 = (word32*)aes->M0[i];
        m32[0] = ByteReverseWord32(m32[0]);
        m32[1] = ByteReverseWord32(m32[1]);
        m32[2] = ByteReverseWord32(m32[2]);
        m32[3] = ByteReverseWord32(m32[3]);
    }
}

int wc_AesGcmSetKey(Aes* aes, const byte* key, word32 len)
{
    int ret;
    byte iv[AES_BLOCK_SIZE];

    if (aes == NULL) {
        return BAD_FUNC_ARG;
    }

    if ((len != 16) && (len != 24) && (len != 32)) {
        return BAD_FUNC_ARG;
    }

    XMEMSET(iv, 0, AES_BLOCK_SIZE);
    ret = wc_AesSetKey(aes, key, len, iv, AES_ENCRYPTION);

    if (ret == 0) {
        AES_ECB_encrypt(iv, aes->H, AES_BLOCK_SIZE,
            (const unsigned char*)aes->key, aes->rounds);
        GenerateM0(aes);
    }

    return ret;
}

static WC_INLINE void IncrementGcmCounter(byte* inOutCtr)
{
    int i;

    /* in network byte order so start at end and work back */
    for (i = AES_BLOCK_SIZE - 1; i >= AES_BLOCK_SIZE - CTR_SZ; i--) {
        if (++inOutCtr[i])  /* we're done unless we overflow */
            return;
    }
}

static WC_INLINE void FlattenSzInBits(byte* buf, word32 sz)
{
    /* Multiply the sz by 8 */
    word32 szHi = (sz >> (8*sizeof(sz) - 3));
    sz <<= 3;

    /* copy over the words of the sz into the destination buffer */
    buf[0] = (szHi >> 24) & 0xff;
    buf[1] = (szHi >> 16) & 0xff;
    buf[2] = (szHi >>  8) & 0xff;
    buf[3] = szHi & 0xff;
    buf[4] = (sz >> 24) & 0xff;
    buf[5] = (sz >> 16) & 0xff;
    buf[6] = (sz >>  8) & 0xff;
    buf[7] = sz & 0xff;
}

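Worked example for FlattenSzInBits above (illustrative value): GHASH needs the AAD and ciphertext lengths in bits as 8-byte big-endian fields, so sz = 32 bytes becomes 256 bits and buf holds 00 00 00 00 00 00 01 00.
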
static void gcm_ghash_arm32(Aes* aes, const byte* a, word32 aSz, const byte* c,
    word32 cSz, byte* s, word32 sSz)
{
    byte x[AES_BLOCK_SIZE];
    byte scratch[AES_BLOCK_SIZE];
    word32 blocks, partial;

    if (aes == NULL) {
        return;
    }

    XMEMSET(x, 0, AES_BLOCK_SIZE);

    /* Hash in A, the Additional Authentication Data */
    if (aSz != 0 && a != NULL) {
        blocks = aSz / AES_BLOCK_SIZE;
        partial = aSz % AES_BLOCK_SIZE;
        if (blocks > 0) {
            GCM_gmult_len(x, aes->M0, a, blocks * AES_BLOCK_SIZE);
            a += blocks * AES_BLOCK_SIZE;
        }
        if (partial != 0) {
            XMEMSET(scratch, 0, AES_BLOCK_SIZE);
            XMEMCPY(scratch, a, partial);
            GCM_gmult_len(x, aes->M0, scratch, AES_BLOCK_SIZE);
        }
    }

    /* Hash in C, the Ciphertext */
    if (cSz != 0 && c != NULL) {
        blocks = cSz / AES_BLOCK_SIZE;
        partial = cSz % AES_BLOCK_SIZE;
        if (blocks > 0) {
            GCM_gmult_len(x, aes->M0, c, blocks * AES_BLOCK_SIZE);
            c += blocks * AES_BLOCK_SIZE;
        }
        if (partial != 0) {
            XMEMSET(scratch, 0, AES_BLOCK_SIZE);
            XMEMCPY(scratch, c, partial);
            GCM_gmult_len(x, aes->M0, scratch, AES_BLOCK_SIZE);
        }
    }

    /* Hash in the lengths of A and C in bits */
    FlattenSzInBits(&scratch[0], aSz);
    FlattenSzInBits(&scratch[8], cSz);
    GCM_gmult_len(x, aes->M0, scratch, AES_BLOCK_SIZE);

    /* Copy the result into s. */
    XMEMCPY(s, x, sSz);
}

int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz,
                   const byte* iv, word32 ivSz,
                   byte* authTag, word32 authTagSz,
                   const byte* authIn, word32 authInSz)
{
    word32 blocks;
    word32 partial;
    byte counter[AES_BLOCK_SIZE];
    byte initialCounter[AES_BLOCK_SIZE];
    byte x[AES_BLOCK_SIZE];
    byte scratch[AES_BLOCK_SIZE];

    /* sanity checks */
    if (aes == NULL || (iv == NULL && ivSz > 0) || (authTag == NULL) ||
            (authIn == NULL && authInSz > 0) || (ivSz == 0)) {
        WOLFSSL_MSG("a NULL parameter passed in when size is larger than 0");
        return BAD_FUNC_ARG;
    }

    if (authTagSz < WOLFSSL_MIN_AUTH_TAG_SZ || authTagSz > AES_BLOCK_SIZE) {
        WOLFSSL_MSG("GcmEncrypt authTagSz error");
        return BAD_FUNC_ARG;
    }

    if (aes->rounds != 10 && aes->rounds != 12 && aes->rounds != 14) {
        WOLFSSL_ERROR_VERBOSE(KEYUSAGE_E);
        return KEYUSAGE_E;
    }

    XMEMSET(initialCounter, 0, AES_BLOCK_SIZE);
    if (ivSz == GCM_NONCE_MID_SZ) {
        XMEMCPY(initialCounter, iv, ivSz);
        initialCounter[AES_BLOCK_SIZE - 1] = 1;
    }
    else {
        gcm_ghash_arm32(aes, NULL, 0, iv, ivSz, initialCounter, AES_BLOCK_SIZE);
    }
    XMEMCPY(counter, initialCounter, AES_BLOCK_SIZE);

    /* Hash in the Additional Authentication Data */
    XMEMSET(x, 0, AES_BLOCK_SIZE);
    if (authInSz != 0 && authIn != NULL) {
        blocks = authInSz / AES_BLOCK_SIZE;
        partial = authInSz % AES_BLOCK_SIZE;
        if (blocks > 0) {
            GCM_gmult_len(x, aes->M0, authIn, blocks * AES_BLOCK_SIZE);
            authIn += blocks * AES_BLOCK_SIZE;
        }
        if (partial != 0) {
            XMEMSET(scratch, 0, AES_BLOCK_SIZE);
            XMEMCPY(scratch, authIn, partial);
            GCM_gmult_len(x, aes->M0, scratch, AES_BLOCK_SIZE);
        }
    }

    /* do as many blocks as possible */
    blocks = sz / AES_BLOCK_SIZE;
    partial = sz % AES_BLOCK_SIZE;
    if (blocks > 0) {
        AES_GCM_encrypt(in, out, blocks * AES_BLOCK_SIZE,
            (const unsigned char*)aes->key, aes->rounds, counter);
        GCM_gmult_len(x, aes->M0, out, blocks * AES_BLOCK_SIZE);
        in += blocks * AES_BLOCK_SIZE;
        out += blocks * AES_BLOCK_SIZE;
    }

    /* take care of partial block sizes leftover */
    if (partial != 0) {
        AES_GCM_encrypt(in, scratch, AES_BLOCK_SIZE,
            (const unsigned char*)aes->key, aes->rounds, counter);
        XMEMCPY(out, scratch, partial);

        XMEMSET(scratch, 0, AES_BLOCK_SIZE);
        XMEMCPY(scratch, out, partial);
        GCM_gmult_len(x, aes->M0, scratch, AES_BLOCK_SIZE);
    }

    /* Hash in the lengths of A and C in bits */
    XMEMSET(scratch, 0, AES_BLOCK_SIZE);
    FlattenSzInBits(&scratch[0], authInSz);
    FlattenSzInBits(&scratch[8], sz);
    GCM_gmult_len(x, aes->M0, scratch, AES_BLOCK_SIZE);
    if (authTagSz > AES_BLOCK_SIZE) {
        XMEMCPY(authTag, x, AES_BLOCK_SIZE);
    }
    else {
        /* authTagSz can be smaller than AES_BLOCK_SIZE */
        XMEMCPY(authTag, x, authTagSz);
    }

    /* Auth tag calculation. */
    AES_ECB_encrypt(initialCounter, scratch, AES_BLOCK_SIZE,
        (const unsigned char*)aes->key, aes->rounds);
    xorbuf(authTag, scratch, authTagSz);

    return 0;
}

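A minimal AES-GCM usage sketch for the functions above (not part of the patch; all values are illustrative, and the 12-byte IV takes the GCM_NONCE_MID_SZ fast path):

/* Illustrative only: authenticated encryption through wc_AesGcmEncrypt. */
#include <wolfssl/wolfcrypt/aes.h>

int gcm_example(void)
{
    Aes gcm;
    byte key[16] = { 0 };
    byte iv[12]  = { 0 };          /* GCM_NONCE_MID_SZ */
    byte aad[20] = { 0 };
    byte msg[40] = { 0 };
    byte out[40];
    byte tag[16];
    int ret;

    ret = wc_AesInit(&gcm, NULL, INVALID_DEVID);
    if (ret == 0)
        ret = wc_AesGcmSetKey(&gcm, key, sizeof(key));
    if (ret == 0)
        ret = wc_AesGcmEncrypt(&gcm, out, msg, sizeof(msg), iv, sizeof(iv),
                               tag, sizeof(tag), aad, sizeof(aad));
    wc_AesFree(&gcm);
    return ret;
}
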
int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz,
    const byte* iv, word32 ivSz, const byte* authTag, word32 authTagSz,
    const byte* authIn, word32 authInSz)
{
    word32 blocks;
    word32 partial;
    byte counter[AES_BLOCK_SIZE];
    byte initialCounter[AES_BLOCK_SIZE];
    byte scratch[AES_BLOCK_SIZE];
    byte x[AES_BLOCK_SIZE];

    /* sanity checks */
    if (aes == NULL || iv == NULL || (sz != 0 && (in == NULL || out == NULL)) ||
            authTag == NULL || authTagSz > AES_BLOCK_SIZE || authTagSz == 0 ||
            ivSz == 0) {
        WOLFSSL_MSG("a NULL parameter passed in when size is larger than 0");
        return BAD_FUNC_ARG;
    }

    XMEMSET(initialCounter, 0, AES_BLOCK_SIZE);
    if (ivSz == GCM_NONCE_MID_SZ) {
        XMEMCPY(initialCounter, iv, ivSz);
        initialCounter[AES_BLOCK_SIZE - 1] = 1;
    }
    else {
        gcm_ghash_arm32(aes, NULL, 0, iv, ivSz, initialCounter, AES_BLOCK_SIZE);
    }
    XMEMCPY(counter, initialCounter, AES_BLOCK_SIZE);

    XMEMSET(x, 0, AES_BLOCK_SIZE);
    /* Hash in the Additional Authentication Data */
    if (authInSz != 0 && authIn != NULL) {
        blocks = authInSz / AES_BLOCK_SIZE;
        partial = authInSz % AES_BLOCK_SIZE;
        if (blocks > 0) {
            GCM_gmult_len(x, aes->M0, authIn, blocks * AES_BLOCK_SIZE);
            authIn += blocks * AES_BLOCK_SIZE;
        }
        if (partial != 0) {
            XMEMSET(scratch, 0, AES_BLOCK_SIZE);
            XMEMCPY(scratch, authIn, partial);
            GCM_gmult_len(x, aes->M0, scratch, AES_BLOCK_SIZE);
        }
    }

    blocks = sz / AES_BLOCK_SIZE;
    partial = sz % AES_BLOCK_SIZE;
    /* do as many blocks as possible */
    if (blocks > 0) {
        GCM_gmult_len(x, aes->M0, in, blocks * AES_BLOCK_SIZE);

        AES_GCM_encrypt(in, out, blocks * AES_BLOCK_SIZE,
            (const unsigned char*)aes->key, aes->rounds, counter);
        in += blocks * AES_BLOCK_SIZE;
        out += blocks * AES_BLOCK_SIZE;
    }
    if (partial != 0) {
        XMEMSET(scratch, 0, AES_BLOCK_SIZE);
        XMEMCPY(scratch, in, partial);
        GCM_gmult_len(x, aes->M0, scratch, AES_BLOCK_SIZE);

        AES_GCM_encrypt(in, scratch, AES_BLOCK_SIZE,
            (const unsigned char*)aes->key, aes->rounds, counter);
        XMEMCPY(out, scratch, partial);
    }

    XMEMSET(scratch, 0, AES_BLOCK_SIZE);
    FlattenSzInBits(&scratch[0], authInSz);
    FlattenSzInBits(&scratch[8], sz);
    GCM_gmult_len(x, aes->M0, scratch, AES_BLOCK_SIZE);
    AES_ECB_encrypt(initialCounter, scratch, AES_BLOCK_SIZE,
        (const unsigned char*)aes->key, aes->rounds);
    xorbuf(x, scratch, authTagSz);
    if (authTag != NULL) {
        if (ConstantCompare(authTag, x, authTagSz) != 0) {
            return AES_GCM_AUTH_E;
        }
    }

    return 0;
}
#endif /* HAVE_AESGCM */
#endif /* !WOLFSSL_ARMASM_NO_HW_CRYPTO */
#endif /* !NO_AES && WOLFSSL_ARMASM */

@@ -989,11 +989,11 @@ static WC_INLINE int wc_Chacha_encrypt_256(const word32 input[CHACHA_CHUNK_WORDS
        "VMOV d4, r8, r9 \n\t"
        "STRD r10, r11, [sp, #4*10] \n\t"
        "VMOV d5, r10, r11 \n\t"
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 8)
        "LDR r11, [r14, #4*14] \n\t"
        "LDR r10, [r14, #4*15] \n\t"
#if defined(WOLFSSL_ARM_ARCH) && (WOLFSSL_ARM_ARCH < 7)
        "LDR r10, [r14, #4*14] \n\t"
        "LDR r11, [r14, #4*15] \n\t"
#else
        "LDRD r11, r10, [r14, #4*14] \n\t"
        "LDRD r10, r11, [r14, #4*14] \n\t"
#endif
        "VMOV q4, q0 \n\t"
        "VMOV q5, q1 \n\t"
@@ -1003,10 +1003,11 @@ static WC_INLINE int wc_Chacha_encrypt_256(const word32 input[CHACHA_CHUNK_WORDS
        "VMOV q10, q2 \n\t"
        // r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12
        //  0  1  2  3  4  5  6  7  8  9  15  14  12
        "VMOV d7, r11, r10 \n\t"
        "STR r10, [sp, #4*15] \n\t"
        "VMOV d15, r11, r10 \n\t"
        "VMOV d23, r11, r10 \n\t"
        "VMOV d7, r10, r11 \n\t"
        "STR r11, [sp, #4*15] \n\t"
        "VMOV d15, r10, r11 \n\t"
        "VMOV d23, r10, r11 \n\t"
        "MOV r11, r10 \n\t"
        "MOV r10, r12 \n\t"
        "MOV r12, r11 \n\t"
        "LDR r11, [r14, #4*13] \n\t"