Mirror of https://github.com/wolfSSL/wolfssl.git, synced 2025-07-31 11:17:29 +02:00
Refactor pointer manipulation to be independent of datatype width
Tested with `./configure CFLAGS="-DNO_64BIT" --disable-sha512 --disable-sha384 --enable-harden` on a 64-bit machine
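For context, the pattern changed throughout the hunks below is the integer cast used for pointer alignment checks and alignment fix-ups. Under `NO_64BIT`, `wolfssl_word` is typedef'd to `word32` even on a 64-bit CPU (see the types.h hunk near the end), so casting a pointer to it truncates the address before the modulo; `wc_ptr_t` is always pointer-width (`WC_PTR_TYPE`, else `uintptr_t`, else `size_t`). A minimal sketch of the idea, not taken from any one file; `EXAMPLE_ALIGN` and `example_is_unaligned` are illustrative names, not wolfSSL identifiers:

```c
#include <stdint.h>

/* Pointer-width unsigned integer, mirroring the wc_ptr_t fallback chain
 * (WC_PTR_TYPE, then uintptr_t, then size_t) added in types.h below. */
typedef uintptr_t wc_ptr_t;

#define EXAMPLE_ALIGN 16u   /* illustrative alignment, not a wolfSSL constant */

/* Alignment check in the form used throughout this diff: cast the pointer
 * to a pointer-width integer before taking the modulus.  Casting to a
 * 32-bit wolfssl_word (the old code, built with NO_64BIT on a 64-bit CPU)
 * would drop the upper address bits before the check. */
static int example_is_unaligned(const void *p)
{
    return ((wc_ptr_t)p % EXAMPLE_ALIGN) != 0;
}
```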
@@ -150,6 +150,7 @@ fi
 AC_PROG_INSTALL
 AC_TYPE_SIZE_T
 AC_TYPE_UINT8_T
+AC_TYPE_UINTPTR_T
 AM_PROG_AS
 LT_LIB_M
 
@@ -565,7 +565,7 @@ block cipher mechanism that uses n-bit binary string parameter key with 128-bits
 {
 if (wolfSSL_CryptHwMutexLock() == 0) {
 #ifdef FREESCALE_MMCAU_CLASSIC
-if ((wolfssl_word)outBlock % WOLFSSL_MMCAU_ALIGNMENT) {
+if ((wc_ptr_t)outBlock % WOLFSSL_MMCAU_ALIGNMENT) {
 WOLFSSL_MSG("Bad cau_aes_encrypt alignment");
 return BAD_ALIGN_E;
 }
@@ -583,7 +583,7 @@ block cipher mechanism that uses n-bit binary string parameter key with 128-bits
 {
 if (wolfSSL_CryptHwMutexLock() == 0) {
 #ifdef FREESCALE_MMCAU_CLASSIC
-if ((wolfssl_word)outBlock % WOLFSSL_MMCAU_ALIGNMENT) {
+if ((wc_ptr_t)outBlock % WOLFSSL_MMCAU_ALIGNMENT) {
 WOLFSSL_MSG("Bad cau_aes_decrypt alignment");
 return BAD_ALIGN_E;
 }
@@ -1728,14 +1728,14 @@ static void wc_AesEncrypt(Aes* aes, const byte* inBlock, byte* outBlock)
 #endif
 
 /* check alignment, decrypt doesn't need alignment */
-if ((wolfssl_word)inBlock % AESNI_ALIGN) {
+if ((wc_ptr_t)inBlock % AESNI_ALIGN) {
 #ifndef NO_WOLFSSL_ALLOC_ALIGN
 byte* tmp = (byte*)XMALLOC(AES_BLOCK_SIZE + AESNI_ALIGN, aes->heap,
 DYNAMIC_TYPE_TMP_BUFFER);
 byte* tmp_align;
 if (tmp == NULL) return;
 
-tmp_align = tmp + (AESNI_ALIGN - ((size_t)tmp % AESNI_ALIGN));
+tmp_align = tmp + (AESNI_ALIGN - ((wc_ptr_t)tmp % AESNI_ALIGN));
 
 XMEMCPY(tmp_align, inBlock, AES_BLOCK_SIZE);
 SAVE_VECTOR_REGISTERS();
@@ -2523,7 +2523,7 @@ static void wc_AesDecrypt(Aes* aes, const byte* inBlock, byte* outBlock)
 aes->rounds = keylen/4 + 6;
 
 #ifdef FREESCALE_MMCAU_CLASSIC
-if ((wolfssl_word)userKey % WOLFSSL_MMCAU_ALIGNMENT) {
+if ((wc_ptr_t)userKey % WOLFSSL_MMCAU_ALIGNMENT) {
 #ifndef NO_WOLFSSL_ALLOC_ALIGN
 byte* tmp = (byte*)XMALLOC(keylen + WOLFSSL_MMCAU_ALIGNMENT,
 aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
@@ -2531,7 +2531,7 @@ static void wc_AesDecrypt(Aes* aes, const byte* inBlock, byte* outBlock)
 return MEMORY_E;
 }
 alignOffset = WOLFSSL_MMCAU_ALIGNMENT -
-((wolfssl_word)tmp % WOLFSSL_MMCAU_ALIGNMENT);
+((wc_ptr_t)tmp % WOLFSSL_MMCAU_ALIGNMENT);
 tmpKey = tmp + alignOffset;
 XMEMCPY(tmpKey, userKey, keylen);
 tmpKeyDynamic = 1;
@@ -3825,14 +3825,14 @@ int wc_AesSetIV(Aes* aes, const byte* iv)
 #endif
 
 /* check alignment, decrypt doesn't need alignment */
-if ((wolfssl_word)in % AESNI_ALIGN) {
+if ((wc_ptr_t)in % AESNI_ALIGN) {
 #ifndef NO_WOLFSSL_ALLOC_ALIGN
 byte* tmp = (byte*)XMALLOC(sz + AES_BLOCK_SIZE + AESNI_ALIGN,
 aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
 byte* tmp_align;
 if (tmp == NULL) return MEMORY_E;
 
-tmp_align = tmp + (AESNI_ALIGN - ((size_t)tmp % AESNI_ALIGN));
+tmp_align = tmp + (AESNI_ALIGN - ((wc_ptr_t)tmp % AESNI_ALIGN));
 XMEMCPY(tmp_align, in, sz);
 SAVE_VECTOR_REGISTERS();
 AES_CBC_encrypt(tmp_align, tmp_align, (byte*)aes->reg, sz,
@@ -152,7 +152,7 @@ int wc_Chacha_SetKey(ChaCha* ctx, const byte* key, word32 keySz)
 return BAD_FUNC_ARG;
 
 #ifdef XSTREAM_ALIGN
-if ((wolfssl_word)key % 4) {
+if ((wc_ptr_t)key % 4) {
 WOLFSSL_MSG("wc_ChachaSetKey unaligned key");
 XMEMCPY(alignKey, key, keySz);
 k = (byte*)alignKey;
@@ -888,7 +888,7 @@
 iv = (byte*)des->reg;
 
 #ifdef FREESCALE_MMCAU_CLASSIC
-if ((wolfssl_word)out % WOLFSSL_MMCAU_ALIGNMENT) {
+if ((wc_ptr_t)out % WOLFSSL_MMCAU_ALIGNMENT) {
 WOLFSSL_MSG("Bad cau_des_encrypt alignment");
 return BAD_ALIGN_E;
 }
@@ -935,7 +935,7 @@
 iv = (byte*)des->reg;
 
 #ifdef FREESCALE_MMCAU_CLASSIC
-if ((wolfssl_word)out % WOLFSSL_MMCAU_ALIGNMENT) {
+if ((wc_ptr_t)out % WOLFSSL_MMCAU_ALIGNMENT) {
 WOLFSSL_MSG("Bad cau_des_decrypt alignment");
 return BAD_ALIGN_E;
 }
@@ -984,7 +984,7 @@
 iv = (byte*)des->reg;
 
 #ifdef FREESCALE_MMCAU_CLASSIC
-if ((wolfssl_word)out % WOLFSSL_MMCAU_ALIGNMENT) {
+if ((wc_ptr_t)out % WOLFSSL_MMCAU_ALIGNMENT) {
 WOLFSSL_MSG("Bad 3ede cau_des_encrypt alignment");
 return BAD_ALIGN_E;
 }
@@ -1036,7 +1036,7 @@
 iv = (byte*)des->reg;
 
 #ifdef FREESCALE_MMCAU_CLASSIC
-if ((wolfssl_word)out % WOLFSSL_MMCAU_ALIGNMENT) {
+if ((wc_ptr_t)out % WOLFSSL_MMCAU_ALIGNMENT) {
 WOLFSSL_MSG("Bad 3ede cau_des_decrypt alignment");
 return BAD_ALIGN_E;
 }
@@ -317,7 +317,7 @@ int wc_Hc128_SetKey(HC128* ctx, const byte* key, const byte* iv)
 ctx->heap = NULL;
 #endif /* WOLFSSL_HEAP_TEST */
 
-if ((wolfssl_word)key % 4) {
+if ((wc_ptr_t)key % 4) {
 int alignKey[4];
 
 /* iv gets aligned in SetIV */
@@ -393,7 +393,7 @@ int wc_Hc128_Process(HC128* ctx, byte* output, const byte* input, word32 msglen)
 }
 
 #ifdef XSTREAM_ALIGN
-if ((wolfssl_word)input % 4 || (wolfssl_word)output % 4) {
+if ((wc_ptr_t)input % 4 || (wc_ptr_t)output % 4) {
 #ifndef NO_WOLFSSL_ALLOC_ALIGN
 byte* tmp;
 WOLFSSL_MSG("Hc128Process unaligned");
@@ -139,7 +139,7 @@ static int Transform_Len(wc_Md5* md5, const byte* data, word32 len)
 int ret = wolfSSL_CryptHwMutexLock();
 if (ret == 0) {
 #if defined(WC_HASH_DATA_ALIGNMENT) && WC_HASH_DATA_ALIGNMENT > 0
-if ((size_t)data % WC_HASH_DATA_ALIGNMENT) {
+if ((wc_ptr_t)data % WC_HASH_DATA_ALIGNMENT) {
 /* data pointer is NOT aligned,
 * so copy and perform one block at a time */
 byte* local = (byte*)md5->buffer;
@@ -405,7 +405,7 @@ int wc_Md5Update(wc_Md5* md5, const byte* data, word32 len)
 /* optimization to avoid memcpy if data pointer is properly aligned */
 /* Big Endian requires byte swap, so can't use data directly */
 #if defined(WC_HASH_DATA_ALIGNMENT) && !defined(BIG_ENDIAN_ORDER)
-if (((size_t)data % WC_HASH_DATA_ALIGNMENT) == 0) {
+if (((wc_ptr_t)data % WC_HASH_DATA_ALIGNMENT) == 0) {
 local32 = (word32*)data;
 }
 else
@@ -392,7 +392,7 @@ int wolfSSL_load_static_memory(byte* buffer, word32 sz, int flag,
 }
 
 /* align pt */
-while ((wolfssl_word)pt % WOLFSSL_STATIC_ALIGN && pt < (buffer + sz)) {
+while ((wc_ptr_t)pt % WOLFSSL_STATIC_ALIGN && pt < (buffer + sz)) {
 *pt = 0x00;
 pt++;
 ava--;
@@ -475,7 +475,7 @@ int wolfSSL_StaticBufferSz(byte* buffer, word32 sz, int flag)
 }
 
 /* align pt */
-while ((wolfssl_word)pt % WOLFSSL_STATIC_ALIGN && pt < (buffer + sz)) {
+while ((wc_ptr_t)pt % WOLFSSL_STATIC_ALIGN && pt < (buffer + sz)) {
 pt++;
 ava--;
 }
@@ -716,7 +716,7 @@ void* wolfSSL_Malloc(size_t size, void* heap, int type)
 }
 
 #ifdef WOLFSSL_MALLOC_CHECK
-if ((wolfssl_word)res % WOLFSSL_STATIC_ALIGN) {
+if ((wc_ptr_t)res % WOLFSSL_STATIC_ALIGN) {
 WOLFSSL_MSG("ERROR memory is not aligned");
 res = NULL;
 }
@@ -935,7 +935,7 @@ void* wolfSSL_Realloc(void *ptr, size_t size, void* heap, int type)
 }
 
 #ifdef WOLFSSL_MALLOC_CHECK
-if ((wolfssl_word)res % WOLFSSL_STATIC_ALIGN) {
+if ((wc_ptr_t)res % WOLFSSL_STATIC_ALIGN) {
 WOLFSSL_MSG("ERROR memory is not aligned");
 res = NULL;
 }
@@ -1113,7 +1113,7 @@ void __attribute__((no_instrument_function))
 __cyg_profile_func_enter(void *func, void *caller)
 {
 register void* sp asm("sp");
-fprintf(stderr, "ENTER: %016lx %p\n", (unsigned long)(size_t)func, sp);
+fprintf(stderr, "ENTER: %016lx %p\n", (unsigned long)(wc_ptr_t)func, sp);
 (void)caller;
 }
 
@@ -1121,7 +1121,7 @@ void __attribute__((no_instrument_function))
 __cyg_profile_func_exit(void *func, void *caller)
 {
 register void* sp asm("sp");
-fprintf(stderr, "EXIT: %016lx %p\n", (unsigned long)(size_t)func, sp);
+fprintf(stderr, "EXIT: %016lx %p\n", (unsigned long)(wc_ptr_t)func, sp);
 (void)caller;
 }
 #endif
@@ -255,7 +255,7 @@ counts, placing the result in <*buf>. */
 WC_STATIC WC_INLINE void xorbufout(void*out, const void* buf, const void* mask,
 word32 count)
 {
-if (((wolfssl_word)out | (wolfssl_word)buf | (wolfssl_word)mask | count) % \
+if (((wc_ptr_t)out | (wc_ptr_t)buf | (wc_ptr_t)mask | count) %
 WOLFSSL_WORD_SIZE == 0)
 XorWordsOut( (wolfssl_word*)out, (wolfssl_word*)buf,
 (const wolfssl_word*)mask, count / WOLFSSL_WORD_SIZE);
@@ -283,7 +283,7 @@ counts, placing the result in <*buf>. */
 
 WC_STATIC WC_INLINE void xorbuf(void* buf, const void* mask, word32 count)
 {
-if (((wolfssl_word)buf | (wolfssl_word)mask | count) % WOLFSSL_WORD_SIZE == 0)
+if (((wc_ptr_t)buf | (wc_ptr_t)mask | count) % WOLFSSL_WORD_SIZE == 0)
 XorWords( (wolfssl_word*)buf,
 (const wolfssl_word*)mask, count / WOLFSSL_WORD_SIZE);
 else {
@@ -588,7 +588,7 @@ int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz,
 #ifndef NO_WOLFSSL_ALLOC_ALIGN
 byte* tmp = NULL;
 #endif
-if ((wolfssl_word)in % WOLFSSL_XILINX_ALIGN) {
+if ((wc_ptr_t)in % WOLFSSL_XILINX_ALIGN) {
 #ifndef NO_WOLFSSL_ALLOC_ALIGN
 byte* tmp_align;
 tmp = (byte*)XMALLOC(sz + WOLFSSL_XILINX_ALIGN +
@@ -787,7 +787,7 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz,
 
 /* it is assumed that in buffer size is large enough to hold TAG */
 XMEMCPY((byte*)in + sz, tag, AES_BLOCK_SIZE);
-if ((wolfssl_word)in % WOLFSSL_XILINX_ALIGN) {
+if ((wc_ptr_t)in % WOLFSSL_XILINX_ALIGN) {
 #ifndef NO_WOLFSSL_ALLOC_ALIGN
 byte* tmp_align;
 tmp = (byte*)XMALLOC(sz + WOLFSSL_XILINX_ALIGN +
@@ -124,7 +124,7 @@ int wc_Chacha_SetKey(ChaCha* ctx, const byte* key, word32 keySz)
 return BAD_FUNC_ARG;
 
 #ifdef XSTREAM_ALIGN
-if ((wolfssl_word)key % 4) {
+if ((wc_ptr_t)key % 4) {
 WOLFSSL_MSG("wc_ChachaSetKey unaligned key");
 XMEMCPY(alignKey, key, keySz);
 k = (byte*)alignKey;
@@ -230,7 +230,7 @@ int wc_RabbitSetKey(Rabbit* ctx, const byte* key, const byte* iv)
 ctx->heap = NULL;
 #endif /* WOLFSSL_HEAP_TEST */
 
-if ((wolfssl_word)key % 4) {
+if ((wc_ptr_t)key % 4) {
 int alignKey[4];
 
 /* iv aligned in SetIV */
@@ -314,7 +314,7 @@ int wc_RabbitProcess(Rabbit* ctx, byte* output, const byte* input, word32 msglen
 }
 
 #ifdef XSTREAM_ALIGN
-if ((wolfssl_word)input % 4 || (wolfssl_word)output % 4) {
+if ((wc_ptr_t)input % 4 || (wc_ptr_t)output % 4) {
 #ifndef NO_WOLFSSL_ALLOC_ALIGN
 byte* tmp;
 WOLFSSL_MSG("wc_RabbitProcess unaligned");
@@ -1608,7 +1608,7 @@ static int wc_GenerateRand_IntelRD(OS_Seed* os, byte* output, word32 sz)
 {
 /* If not aligned or there is odd/remainder */
 if( (i + sizeof(CUSTOM_RAND_TYPE)) > sz ||
-((wolfssl_word)&output[i] % sizeof(CUSTOM_RAND_TYPE)) != 0
+((wc_ptr_t)&output[i] % sizeof(CUSTOM_RAND_TYPE)) != 0
 ) {
 /* Single byte at a time */
 output[i++] = (byte)CUSTOM_RAND_GENERATE();
@@ -1961,7 +1961,7 @@ int wc_GenerateSeed(OS_Seed* os, byte* output, word32 sz)
 while (i < sz) {
 /* If not aligned or there is odd/remainder */
 if( (i + sizeof(word32)) > sz ||
-((wolfssl_word)&output[i] % sizeof(word32)) != 0
+((wc_ptr_t)&output[i] % sizeof(word32)) != 0
 ) {
 /* Single byte at a time */
 uint32_t tmpRng = 0;
@@ -2535,11 +2535,11 @@ static int sakke_modexp_loop(SakkeKey* key, mp_int* b, mp_int* e, mp_proj* r,
 #else
 err = sakke_proj_mul_qx1(c[0], by, prime, mp, c[2], t1, t2);
 mp_copy(c[2]->x,
-(mp_int*) ( ((wolfssl_word)c[0]->x & wc_off_on_addr[j]) +
-((wolfssl_word)c[1]->x & wc_off_on_addr[j^1]) ) );
+(mp_int*) ( ((wc_ptr_t)c[0]->x & wc_off_on_addr[j]) +
+((wc_ptr_t)c[1]->x & wc_off_on_addr[j^1]) ) );
 mp_copy(c[2]->y,
-(mp_int*) ( ((wolfssl_word)c[0]->y & wc_off_on_addr[j]) +
-((wolfssl_word)c[1]->y & wc_off_on_addr[j^1]) ) );
+(mp_int*) ( ((wc_ptr_t)c[0]->y & wc_off_on_addr[j]) +
+((wc_ptr_t)c[1]->y & wc_off_on_addr[j^1]) ) );
 #endif
 }
 }
@@ -254,7 +254,7 @@
 int ret = wolfSSL_CryptHwMutexLock();
 if (ret == 0) {
 #if defined(WC_HASH_DATA_ALIGNMENT) && WC_HASH_DATA_ALIGNMENT > 0
-if ((size_t)data % WC_HASH_DATA_ALIGNMENT) {
+if ((wc_ptr_t)data % WC_HASH_DATA_ALIGNMENT) {
 /* data pointer is NOT aligned,
 * so copy and perform one block at a time */
 byte* local = (byte*)sha->buffer;
@@ -612,7 +612,7 @@ int wc_ShaUpdate(wc_Sha* sha, const byte* data, word32 len)
 /* optimization to avoid memcpy if data pointer is properly aligned */
 /* Little Endian requires byte swap, so can't use data directly */
 #if defined(WC_HASH_DATA_ALIGNMENT) && !defined(LITTLE_ENDIAN_ORDER)
-if (((size_t)data % WC_HASH_DATA_ALIGNMENT) == 0) {
+if (((wc_ptr_t)data % WC_HASH_DATA_ALIGNMENT) == 0) {
 local32 = (word32*)data;
 }
 else
@@ -498,7 +498,7 @@ static int InitSha256(wc_Sha256* sha256)
 int ret = wolfSSL_CryptHwMutexLock();
 if (ret == 0) {
 #if defined(WC_HASH_DATA_ALIGNMENT) && WC_HASH_DATA_ALIGNMENT > 0
-if ((size_t)data % WC_HASH_DATA_ALIGNMENT) {
+if ((wc_ptr_t)data % WC_HASH_DATA_ALIGNMENT) {
 /* data pointer is NOT aligned,
 * so copy and perform one block at a time */
 byte* local = (byte*)sha256->buffer;
@@ -1055,7 +1055,7 @@ static int InitSha256(wc_Sha256* sha256)
 #if defined(WC_HASH_DATA_ALIGNMENT) && !defined(LITTLE_ENDIAN_ORDER) && \
 !(defined(USE_INTEL_SPEEDUP) && \
 (defined(HAVE_INTEL_AVX1) || defined(HAVE_INTEL_AVX2)))
-if (((size_t)data % WC_HASH_DATA_ALIGNMENT) == 0) {
+if (((wc_ptr_t)data % WC_HASH_DATA_ALIGNMENT) == 0) {
 local32 = (word32*)data;
 }
 else
@@ -1840,8 +1840,8 @@ int fp_exptmod_nb(exptModNb_t* nb, fp_int* G, fp_int* X, fp_int* P, fp_int* Y)
 #ifdef WC_NO_CACHE_RESISTANT
 err = fp_sqr(&nb->R[nb->y], &nb->R[nb->y]);
 #else
-fp_copy((fp_int*) ( ((wolfssl_word)&nb->R[0] & wc_off_on_addr[nb->y^1]) +
-((wolfssl_word)&nb->R[1] & wc_off_on_addr[nb->y]) ),
+fp_copy((fp_int*) ( ((wc_ptr_t)&nb->R[0] & wc_off_on_addr[nb->y^1]) +
+((wc_ptr_t)&nb->R[1] & wc_off_on_addr[nb->y]) ),
 &nb->R[2]);
 err = fp_sqr(&nb->R[2], &nb->R[2]);
 #endif /* WC_NO_CACHE_RESISTANT */
@@ -1859,8 +1859,8 @@ int fp_exptmod_nb(exptModNb_t* nb, fp_int* G, fp_int* X, fp_int* P, fp_int* Y)
 #else
 fp_montgomery_reduce(&nb->R[2], P, nb->mp);
 fp_copy(&nb->R[2],
-(fp_int*) ( ((wolfssl_word)&nb->R[0] & wc_off_on_addr[nb->y^1]) +
-((wolfssl_word)&nb->R[1] & wc_off_on_addr[nb->y]) ) );
+(fp_int*) ( ((wc_ptr_t)&nb->R[0] & wc_off_on_addr[nb->y^1]) +
+((wc_ptr_t)&nb->R[1] & wc_off_on_addr[nb->y]) ) );
 #endif /* WC_NO_CACHE_RESISTANT */
 
 nb->state = TFM_EXPTMOD_NB_NEXT;
@@ -2033,14 +2033,14 @@ static int _fp_exptmod_ct(fp_int * G, fp_int * X, int digits, fp_int * P,
 * use R[2] as temp, make sure address calc is constant, keep
 * &R[0] and &R[1] in cache */
 fp_copy(&R[2],
-(fp_int*) ( ((wolfssl_word)&R[0] & wc_off_on_addr[y]) +
-((wolfssl_word)&R[1] & wc_off_on_addr[y^1]) ) );
+(fp_int*) ( ((wc_ptr_t)&R[0] & wc_off_on_addr[y]) +
+((wc_ptr_t)&R[1] & wc_off_on_addr[y^1]) ) );
 
 /* instead of using R[y] for sqr, which leaks key bit to cache monitor,
 * use R[2] as temp, make sure address calc is constant, keep
 * &R[0] and &R[1] in cache */
-fp_copy((fp_int*) ( ((wolfssl_word)&R[0] & wc_off_on_addr[y^1]) +
-((wolfssl_word)&R[1] & wc_off_on_addr[y]) ),
+fp_copy((fp_int*) ( ((wc_ptr_t)&R[0] & wc_off_on_addr[y^1]) +
+((wc_ptr_t)&R[1] & wc_off_on_addr[y]) ),
 &R[2]);
 err = fp_sqr(&R[2], &R[2]);
 if (err != FP_OKAY) {
@@ -2057,8 +2057,8 @@ static int _fp_exptmod_ct(fp_int * G, fp_int * X, int digits, fp_int * P,
 return err;
 }
 fp_copy(&R[2],
-(fp_int*) ( ((wolfssl_word)&R[0] & wc_off_on_addr[y^1]) +
-((wolfssl_word)&R[1] & wc_off_on_addr[y]) ) );
+(fp_int*) ( ((wc_ptr_t)&R[0] & wc_off_on_addr[y^1]) +
+((wc_ptr_t)&R[1] & wc_off_on_addr[y]) ) );
 #endif /* WC_NO_CACHE_RESISTANT */
 }
 
@@ -54,7 +54,7 @@
 
 /* all off / all on pointer addresses for constant calculations */
 /* ecc.c uses same table */
-const wolfssl_word wc_off_on_addr[2] =
+const wc_ptr_t wc_off_on_addr[2] =
 {
 #if defined(WC_64BIT_CPU)
 W64LIT(0x0000000000000000),
@@ -11778,7 +11778,7 @@ WOLFSSL_TEST_SUBROUTINE int memory_test(void)
 word32 size[] = { WOLFMEM_BUCKETS };
 word32 dist[] = { WOLFMEM_DIST };
 byte buffer[30000]; /* make large enough to involve many bucket sizes */
-int pad = -(int)((wolfssl_word)buffer) & (WOLFSSL_STATIC_ALIGN - 1);
+int pad = -(int)((wc_ptr_t)buffer) & (WOLFSSL_STATIC_ALIGN - 1);
 /* pad to account for if head of buffer is not at set memory
 * alignment when tests are ran */
 #endif
@@ -154,13 +154,12 @@ decouple library dependencies with standard string, memory and so on.
 typedef unsigned long long word64;
 #endif
 
-#if !defined(NO_64BIT) && defined(WORD64_AVAILABLE) && !defined(WC_16BIT_CPU)
+#if defined(WORD64_AVAILABLE) && !defined(WC_16BIT_CPU)
 /* These platforms have 64-bit CPU registers. */
 #if (defined(__alpha__) || defined(__ia64__) || defined(_ARCH_PPC64) || \
 defined(__mips64) || defined(__x86_64__) || defined(_M_X64)) || \
 defined(__aarch64__) || defined(__sparc64__) || defined(__s390x__ ) || \
 (defined(__riscv_xlen) && (__riscv_xlen == 64)) || defined(_M_ARM64)
-typedef word64 wolfssl_word;
 #define WC_64BIT_CPU
 #elif (defined(sun) || defined(__sun)) && \
 (defined(LP64) || defined(_LP64))
@@ -168,16 +167,25 @@ decouple library dependencies with standard string, memory and so on.
 * and int uses 32 bits. When using Solaris Studio sparc and __sparc are
 * available for 32 bit detection but __sparc64__ could be missed. This
 * uses LP64 for checking 64 bit CPU arch. */
-typedef word64 wolfssl_word;
 #define WC_64BIT_CPU
 #else
-typedef word32 wolfssl_word;
-#ifdef WORD64_AVAILABLE
-#define WOLFCRYPT_SLOW_WORD64
-#endif
 #define WC_32BIT_CPU
 #endif
 
+#if defined(NO_64BIT)
+typedef word32 wolfssl_word;
+#undef WORD64_AVAILABLE
+#else
+#ifdef WC_64BIT_CPU
+typedef word64 wolfssl_word;
+#else
+typedef word32 wolfssl_word;
+#ifdef WORD64_AVAILABLE
+#define WOLFCRYPT_SLOW_WORD64
+#endif
+#endif
+#endif
+
 #elif defined(WC_16BIT_CPU)
 #undef WORD64_AVAILABLE
 typedef word16 wolfssl_word;
@@ -189,7 +197,15 @@ decouple library dependencies with standard string, memory and so on.
 typedef word32 wolfssl_word;
 #define MP_16BIT /* for mp_int, mp_word needs to be twice as big as
 mp_digit, no 64 bit type so make mp_digit 16 bit */
 #define WC_32BIT_CPU
 #endif
 
+#ifdef WC_PTR_TYPE /* Allow user suppied type */
+typedef WC_PTR_TYPE wc_ptr_t;
+#elif defined(HAVE_UINTPTR_T)
+#include <stdint.h>
+typedef uintptr_t wc_ptr_t;
+#else /* fallback to architecture size_t for pointer size */
+typedef size_t wc_ptr_t;
+#endif
 enum {
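The hunk above adds the wc_ptr_t fallback chain (user-supplied `WC_PTR_TYPE`, then `uintptr_t` when configure detects it, then `size_t`). Ports that define `WC_PTR_TYPE` themselves could guard against picking a type narrower than a pointer with a check along these lines; this sketch is not part of the commit, and the typedef name used here is hypothetical:

```c
#include <stdint.h>
#include <stddef.h>

/* Stand-in for the selected type; a real build would take it from types.h. */
typedef uintptr_t wc_ptr_t;

/* C89-compatible compile-time check: the array size becomes -1 and the
 * translation unit fails to build if wc_ptr_t cannot hold a data pointer. */
typedef char wc_ptr_t_holds_a_pointer[(sizeof(wc_ptr_t) >= sizeof(void *)) ? 1 : -1];
```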
@@ -50,7 +50,7 @@ This library provides big integer math functions.
 ((defined(HAVE_ECC) && defined(ECC_TIMING_RESISTANT)) || \
 (defined(USE_FAST_MATH) && defined(TFM_TIMING_RESISTANT)))
 
-extern const wolfssl_word wc_off_on_addr[2];
+extern const wc_ptr_t wc_off_on_addr[2];
 #endif
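The `wc_off_on_addr` table changed above (and used in the `sakke_modexp_loop`, `fp_exptmod_nb`, and `_fp_exptmod_ct` hunks) holds an all-zero mask at index 0 and an all-one mask at index 1, so a cache-timing-resistant two-way select can be done with AND and ADD instead of a branch. Widening the mask to `wc_ptr_t` matters because a 32-bit `wolfssl_word` mask would zero the upper half of a 64-bit address. A minimal sketch of the pattern, with illustrative names (`example_off_on`, `example_select`) rather than wolfSSL's own helpers:

```c
#include <stdint.h>

typedef uintptr_t wc_ptr_t;  /* as selected in the types.h hunk above */

/* masks[0] = all zero bits, masks[1] = all one bits, mirroring wc_off_on_addr */
static const wc_ptr_t example_off_on[2] = { 0, (wc_ptr_t)-1 };

/* Branch-free select: returns a when pick_b == 0 and b when pick_b == 1.
 * Exactly one of the two AND terms survives, so the access pattern does not
 * depend on the secret bit.  pick_b must be exactly 0 or 1. */
static void *example_select(void *a, void *b, int pick_b)
{
    return (void *)(((wc_ptr_t)a & example_off_on[pick_b ^ 1]) +
                    ((wc_ptr_t)b & example_off_on[pick_b]));
}
```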