linuxkm: cleanups and smallstack refactors related to WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED, associated linuxkm-SIMD-IRQ PR, and associated peer review:

smallstack refactors for wolfcrypt/src/rsa.c:wc_CheckProbablePrime_ex() and wolfcrypt/src/pwdbased.c:wc_PKCS12_PBKDF_ex();

add WARN_UNUSED_RESULT macro to types.h;

text format cleanup;

fix internal.c:LowResTimer() implementation.

refactor tls13.c:TimeNowInMilliseconds() for kernel 4.9 and 3.16 compat.

use ktime_get_coarse_real_ts64() only for kernel 5.x+.  In kernel 4.x, use its older form, current_kernel_time64(), and in 3.x, use getnstimeofday().

linuxkm/module_hooks.c: fix wolfssl_init() pie code to be compatible with kernel 4.4-;

fix allocate_wolfcrypt_irq_fpu_states() return codes to all be wolfcrypt codes, and in calling code, pass up that code (suggested by dgarske peer review).
This commit is contained in:
Daniel Pouzzner
2021-09-20 13:46:51 -05:00
parent ec21dd6d13
commit 6d715130a2
16 changed files with 415 additions and 212 deletions

View File

@ -123,6 +123,17 @@ static int wolfssl_init(void)
#endif #endif
#ifdef HAVE_LINUXKM_PIE_SUPPORT #ifdef HAVE_LINUXKM_PIE_SUPPORT
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
#define THIS_MODULE_BASE (THIS_MODULE->core_layout.base)
#define THIS_MODULE_TEXT_SIZE (THIS_MODULE->core_layout.text_size)
#define THIS_MODULE_RO_SIZE (THIS_MODULE->core_layout.ro_size)
#else
#define THIS_MODULE_BASE (THIS_MODULE->module_core)
#define THIS_MODULE_TEXT_SIZE (THIS_MODULE->core_text_size)
#define THIS_MODULE_RO_SIZE (THIS_MODULE->core_ro_size)
#endif
{ {
char *pie_text_start = (char *)wolfCrypt_PIE_first_function; char *pie_text_start = (char *)wolfCrypt_PIE_first_function;
char *pie_text_end = (char *)wolfCrypt_PIE_last_function; char *pie_text_end = (char *)wolfCrypt_PIE_last_function;
@ -131,8 +142,8 @@ static int wolfssl_init(void)
unsigned int text_hash, rodata_hash; unsigned int text_hash, rodata_hash;
if ((pie_text_start < pie_text_end) && if ((pie_text_start < pie_text_end) &&
(pie_text_start >= (char *)(THIS_MODULE->core_layout.base)) && (pie_text_start >= (char *)THIS_MODULE_BASE) &&
(pie_text_end - (char *)(THIS_MODULE->core_layout.base) <= THIS_MODULE->core_layout.text_size)) (pie_text_end - (char *)THIS_MODULE_BASE <= THIS_MODULE_TEXT_SIZE))
{ {
text_hash = hash_span(pie_text_start, pie_text_end); text_hash = hash_span(pie_text_start, pie_text_end);
} else { } else {
@ -141,14 +152,14 @@ static int wolfssl_init(void)
pie_text_start, pie_text_start,
pie_text_end, pie_text_end,
pie_text_end-pie_text_start, pie_text_end-pie_text_start,
THIS_MODULE->core_layout.base, THIS_MODULE_BASE,
(char *)(THIS_MODULE->core_layout.base) + THIS_MODULE->core_layout.text_size); (char *)THIS_MODULE_BASE + THIS_MODULE_TEXT_SIZE);
text_hash = 0; text_hash = 0;
} }
if ((pie_rodata_start < pie_rodata_end) && if ((pie_rodata_start < pie_rodata_end) &&
(pie_rodata_start >= (char *)(THIS_MODULE->core_layout.base) + THIS_MODULE->core_layout.text_size) && (pie_rodata_start >= (char *)THIS_MODULE_BASE + THIS_MODULE_TEXT_SIZE) &&
(pie_rodata_end - (char *)(THIS_MODULE->core_layout.base) <= THIS_MODULE->core_layout.ro_size)) (pie_rodata_end - (char *)THIS_MODULE_BASE <= THIS_MODULE_RO_SIZE))
{ {
rodata_hash = hash_span(pie_rodata_start, pie_rodata_end); rodata_hash = hash_span(pie_rodata_start, pie_rodata_end);
} else { } else {
@ -157,8 +168,8 @@ static int wolfssl_init(void)
pie_rodata_start, pie_rodata_start,
pie_rodata_end, pie_rodata_end,
pie_rodata_end-pie_rodata_start, pie_rodata_end-pie_rodata_start,
(char *)(THIS_MODULE->core_layout.base) + THIS_MODULE->core_layout.text_size, (char *)THIS_MODULE_BASE + THIS_MODULE_TEXT_SIZE,
(char *)(THIS_MODULE->core_layout.base) + THIS_MODULE->core_layout.ro_size); (char *)THIS_MODULE_BASE + THIS_MODULE_RO_SIZE);
rodata_hash = 0; rodata_hash = 0;
} }
@ -169,7 +180,7 @@ static int wolfssl_init(void)
pr_info("wolfCrypt container hashes (spans): %x (%lu) %x (%lu), module base %pK\n", pr_info("wolfCrypt container hashes (spans): %x (%lu) %x (%lu), module base %pK\n",
text_hash, pie_text_end-pie_text_start, text_hash, pie_text_end-pie_text_start,
rodata_hash, pie_rodata_end-pie_rodata_start, rodata_hash, pie_rodata_end-pie_rodata_start,
THIS_MODULE->core_layout.base); THIS_MODULE_BASE);
} }
#endif /* HAVE_LINUXKM_PIE_SUPPORT */ #endif /* HAVE_LINUXKM_PIE_SUPPORT */
@ -349,8 +360,16 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
kmalloc_order_trace; kmalloc_order_trace;
wolfssl_linuxkm_pie_redirect_table.get_random_bytes = get_random_bytes; wolfssl_linuxkm_pie_redirect_table.get_random_bytes = get_random_bytes;
wolfssl_linuxkm_pie_redirect_table.ktime_get_coarse_real_ts64 = #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
ktime_get_coarse_real_ts64; wolfssl_linuxkm_pie_redirect_table.getnstimeofday =
getnstimeofday;
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
wolfssl_linuxkm_pie_redirect_table.current_kernel_time64 =
current_kernel_time64;
#else
wolfssl_linuxkm_pie_redirect_table.ktime_get_coarse_real_ts64 =
ktime_get_coarse_real_ts64;
#endif
wolfssl_linuxkm_pie_redirect_table.get_current = my_get_current_thread; wolfssl_linuxkm_pie_redirect_table.get_current = my_get_current_thread;
wolfssl_linuxkm_pie_redirect_table.preempt_count = my_preempt_count; wolfssl_linuxkm_pie_redirect_table.preempt_count = my_preempt_count;
@ -380,8 +399,15 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) {
#endif #endif
wolfssl_linuxkm_pie_redirect_table.__mutex_init = __mutex_init; wolfssl_linuxkm_pie_redirect_table.__mutex_init = __mutex_init;
wolfssl_linuxkm_pie_redirect_table.mutex_lock = mutex_lock; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
wolfssl_linuxkm_pie_redirect_table.mutex_lock_nested = mutex_lock_nested;
#else
wolfssl_linuxkm_pie_redirect_table.mutex_lock = mutex_lock;
#endif
wolfssl_linuxkm_pie_redirect_table.mutex_unlock = mutex_unlock; wolfssl_linuxkm_pie_redirect_table.mutex_unlock = mutex_unlock;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
wolfssl_linuxkm_pie_redirect_table.mutex_destroy = mutex_destroy;
#endif
#ifdef HAVE_FIPS #ifdef HAVE_FIPS
wolfssl_linuxkm_pie_redirect_table.wolfCrypt_FIPS_first = wolfssl_linuxkm_pie_redirect_table.wolfCrypt_FIPS_first =

View File

@ -8269,11 +8269,9 @@ ProtocolVersion MakeDTLSv1_2(void)
} }
#elif defined(WOLFSSL_LINUXKM) #elif defined(WOLFSSL_LINUXKM)
#include <linux/time.h>
#include <linux/ktime.h>
word32 LowResTimer(void) word32 LowResTimer(void)
{ {
return (word32)ktime_get_real_ns(); return (word32)time(NULL);
} }
#else #else

View File

@ -1558,19 +1558,25 @@ end:
return (word32)(uTaskerSystemTick / (TICK_RESOLUTION / 1000)); return (word32)(uTaskerSystemTick / (TICK_RESOLUTION / 1000));
} }
#elif defined(WOLFSSL_LINUXKM) #elif defined(WOLFSSL_LINUXKM)
/* The time in milliseconds.
* Used for tickets to represent difference between when first seen and when
* sending.
*
* returns the time in milliseconds as a 32-bit value.
*/
word32 TimeNowInMilliseconds(void) word32 TimeNowInMilliseconds(void)
{ {
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) s64 t;
return (word32)(ktime_get_real_ns() / (s64)1000000); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
#else struct timespec ts;
return (word32)(ktime_get_real_ns() / (ktime_t)1000000); getnstimeofday(&ts);
#endif t = ts.tv_sec * (s64)1000;
t += ts.tv_nsec / (s64)1000000;
#else
struct timespec64 ts;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
ts = current_kernel_time64();
#else
ktime_get_coarse_real_ts64(&ts);
#endif
t = ts.tv_sec * 1000L;
t += ts.tv_nsec / 1000000L;
#endif
return (word32)t;
} }
#elif defined(WOLFSSL_QNX_CAAM) #elif defined(WOLFSSL_QNX_CAAM)
word32 TimeNowInMilliseconds(void) word32 TimeNowInMilliseconds(void)

View File

@ -775,6 +775,7 @@ block cipher mechanism that uses n-bit binary string parameter key with 128-bits
const int bits, Aes* aes) const int bits, Aes* aes)
{ {
int nr; int nr;
int ret;
#ifdef WOLFSSL_SMALL_STACK #ifdef WOLFSSL_SMALL_STACK
Aes *temp_key; Aes *temp_key;
#else #else
@ -805,11 +806,11 @@ block cipher mechanism that uses n-bit binary string parameter key with 128-bits
nr = temp_key->rounds; nr = temp_key->rounds;
aes->rounds = nr; aes->rounds = nr;
if (SAVE_VECTOR_REGISTERS() != 0) { if ((ret = SAVE_VECTOR_REGISTERS()) != 0) {
#ifdef WOLFSSL_SMALL_STACK #ifdef WOLFSSL_SMALL_STACK
XFREE(temp_key, aes->heap, DYNAMIC_TYPE_AES); XFREE(temp_key, aes->heap, DYNAMIC_TYPE_AES);
#endif #endif
return BAD_STATE_E; return ret;
} }
Key_Schedule[nr] = Temp_Key_Schedule[0]; Key_Schedule[nr] = Temp_Key_Schedule[0];
@ -3066,8 +3067,9 @@ int wc_AesSetIV(Aes* aes, const byte* iv)
__must_check int wc_AesEncryptDirect(Aes* aes, byte* out, const byte* in) __must_check int wc_AesEncryptDirect(Aes* aes, byte* out, const byte* in)
{ {
if (haveAESNI && aes->use_aesni) { if (haveAESNI && aes->use_aesni) {
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
} }
wc_AesEncrypt(aes, in, out); wc_AesEncrypt(aes, in, out);
if (haveAESNI && aes->use_aesni) if (haveAESNI && aes->use_aesni)
@ -3084,8 +3086,9 @@ int wc_AesSetIV(Aes* aes, const byte* iv)
__must_check int wc_AesDecryptDirect(Aes* aes, byte* out, const byte* in) __must_check int wc_AesDecryptDirect(Aes* aes, byte* out, const byte* in)
{ {
if (haveAESNI && aes->use_aesni) { if (haveAESNI && aes->use_aesni) {
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
} }
wc_AesDecrypt(aes, in, out); wc_AesDecrypt(aes, in, out);
if (haveAESNI && aes->use_aesni) if (haveAESNI && aes->use_aesni)
@ -3869,6 +3872,7 @@ int wc_AesSetIV(Aes* aes, const byte* iv)
#ifdef WOLFSSL_AESNI #ifdef WOLFSSL_AESNI
if (haveAESNI) { if (haveAESNI) {
int ret;
#ifdef DEBUG_AESNI #ifdef DEBUG_AESNI
printf("about to aes cbc encrypt\n"); printf("about to aes cbc encrypt\n");
printf("in = %p\n", in); printf("in = %p\n", in);
@ -3889,9 +3893,9 @@ int wc_AesSetIV(Aes* aes, const byte* iv)
tmp_align = tmp + (AESNI_ALIGN - ((wc_ptr_t)tmp % AESNI_ALIGN)); tmp_align = tmp + (AESNI_ALIGN - ((wc_ptr_t)tmp % AESNI_ALIGN));
XMEMCPY(tmp_align, in, sz); XMEMCPY(tmp_align, in, sz);
if (SAVE_VECTOR_REGISTERS() != 0) { if ((ret = SAVE_VECTOR_REGISTERS()) != 0) {
XFREE(tmp, aes->heap, DYNAMIC_TYPE_TMP_BUFFER); XFREE(tmp, aes->heap, DYNAMIC_TYPE_TMP_BUFFER);
return BAD_STATE_E; return ret;
} }
AES_CBC_encrypt(tmp_align, tmp_align, (byte*)aes->reg, sz, AES_CBC_encrypt(tmp_align, tmp_align, (byte*)aes->reg, sz,
(byte*)aes->key, aes->rounds); (byte*)aes->key, aes->rounds);
@ -3908,8 +3912,8 @@ int wc_AesSetIV(Aes* aes, const byte* iv)
#endif #endif
} }
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
AES_CBC_encrypt(in, out, (byte*)aes->reg, sz, (byte*)aes->key, AES_CBC_encrypt(in, out, (byte*)aes->reg, sz, (byte*)aes->key,
aes->rounds); aes->rounds);
RESTORE_VECTOR_REGISTERS(); RESTORE_VECTOR_REGISTERS();
@ -3994,6 +3998,7 @@ int wc_AesSetIV(Aes* aes, const byte* iv)
#ifdef WOLFSSL_AESNI #ifdef WOLFSSL_AESNI
if (haveAESNI) { if (haveAESNI) {
int ret;
#ifdef DEBUG_AESNI #ifdef DEBUG_AESNI
printf("about to aes cbc decrypt\n"); printf("about to aes cbc decrypt\n");
printf("in = %p\n", in); printf("in = %p\n", in);
@ -4006,8 +4011,8 @@ int wc_AesSetIV(Aes* aes, const byte* iv)
/* if input and output same will overwrite input iv */ /* if input and output same will overwrite input iv */
XMEMCPY(aes->tmp, in + sz - AES_BLOCK_SIZE, AES_BLOCK_SIZE); XMEMCPY(aes->tmp, in + sz - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
#if defined(WOLFSSL_AESNI_BY4) #if defined(WOLFSSL_AESNI_BY4)
AES_CBC_decrypt_by4(in, out, (byte*)aes->reg, sz, (byte*)aes->key, AES_CBC_decrypt_by4(in, out, (byte*)aes->reg, sz, (byte*)aes->key,
aes->rounds); aes->rounds);
@ -7579,8 +7584,9 @@ int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz,
#ifdef WOLFSSL_AESNI #ifdef WOLFSSL_AESNI
#ifdef HAVE_INTEL_AVX2 #ifdef HAVE_INTEL_AVX2
if (IS_INTEL_AVX2(intel_flags)) { if (IS_INTEL_AVX2(intel_flags)) {
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
AES_GCM_encrypt_avx2(in, out, authIn, iv, authTag, sz, authInSz, ivSz, AES_GCM_encrypt_avx2(in, out, authIn, iv, authTag, sz, authInSz, ivSz,
authTagSz, (const byte*)aes->key, aes->rounds); authTagSz, (const byte*)aes->key, aes->rounds);
RESTORE_VECTOR_REGISTERS(); RESTORE_VECTOR_REGISTERS();
@ -7590,8 +7596,9 @@ int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz,
#endif #endif
#ifdef HAVE_INTEL_AVX1 #ifdef HAVE_INTEL_AVX1
if (IS_INTEL_AVX1(intel_flags)) { if (IS_INTEL_AVX1(intel_flags)) {
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
AES_GCM_encrypt_avx1(in, out, authIn, iv, authTag, sz, authInSz, ivSz, AES_GCM_encrypt_avx1(in, out, authIn, iv, authTag, sz, authInSz, ivSz,
authTagSz, (const byte*)aes->key, aes->rounds); authTagSz, (const byte*)aes->key, aes->rounds);
RESTORE_VECTOR_REGISTERS(); RESTORE_VECTOR_REGISTERS();
@ -8103,8 +8110,9 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz,
#ifdef WOLFSSL_AESNI #ifdef WOLFSSL_AESNI
#ifdef HAVE_INTEL_AVX2 #ifdef HAVE_INTEL_AVX2
if (IS_INTEL_AVX2(intel_flags)) { if (IS_INTEL_AVX2(intel_flags)) {
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
AES_GCM_decrypt_avx2(in, out, authIn, iv, authTag, sz, authInSz, ivSz, AES_GCM_decrypt_avx2(in, out, authIn, iv, authTag, sz, authInSz, ivSz,
authTagSz, (byte*)aes->key, aes->rounds, &res); authTagSz, (byte*)aes->key, aes->rounds, &res);
RESTORE_VECTOR_REGISTERS(); RESTORE_VECTOR_REGISTERS();
@ -8116,8 +8124,9 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz,
#endif #endif
#ifdef HAVE_INTEL_AVX1 #ifdef HAVE_INTEL_AVX1
if (IS_INTEL_AVX1(intel_flags)) { if (IS_INTEL_AVX1(intel_flags)) {
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
AES_GCM_decrypt_avx1(in, out, authIn, iv, authTag, sz, authInSz, ivSz, AES_GCM_decrypt_avx1(in, out, authIn, iv, authTag, sz, authInSz, ivSz,
authTagSz, (byte*)aes->key, aes->rounds, &res); authTagSz, (byte*)aes->key, aes->rounds, &res);
RESTORE_VECTOR_REGISTERS(); RESTORE_VECTOR_REGISTERS();
@ -8373,8 +8382,9 @@ static int AesGcmInit_aesni(Aes* aes, const byte* iv, word32 ivSz)
#ifdef HAVE_INTEL_AVX2 #ifdef HAVE_INTEL_AVX2
if (IS_INTEL_AVX2(intel_flags)) { if (IS_INTEL_AVX2(intel_flags)) {
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
AES_GCM_init_avx2((byte*)aes->key, aes->rounds, iv, ivSz, aes->H, AES_GCM_init_avx2((byte*)aes->key, aes->rounds, iv, ivSz, aes->H,
AES_COUNTER(aes), AES_INITCTR(aes)); AES_COUNTER(aes), AES_INITCTR(aes));
RESTORE_VECTOR_REGISTERS(); RESTORE_VECTOR_REGISTERS();
@ -8383,8 +8393,9 @@ static int AesGcmInit_aesni(Aes* aes, const byte* iv, word32 ivSz)
#endif #endif
#ifdef HAVE_INTEL_AVX1 #ifdef HAVE_INTEL_AVX1
if (IS_INTEL_AVX1(intel_flags)) { if (IS_INTEL_AVX1(intel_flags)) {
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
AES_GCM_init_avx1((byte*)aes->key, aes->rounds, iv, ivSz, aes->H, AES_GCM_init_avx1((byte*)aes->key, aes->rounds, iv, ivSz, aes->H,
AES_COUNTER(aes), AES_INITCTR(aes)); AES_COUNTER(aes), AES_INITCTR(aes));
RESTORE_VECTOR_REGISTERS(); RESTORE_VECTOR_REGISTERS();
@ -8392,7 +8403,8 @@ static int AesGcmInit_aesni(Aes* aes, const byte* iv, word32 ivSz)
else else
#endif #endif
{ {
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
if (ret != 0)
return BAD_STATE_E; return BAD_STATE_E;
AES_GCM_init_aesni((byte*)aes->key, aes->rounds, iv, ivSz, aes->H, AES_GCM_init_aesni((byte*)aes->key, aes->rounds, iv, ivSz, aes->H,
AES_COUNTER(aes), AES_INITCTR(aes)); AES_COUNTER(aes), AES_INITCTR(aes));
@ -8532,8 +8544,9 @@ static int AesGcmEncryptUpdate_aesni(Aes* aes, byte* c, const byte* p,
word32 blocks; word32 blocks;
int partial; int partial;
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
/* Hash in A, the Authentication Data */ /* Hash in A, the Authentication Data */
AesGcmAadUpdate_aesni(aes, a, aSz, (cSz > 0) && (c != NULL)); AesGcmAadUpdate_aesni(aes, a, aSz, (cSz > 0) && (c != NULL));
@ -8659,8 +8672,9 @@ static int AesGcmEncryptFinal_aesni(Aes* aes, byte* authTag, word32 authTagSz)
/* AAD block incomplete when > 0 */ /* AAD block incomplete when > 0 */
byte over = aes->aOver; byte over = aes->aOver;
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
if (aes->cOver > 0) { if (aes->cOver > 0) {
/* Cipher text block incomplete. */ /* Cipher text block incomplete. */
over = aes->cOver; over = aes->cOver;
@ -8758,8 +8772,9 @@ static int AesGcmDecryptUpdate_aesni(Aes* aes, byte* p, const byte* c,
word32 blocks; word32 blocks;
int partial; int partial;
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
/* Hash in A, the Authentication Data */ /* Hash in A, the Authentication Data */
AesGcmAadUpdate_aesni(aes, a, aSz, (cSz > 0) && (c != NULL)); AesGcmAadUpdate_aesni(aes, a, aSz, (cSz > 0) && (c != NULL));
@ -8893,8 +8908,8 @@ static int AesGcmDecryptFinal_aesni(Aes* aes, const byte* authTag,
byte over = aes->aOver; byte over = aes->aOver;
byte *lastBlock = AES_LASTGBLOCK(aes); byte *lastBlock = AES_LASTGBLOCK(aes);
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
if (aes->cOver > 0) { if (aes->cOver > 0) {
/* Cipher text block incomplete. */ /* Cipher text block incomplete. */
over = aes->cOver; over = aes->cOver;
@ -9893,8 +9908,9 @@ int wc_AesCcmEncrypt(Aes* aes, byte* out, const byte* in, word32 inSz,
B[15] = 1; B[15] = 1;
#ifdef WOLFSSL_AESNI #ifdef WOLFSSL_AESNI
if (haveAESNI && aes->use_aesni) { if (haveAESNI && aes->use_aesni) {
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
while (inSz >= AES_BLOCK_SIZE * 4) { while (inSz >= AES_BLOCK_SIZE * 4) {
AesCcmCtrIncSet4(B, lenSz); AesCcmCtrIncSet4(B, lenSz);
@ -9979,8 +9995,9 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz,
#ifdef WOLFSSL_AESNI #ifdef WOLFSSL_AESNI
if (haveAESNI && aes->use_aesni) { if (haveAESNI && aes->use_aesni) {
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
while (oSz >= AES_BLOCK_SIZE * 4) { while (oSz >= AES_BLOCK_SIZE * 4) {
AesCcmCtrIncSet4(B, lenSz); AesCcmCtrIncSet4(B, lenSz);
@ -10392,8 +10409,8 @@ int wc_AesEcbEncrypt(Aes* aes, byte* out, const byte* in, word32 sz)
if ((in == NULL) || (out == NULL) || (aes == NULL)) if ((in == NULL) || (out == NULL) || (aes == NULL))
return BAD_FUNC_ARG; return BAD_FUNC_ARG;
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
ret = _AesEcbEncrypt(aes, out, in, sz); ret = _AesEcbEncrypt(aes, out, in, sz);
RESTORE_VECTOR_REGISTERS(); RESTORE_VECTOR_REGISTERS();
@ -10407,8 +10424,8 @@ int wc_AesEcbDecrypt(Aes* aes, byte* out, const byte* in, word32 sz)
if ((in == NULL) || (out == NULL) || (aes == NULL)) if ((in == NULL) || (out == NULL) || (aes == NULL))
return BAD_FUNC_ARG; return BAD_FUNC_ARG;
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
ret = _AesEcbDecrypt(aes, out, in, sz); ret = _AesEcbDecrypt(aes, out, in, sz);
RESTORE_VECTOR_REGISTERS(); RESTORE_VECTOR_REGISTERS();
@ -10437,6 +10454,7 @@ static int wc_AesFeedbackEncrypt(Aes* aes, byte* out, const byte* in,
#ifdef WOLFSSL_AES_CFB #ifdef WOLFSSL_AES_CFB
byte* reg = NULL; byte* reg = NULL;
#endif #endif
int ret;
if (aes == NULL || out == NULL || in == NULL) { if (aes == NULL || out == NULL || in == NULL) {
return BAD_FUNC_ARG; return BAD_FUNC_ARG;
@ -10462,8 +10480,8 @@ static int wc_AesFeedbackEncrypt(Aes* aes, byte* out, const byte* in,
sz--; sz--;
} }
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
while (sz >= AES_BLOCK_SIZE) { while (sz >= AES_BLOCK_SIZE) {
/* Using aes->tmp here for inline case i.e. in=out */ /* Using aes->tmp here for inline case i.e. in=out */
@ -10533,6 +10551,7 @@ static int wc_AesFeedbackDecrypt(Aes* aes, byte* out, const byte* in, word32 sz,
byte mode) byte mode)
{ {
byte* tmp; byte* tmp;
int ret;
if (aes == NULL || out == NULL || in == NULL) { if (aes == NULL || out == NULL || in == NULL) {
return BAD_FUNC_ARG; return BAD_FUNC_ARG;
@ -10554,8 +10573,8 @@ static int wc_AesFeedbackDecrypt(Aes* aes, byte* out, const byte* in, word32 sz,
sz--; sz--;
} }
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
while (sz > AES_BLOCK_SIZE) { while (sz > AES_BLOCK_SIZE) {
/* Using aes->tmp here for inline case i.e. in=out */ /* Using aes->tmp here for inline case i.e. in=out */
@ -10673,6 +10692,7 @@ static int wc_AesFeedbackCFB8(Aes* aes, byte* out, const byte* in,
word32 sz, byte dir) word32 sz, byte dir)
{ {
byte *pt; byte *pt;
int ret;
if (aes == NULL || out == NULL || in == NULL) { if (aes == NULL || out == NULL || in == NULL) {
return BAD_FUNC_ARG; return BAD_FUNC_ARG;
@ -10682,8 +10702,8 @@ static int wc_AesFeedbackCFB8(Aes* aes, byte* out, const byte* in,
return 0; return 0;
} }
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
while (sz > 0) { while (sz > 0) {
wc_AesEncryptDirect(aes, (byte*)aes->tmp, (byte*)aes->reg); wc_AesEncryptDirect(aes, (byte*)aes->tmp, (byte*)aes->reg);
@ -10727,6 +10747,7 @@ static int wc_AesFeedbackCFB1(Aes* aes, byte* out, const byte* in,
byte cur = 0; /* hold current work in order to handle inline in=out */ byte cur = 0; /* hold current work in order to handle inline in=out */
byte* pt; byte* pt;
int bit = 7; int bit = 7;
int ret;
if (aes == NULL || out == NULL || in == NULL) { if (aes == NULL || out == NULL || in == NULL) {
return BAD_FUNC_ARG; return BAD_FUNC_ARG;
@ -10736,8 +10757,8 @@ static int wc_AesFeedbackCFB1(Aes* aes, byte* out, const byte* in,
return 0; return 0;
} }
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
while (sz > 0) { while (sz > 0) {
wc_AesEncryptDirect(aes, (byte*)aes->tmp, (byte*)aes->reg); wc_AesEncryptDirect(aes, (byte*)aes->tmp, (byte*)aes->reg);
@ -10942,6 +10963,8 @@ int wc_AesKeyWrap_ex(Aes *aes, const byte* in, word32 inSz, byte* out,
byte t[KEYWRAP_BLOCK_SIZE]; byte t[KEYWRAP_BLOCK_SIZE];
byte tmp[AES_BLOCK_SIZE]; byte tmp[AES_BLOCK_SIZE];
int ret;
/* n must be at least 2 64-bit blocks, output size is (n + 1) 8 bytes (64-bit) */ /* n must be at least 2 64-bit blocks, output size is (n + 1) 8 bytes (64-bit) */
if (aes == NULL || in == NULL || inSz < 2*KEYWRAP_BLOCK_SIZE || if (aes == NULL || in == NULL || inSz < 2*KEYWRAP_BLOCK_SIZE ||
out == NULL || outSz < (inSz + KEYWRAP_BLOCK_SIZE)) out == NULL || outSz < (inSz + KEYWRAP_BLOCK_SIZE))
@ -10962,8 +10985,8 @@ int wc_AesKeyWrap_ex(Aes *aes, const byte* in, word32 inSz, byte* out,
XMEMCPY(tmp, iv, KEYWRAP_BLOCK_SIZE); XMEMCPY(tmp, iv, KEYWRAP_BLOCK_SIZE);
} }
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
for (j = 0; j <= 5; j++) { for (j = 0; j <= 5; j++) {
for (i = 1; i <= inSz / KEYWRAP_BLOCK_SIZE; i++) { for (i = 1; i <= inSz / KEYWRAP_BLOCK_SIZE; i++) {
@ -11048,6 +11071,8 @@ int wc_AesKeyUnWrap_ex(Aes *aes, const byte* in, word32 inSz, byte* out,
0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6
}; };
int ret;
if (aes == NULL || in == NULL || inSz < 3 * KEYWRAP_BLOCK_SIZE || if (aes == NULL || in == NULL || inSz < 3 * KEYWRAP_BLOCK_SIZE ||
out == NULL || outSz < (inSz - KEYWRAP_BLOCK_SIZE)) out == NULL || outSz < (inSz - KEYWRAP_BLOCK_SIZE))
return BAD_FUNC_ARG; return BAD_FUNC_ARG;
@ -11067,8 +11092,8 @@ int wc_AesKeyUnWrap_ex(Aes *aes, const byte* in, word32 inSz, byte* out,
XMEMCPY(out, in + KEYWRAP_BLOCK_SIZE, inSz - KEYWRAP_BLOCK_SIZE); XMEMCPY(out, in + KEYWRAP_BLOCK_SIZE, inSz - KEYWRAP_BLOCK_SIZE);
XMEMSET(t, 0, sizeof(t)); XMEMSET(t, 0, sizeof(t));
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
/* initialize counter to 6n */ /* initialize counter to 6n */
n = (inSz - 1) / KEYWRAP_BLOCK_SIZE; n = (inSz - 1) / KEYWRAP_BLOCK_SIZE;
@ -11351,8 +11376,8 @@ int wc_AesXtsEncrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz,
XMEMSET(tmp, 0, AES_BLOCK_SIZE); /* set to 0's in case of improper AES XMEMSET(tmp, 0, AES_BLOCK_SIZE); /* set to 0's in case of improper AES
* key setup passed to encrypt direct*/ * key setup passed to encrypt direct*/
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
wc_AesEncryptDirect(tweak, tmp, i); wc_AesEncryptDirect(tweak, tmp, i);
@ -11467,8 +11492,8 @@ int wc_AesXtsDecrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz,
XMEMSET(tmp, 0, AES_BLOCK_SIZE); /* set to 0's in case of improper AES XMEMSET(tmp, 0, AES_BLOCK_SIZE); /* set to 0's in case of improper AES
* key setup passed to decrypt direct*/ * key setup passed to decrypt direct*/
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
wc_AesEncryptDirect(tweak, tmp, i); wc_AesEncryptDirect(tweak, tmp, i);

View File

@ -418,16 +418,18 @@ int wc_Chacha_Process(ChaCha* ctx, byte* output, const byte* input,
#ifdef HAVE_INTEL_AVX2 #ifdef HAVE_INTEL_AVX2
if (IS_INTEL_AVX2(cpuidFlags)) { if (IS_INTEL_AVX2(cpuidFlags)) {
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
chacha_encrypt_avx2(ctx, input, output, msglen); chacha_encrypt_avx2(ctx, input, output, msglen);
RESTORE_VECTOR_REGISTERS(); RESTORE_VECTOR_REGISTERS();
return 0; return 0;
} }
#endif #endif
if (IS_INTEL_AVX1(cpuidFlags)) { if (IS_INTEL_AVX1(cpuidFlags)) {
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
chacha_encrypt_avx1(ctx, input, output, msglen); chacha_encrypt_avx1(ctx, input, output, msglen);
RESTORE_VECTOR_REGISTERS(); RESTORE_VECTOR_REGISTERS();
return 0; return 0;

View File

@ -190,10 +190,10 @@ int wc_CmacUpdate(Cmac* cmac, const byte* in, word32 inSz)
#endif #endif
cmac->totalSz += AES_BLOCK_SIZE; cmac->totalSz += AES_BLOCK_SIZE;
cmac->bufferSz = 0; cmac->bufferSz = 0;
}
#ifdef WOLFSSL_LINUXKM #ifdef WOLFSSL_LINUXKM
} }
#endif #endif
}
} }
return ret; return ret;

View File

@ -128,8 +128,8 @@ int wc_curve25519_make_pub(int public_size, byte* pub, int private_size,
#else #else
fe_init(); fe_init();
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
ret = curve25519(pub, priv, kCurve25519BasePoint); ret = curve25519(pub, priv, kCurve25519BasePoint);
@ -171,8 +171,8 @@ int wc_curve25519_generic(int public_size, byte* pub,
fe_init(); fe_init();
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
ret = curve25519(pub, priv, basepoint); ret = curve25519(pub, priv, basepoint);
@ -289,8 +289,8 @@ int wc_curve25519_shared_secret_ex(curve25519_key* private_key,
ret = nxp_ltc_curve25519(&o, private_key->k, &public_key->p, ret = nxp_ltc_curve25519(&o, private_key->k, &public_key->p,
kLTC_Curve25519); kLTC_Curve25519);
#else #else
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
ret = curve25519(o.point, private_key->k, public_key->p.point); ret = curve25519(o.point, private_key->k, public_key->p.point);

View File

@ -1139,42 +1139,60 @@ union fpregs_state **wolfcrypt_irq_fpu_states = NULL;
#if defined(WOLFSSL_LINUXKM_SIMD_X86) && defined(WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED) #if defined(WOLFSSL_LINUXKM_SIMD_X86) && defined(WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED)
static __must_check inline int am_in_hard_interrupt_handler(void) { static WARN_UNUSED_RESULT inline int am_in_hard_interrupt_handler(void)
{
return (preempt_count() & (NMI_MASK | HARDIRQ_MASK)) != 0; return (preempt_count() & (NMI_MASK | HARDIRQ_MASK)) != 0;
} }
__must_check int allocate_wolfcrypt_irq_fpu_states(void) { WARN_UNUSED_RESULT int allocate_wolfcrypt_irq_fpu_states(void)
wolfcrypt_irq_fpu_states = (union fpregs_state **)kzalloc(nr_cpu_ids * sizeof(struct fpu_state *), GFP_KERNEL); {
wolfcrypt_irq_fpu_states =
(union fpregs_state **)kzalloc(nr_cpu_ids
* sizeof(struct fpu_state *),
GFP_KERNEL);
if (! wolfcrypt_irq_fpu_states) { if (! wolfcrypt_irq_fpu_states) {
pr_err("warning, allocation of %lu bytes for wolfcrypt_irq_fpu_states failed.\n", nr_cpu_ids * sizeof(struct fpu_state *)); pr_err("warning, allocation of %lu bytes for "
"wolfcrypt_irq_fpu_states failed.\n",
nr_cpu_ids * sizeof(struct fpu_state *));
return MEMORY_E; return MEMORY_E;
} }
{ {
unsigned int i; typeof(nr_cpu_ids) i;
for (i=0; i<nr_cpu_ids; ++i) { for (i=0; i<nr_cpu_ids; ++i) {
_Static_assert(sizeof(union fpregs_state) <= PAGE_SIZE, "union fpregs_state is larger than expected."); _Static_assert(sizeof(union fpregs_state) <= PAGE_SIZE,
wolfcrypt_irq_fpu_states[i] = (union fpregs_state *)kzalloc(PAGE_SIZE /* sizeof(union fpregs_state) */, GFP_KERNEL); "union fpregs_state is larger than expected.");
wolfcrypt_irq_fpu_states[i] =
(union fpregs_state *)kzalloc(PAGE_SIZE
/* sizeof(union fpregs_state) */,
GFP_KERNEL);
if (! wolfcrypt_irq_fpu_states[i]) if (! wolfcrypt_irq_fpu_states[i])
break; break;
/* double-check that the allocation is 64-byte-aligned as needed for xsave. */ /* double-check that the allocation is 64-byte-aligned as needed
* for xsave.
*/
if ((unsigned long)wolfcrypt_irq_fpu_states[i] & 63UL) { if ((unsigned long)wolfcrypt_irq_fpu_states[i] & 63UL) {
pr_err("warning, allocation for wolfcrypt_irq_fpu_states was not properly aligned (%px).\n", wolfcrypt_irq_fpu_states[i]); pr_err("warning, allocation for wolfcrypt_irq_fpu_states "
"was not properly aligned (%px).\n",
wolfcrypt_irq_fpu_states[i]);
kfree(wolfcrypt_irq_fpu_states[i]); kfree(wolfcrypt_irq_fpu_states[i]);
wolfcrypt_irq_fpu_states[i] = 0; wolfcrypt_irq_fpu_states[i] = 0;
break; break;
} }
} }
if (i < nr_cpu_ids) { if (i < nr_cpu_ids) {
pr_err("warning, only %u/%u allocations succeeded for wolfcrypt_irq_fpu_states.\n", i, nr_cpu_ids); pr_err("warning, only %u/%u allocations succeeded for "
"wolfcrypt_irq_fpu_states.\n",
i, nr_cpu_ids);
return MEMORY_E; return MEMORY_E;
} }
} }
return 0; return 0;
} }
void free_wolfcrypt_irq_fpu_states(void) { void free_wolfcrypt_irq_fpu_states(void)
{
if (wolfcrypt_irq_fpu_states) { if (wolfcrypt_irq_fpu_states) {
unsigned int i; typeof(nr_cpu_ids) i;
for (i=0; i<nr_cpu_ids; ++i) { for (i=0; i<nr_cpu_ids; ++i) {
if (wolfcrypt_irq_fpu_states[i]) if (wolfcrypt_irq_fpu_states[i])
kfree(wolfcrypt_irq_fpu_states[i]); kfree(wolfcrypt_irq_fpu_states[i]);
@ -1184,7 +1202,8 @@ union fpregs_state **wolfcrypt_irq_fpu_states = NULL;
} }
} }
__must_check int save_vector_registers_x86(void) { WARN_UNUSED_RESULT int save_vector_registers_x86(void)
{
preempt_disable(); preempt_disable();
if (! irq_fpu_usable()) { if (! irq_fpu_usable()) {
if (am_in_hard_interrupt_handler()) { if (am_in_hard_interrupt_handler()) {
@ -1195,77 +1214,93 @@ union fpregs_state **wolfcrypt_irq_fpu_states = NULL;
preempt_enable(); preempt_enable();
if (! warned_on_null_wolfcrypt_irq_fpu_states) { if (! warned_on_null_wolfcrypt_irq_fpu_states) {
warned_on_null_wolfcrypt_irq_fpu_states = 1; warned_on_null_wolfcrypt_irq_fpu_states = 1;
pr_err("save_vector_registers_x86 with null wolfcrypt_irq_fpu_states.\n"); pr_err("save_vector_registers_x86 with null "
"wolfcrypt_irq_fpu_states.\n");
} }
return EFAULT; return BAD_STATE_E;
} }
processor_id = __smp_processor_id(); processor_id = __smp_processor_id();
if (! wolfcrypt_irq_fpu_states[processor_id]) { if (! wolfcrypt_irq_fpu_states[processor_id]) {
static int warned_on_null_wolfcrypt_irq_fpu_states_processor_id = -1; static int _warned_on_null = -1;
preempt_enable(); preempt_enable();
if (warned_on_null_wolfcrypt_irq_fpu_states_processor_id < processor_id) { if (_warned_on_null < processor_id) {
warned_on_null_wolfcrypt_irq_fpu_states_processor_id = processor_id; _warned_on_null = processor_id;
pr_err("save_vector_registers_x86 for cpu id %d with null wolfcrypt_irq_fpu_states[id].\n", processor_id); pr_err("save_vector_registers_x86 for cpu id %d with "
"null wolfcrypt_irq_fpu_states[id].\n",
processor_id);
} }
return EFAULT; return BAD_STATE_E;
} }
/* check for nested interrupts -- doesn't exist on x86, but make /* check for nested interrupts -- doesn't exist on x86, but make
* sure, in case something changes. * sure, in case something changes.
*
* (see https://stackoverflow.com/questions/23324084/nested-interrupt-handling-in-arm)
*/ */
if (((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] != 0) { if (((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] != 0) {
preempt_enable(); preempt_enable();
pr_err("save_vector_registers_x86 called recursively for cpu id %d.\n", processor_id); pr_err("save_vector_registers_x86 called recursively for "
return EPERM; "cpu id %d.\n", processor_id);
return BAD_STATE_E;
} }
/* note, fpregs_lock() is not needed here, because /* note, fpregs_lock() is not needed here, because
* interrupts/preemptions are already disabled here. * interrupts/preemptions are already disabled here.
*/ */
{ {
/* save_fpregs_to_fpstate() only accesses fpu->state, which has /* save_fpregs_to_fpstate() only accesses fpu->state, which
* stringent alignment requirements (64 byte cache line), but takes * has stringent alignment requirements (64 byte cache
* a pointer to the parent struct. work around this. * line), but takes a pointer to the parent struct. work
* around this.
*/ */
struct fpu *fake_fpu_pointer = (struct fpu *)(((char *)wolfcrypt_irq_fpu_states[processor_id]) - offsetof(struct fpu, state)); struct fpu *fake_fpu_pointer =
(struct fpu *)(((char *)wolfcrypt_irq_fpu_states[processor_id])
- offsetof(struct fpu, state));
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
copy_fpregs_to_fpstate(fake_fpu_pointer); copy_fpregs_to_fpstate(fake_fpu_pointer);
#else #else
save_fpregs_to_fpstate(fake_fpu_pointer); save_fpregs_to_fpstate(fake_fpu_pointer);
#endif #endif
} }
((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] = 1; /* mark the slot as used. */ /* mark the slot as used. */
/* note, not preempt_enable()ing, mirroring kernel_fpu_begin() semantics. */ ((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] = 1;
/* note, not preempt_enable()ing, mirroring kernel_fpu_begin()
* semantics.
*/
return 0; return 0;
} }
preempt_enable(); preempt_enable();
return EPERM; return BAD_STATE_E;
} else { } else {
kernel_fpu_begin(); kernel_fpu_begin();
preempt_enable(); /* kernel_fpu_begin() does its own preempt_disable(). decrement ours. */ preempt_enable(); /* kernel_fpu_begin() does its own
* preempt_disable(). decrement ours.
*/
return 0; return 0;
} }
} }
void restore_vector_registers_x86(void) { void restore_vector_registers_x86(void)
{
if (am_in_hard_interrupt_handler()) { if (am_in_hard_interrupt_handler()) {
int processor_id = __smp_processor_id(); int processor_id = __smp_processor_id();
if (((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1]) { if ((wolfcrypt_irq_fpu_states == NULL) ||
(wolfcrypt_irq_fpu_states[processor_id] == NULL) ||
(((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] == 0))
{
pr_err("restore_vector_registers_x86 called for cpu id %d "
"without saved context.\n", processor_id);
preempt_enable(); /* just in case */
return;
} else {
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0)
copy_kernel_to_fpregs(wolfcrypt_irq_fpu_states[processor_id]); copy_kernel_to_fpregs(wolfcrypt_irq_fpu_states[processor_id]);
#else #else
__restore_fpregs_from_fpstate(wolfcrypt_irq_fpu_states[processor_id], xfeatures_mask_all); __restore_fpregs_from_fpstate(wolfcrypt_irq_fpu_states[processor_id],
xfeatures_mask_all);
#endif #endif
((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] = 0; ((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] = 0;
preempt_enable(); preempt_enable();
return; return;
} else {
pr_err("restore_vector_registers_x86 called for cpu id %d without saved context.\n", processor_id);
preempt_enable(); /* just in case */
return;
} }
} }
kernel_fpu_end(); kernel_fpu_end();

View File

@ -267,8 +267,9 @@ static int poly1305_blocks(Poly1305* ctx, const unsigned char *m,
{ {
#ifdef USE_INTEL_SPEEDUP #ifdef USE_INTEL_SPEEDUP
/* AVX2 is handled in wc_Poly1305Update. */ /* AVX2 is handled in wc_Poly1305Update. */
if (SAVE_VECTOR_REGISTERS() != 0) int ret = SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
poly1305_blocks_avx(ctx, m, bytes); poly1305_blocks_avx(ctx, m, bytes);
RESTORE_VECTOR_REGISTERS(); RESTORE_VECTOR_REGISTERS();
return 0; return 0;
@ -402,8 +403,9 @@ static int poly1305_block(Poly1305* ctx, const unsigned char *m)
{ {
#ifdef USE_INTEL_SPEEDUP #ifdef USE_INTEL_SPEEDUP
/* No call to poly1305_block when AVX2, AVX2 does 4 blocks at a time. */ /* No call to poly1305_block when AVX2, AVX2 does 4 blocks at a time. */
if (SAVE_VECTOR_REGISTERS() != 0) int ret= SAVE_VECTOR_REGISTERS();
return BAD_STATE_E; if (ret != 0)
return ret;
poly1305_block_avx(ctx, m); poly1305_block_avx(ctx, m);
RESTORE_VECTOR_REGISTERS(); RESTORE_VECTOR_REGISTERS();
return 0; return 0;
@ -442,8 +444,11 @@ int wc_Poly1305SetKey(Poly1305* ctx, const byte* key, word32 keySz)
intel_flags = cpuid_get_flags(); intel_flags = cpuid_get_flags();
cpu_flags_set = 1; cpu_flags_set = 1;
} }
if (SAVE_VECTOR_REGISTERS() != 0) {
return BAD_STATE_E; int ret = SAVE_VECTOR_REGISTERS();
if (ret != 0)
return ret;
}
#ifdef HAVE_INTEL_AVX2 #ifdef HAVE_INTEL_AVX2
if (IS_INTEL_AVX2(intel_flags)) if (IS_INTEL_AVX2(intel_flags))
poly1305_setkey_avx2(ctx, key); poly1305_setkey_avx2(ctx, key);
@ -506,6 +511,7 @@ int wc_Poly1305SetKey(Poly1305* ctx, const byte* key, word32 keySz)
int wc_Poly1305Final(Poly1305* ctx, byte* mac) int wc_Poly1305Final(Poly1305* ctx, byte* mac)
{ {
#ifdef USE_INTEL_SPEEDUP #ifdef USE_INTEL_SPEEDUP
int ret;
#elif defined(POLY130564) #elif defined(POLY130564)
word64 h0,h1,h2,c; word64 h0,h1,h2,c;
@ -525,8 +531,8 @@ int wc_Poly1305Final(Poly1305* ctx, byte* mac)
return BAD_FUNC_ARG; return BAD_FUNC_ARG;
#ifdef USE_INTEL_SPEEDUP #ifdef USE_INTEL_SPEEDUP
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
#ifdef HAVE_INTEL_AVX2 #ifdef HAVE_INTEL_AVX2
if (IS_INTEL_AVX2(intel_flags)) if (IS_INTEL_AVX2(intel_flags))
poly1305_final_avx2(ctx, mac); poly1305_final_avx2(ctx, mac);
@ -714,9 +720,9 @@ int wc_Poly1305Update(Poly1305* ctx, const byte* m, word32 bytes)
#ifdef USE_INTEL_SPEEDUP #ifdef USE_INTEL_SPEEDUP
#ifdef HAVE_INTEL_AVX2 #ifdef HAVE_INTEL_AVX2
if (IS_INTEL_AVX2(intel_flags)) { if (IS_INTEL_AVX2(intel_flags)) {
int ret = SAVE_VECTOR_REGISTERS();
if (SAVE_VECTOR_REGISTERS() != 0) if (ret != 0)
return BAD_STATE_E; return ret;
/* handle leftover */ /* handle leftover */

View File

@ -368,11 +368,17 @@ int wc_PKCS12_PBKDF_ex(byte* output, const byte* passwd, int passLen,
byte* buffer = staticBuffer; byte* buffer = staticBuffer;
#ifdef WOLFSSL_SMALL_STACK #ifdef WOLFSSL_SMALL_STACK
byte* Ai; byte* Ai = NULL;
byte* B; byte* B = NULL;
mp_int *B1 = NULL;
mp_int *i1 = NULL;
mp_int *res = NULL;
#else #else
byte Ai[WC_MAX_DIGEST_SIZE]; byte Ai[WC_MAX_DIGEST_SIZE];
byte B[WC_MAX_BLOCK_SIZE]; byte B[WC_MAX_BLOCK_SIZE];
mp_int B1[1];
mp_int i1[1];
mp_int res[1];
#endif #endif
enum wc_HashType hashT; enum wc_HashType hashT;
@ -449,9 +455,20 @@ int wc_PKCS12_PBKDF_ex(byte* output, const byte* passwd, int passLen,
for (i = 0; i < (int)pLen; i++) for (i = 0; i < (int)pLen; i++)
P[i] = passwd[i % passLen]; P[i] = passwd[i % passLen];
#ifdef WOLFSSL_SMALL_STACK
if (((B1 = (mp_int *)XMALLOC(sizeof(*B1), heap, DYNAMIC_TYPE_TMP_BUFFER))
== NULL) ||
((i1 = (mp_int *)XMALLOC(sizeof(*i1), heap, DYNAMIC_TYPE_TMP_BUFFER))
== NULL) ||
((res = (mp_int *)XMALLOC(sizeof(*res), heap, DYNAMIC_TYPE_TMP_BUFFER))
== NULL)) {
ret = MEMORY_E;
goto out;
}
#endif
while (kLen > 0) { while (kLen > 0) {
word32 currentLen; word32 currentLen;
mp_int B1;
ret = DoPKCS12Hash(hashType, buffer, totalLen, Ai, u, iterations); ret = DoPKCS12Hash(hashType, buffer, totalLen, Ai, u, iterations);
if (ret < 0) if (ret < 0)
@ -460,55 +477,53 @@ int wc_PKCS12_PBKDF_ex(byte* output, const byte* passwd, int passLen,
for (i = 0; i < (int)v; i++) for (i = 0; i < (int)v; i++)
B[i] = Ai[i % u]; B[i] = Ai[i % u];
if (mp_init(&B1) != MP_OKAY) if (mp_init(B1) != MP_OKAY)
ret = MP_INIT_E; ret = MP_INIT_E;
else if (mp_read_unsigned_bin(&B1, B, v) != MP_OKAY) else if (mp_read_unsigned_bin(B1, B, v) != MP_OKAY)
ret = MP_READ_E; ret = MP_READ_E;
else if (mp_add_d(&B1, (mp_digit)1, &B1) != MP_OKAY) else if (mp_add_d(B1, (mp_digit)1, B1) != MP_OKAY)
ret = MP_ADD_E; ret = MP_ADD_E;
if (ret != 0) { if (ret != 0) {
mp_clear(&B1); mp_clear(B1);
break; break;
} }
for (i = 0; i < (int)iLen; i += v) { for (i = 0; i < (int)iLen; i += v) {
int outSz; int outSz;
mp_int i1;
mp_int res;
if (mp_init_multi(&i1, &res, NULL, NULL, NULL, NULL) != MP_OKAY) { if (mp_init_multi(i1, res, NULL, NULL, NULL, NULL) != MP_OKAY) {
ret = MP_INIT_E; ret = MP_INIT_E;
break; break;
} }
if (mp_read_unsigned_bin(&i1, I + i, v) != MP_OKAY) if (mp_read_unsigned_bin(i1, I + i, v) != MP_OKAY)
ret = MP_READ_E; ret = MP_READ_E;
else if (mp_add(&i1, &B1, &res) != MP_OKAY) else if (mp_add(i1, B1, res) != MP_OKAY)
ret = MP_ADD_E; ret = MP_ADD_E;
else if ( (outSz = mp_unsigned_bin_size(&res)) < 0) else if ( (outSz = mp_unsigned_bin_size(res)) < 0)
ret = MP_TO_E; ret = MP_TO_E;
else { else {
if (outSz > (int)v) { if (outSz > (int)v) {
/* take off MSB */ /* take off MSB */
byte tmp[WC_MAX_BLOCK_SIZE + 1]; byte tmp[WC_MAX_BLOCK_SIZE + 1];
ret = mp_to_unsigned_bin(&res, tmp); ret = mp_to_unsigned_bin(res, tmp);
XMEMCPY(I + i, tmp + 1, v); XMEMCPY(I + i, tmp + 1, v);
} }
else if (outSz < (int)v) { else if (outSz < (int)v) {
XMEMSET(I + i, 0, v - outSz); XMEMSET(I + i, 0, v - outSz);
ret = mp_to_unsigned_bin(&res, I + i + v - outSz); ret = mp_to_unsigned_bin(res, I + i + v - outSz);
} }
else else
ret = mp_to_unsigned_bin(&res, I + i); ret = mp_to_unsigned_bin(res, I + i);
} }
mp_clear(&i1); mp_clear(i1);
mp_clear(&res); mp_clear(res);
if (ret < 0) break; if (ret < 0) break;
} }
if (ret < 0) { if (ret < 0) {
mp_clear(&B1); mp_clear(B1);
break; break;
} }
@ -516,16 +531,27 @@ int wc_PKCS12_PBKDF_ex(byte* output, const byte* passwd, int passLen,
XMEMCPY(output, Ai, currentLen); XMEMCPY(output, Ai, currentLen);
output += currentLen; output += currentLen;
kLen -= currentLen; kLen -= currentLen;
mp_clear(&B1); mp_clear(B1);
} }
if (dynamic) XFREE(buffer, heap, DYNAMIC_TYPE_KEY);
#ifdef WOLFSSL_SMALL_STACK #ifdef WOLFSSL_SMALL_STACK
XFREE(Ai, heap, DYNAMIC_TYPE_TMP_BUFFER); out:
XFREE(B, heap, DYNAMIC_TYPE_TMP_BUFFER);
if (Ai)
XFREE(Ai, heap, DYNAMIC_TYPE_TMP_BUFFER);
if (B)
XFREE(B, heap, DYNAMIC_TYPE_TMP_BUFFER);
if (B1)
XFREE(B1, heap, DYNAMIC_TYPE_TMP_BUFFER);
if (i1)
XFREE(i1, heap, DYNAMIC_TYPE_TMP_BUFFER);
if (res)
XFREE(res, heap, DYNAMIC_TYPE_TMP_BUFFER);
#endif #endif
if (dynamic)
XFREE(buffer, heap, DYNAMIC_TYPE_KEY);
return ret; return ret;
} }

View File

@ -4097,7 +4097,11 @@ int wc_CheckProbablePrime_ex(const byte* pRaw, word32 pRawSz,
const byte* eRaw, word32 eRawSz, const byte* eRaw, word32 eRawSz,
int nlen, int* isPrime, WC_RNG* rng) int nlen, int* isPrime, WC_RNG* rng)
{ {
mp_int p, q, e; #ifdef WOLFSSL_SMALL_STACK
mp_int *p = NULL, *q = NULL, *e = NULL;
#else
mp_int p[1], q[1], e[1];
#endif
mp_int* Q = NULL; mp_int* Q = NULL;
int ret; int ret;
@ -4111,30 +4115,54 @@ int wc_CheckProbablePrime_ex(const byte* pRaw, word32 pRawSz,
if ((qRaw != NULL && qRawSz == 0) || (qRaw == NULL && qRawSz != 0)) if ((qRaw != NULL && qRawSz == 0) || (qRaw == NULL && qRawSz != 0))
return BAD_FUNC_ARG; return BAD_FUNC_ARG;
ret = mp_init_multi(&p, &q, &e, NULL, NULL, NULL); #ifdef WOLFSSL_SMALL_STACK
if (((p = (mp_int *)XMALLOC(sizeof(*p), NULL, DYNAMIC_TYPE_RSA_BUFFER)) == NULL) ||
((q = (mp_int *)XMALLOC(sizeof(*q), NULL, DYNAMIC_TYPE_RSA_BUFFER)) == NULL) ||
((e = (mp_int *)XMALLOC(sizeof(*e), NULL, DYNAMIC_TYPE_RSA_BUFFER)) == NULL))
ret = MEMORY_E;
else
ret = 0;
if (ret == 0)
#endif
ret = mp_init_multi(p, q, e, NULL, NULL, NULL);
if (ret == MP_OKAY) if (ret == MP_OKAY)
ret = mp_read_unsigned_bin(&p, pRaw, pRawSz); ret = mp_read_unsigned_bin(p, pRaw, pRawSz);
if (ret == MP_OKAY) { if (ret == MP_OKAY) {
if (qRaw != NULL) { if (qRaw != NULL) {
ret = mp_read_unsigned_bin(&q, qRaw, qRawSz); ret = mp_read_unsigned_bin(q, qRaw, qRawSz);
if (ret == MP_OKAY) if (ret == MP_OKAY)
Q = &q; Q = q;
} }
} }
if (ret == MP_OKAY) if (ret == MP_OKAY)
ret = mp_read_unsigned_bin(&e, eRaw, eRawSz); ret = mp_read_unsigned_bin(e, eRaw, eRawSz);
if (ret == MP_OKAY) if (ret == MP_OKAY)
ret = _CheckProbablePrime(&p, Q, &e, nlen, isPrime, rng); ret = _CheckProbablePrime(p, Q, e, nlen, isPrime, rng);
ret = (ret == MP_OKAY) ? 0 : PRIME_GEN_E; ret = (ret == MP_OKAY) ? 0 : PRIME_GEN_E;
mp_clear(&p); #ifdef WOLFSSL_SMALL_STACK
mp_clear(&q); if (p) {
mp_clear(&e); mp_clear(p);
XFREE(p, NULL, DYNAMIC_TYPE_RSA_BUFFER);
}
if (q) {
mp_clear(q);
XFREE(q, NULL, DYNAMIC_TYPE_RSA_BUFFER);
}
if (e) {
mp_clear(e);
XFREE(e, NULL, DYNAMIC_TYPE_RSA_BUFFER);
}
#else
mp_clear(p);
mp_clear(q);
mp_clear(e);
#endif
return ret; return ret;
} }

View File

@ -320,8 +320,8 @@ static int InitSha256(wc_Sha256* sha256)
static WC_INLINE int inline_XTRANSFORM(wc_Sha256* S, const byte* D) { static WC_INLINE int inline_XTRANSFORM(wc_Sha256* S, const byte* D) {
int ret; int ret;
if (Transform_Sha256_is_vectorized) { if (Transform_Sha256_is_vectorized) {
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
} }
ret = (*Transform_Sha256_p)(S, D); ret = (*Transform_Sha256_p)(S, D);
if (Transform_Sha256_is_vectorized) if (Transform_Sha256_is_vectorized)
@ -333,8 +333,8 @@ static int InitSha256(wc_Sha256* sha256)
static WC_INLINE int inline_XTRANSFORM_LEN(wc_Sha256* S, const byte* D, word32 L) { static WC_INLINE int inline_XTRANSFORM_LEN(wc_Sha256* S, const byte* D, word32 L) {
int ret; int ret;
if (Transform_Sha256_is_vectorized) { if (Transform_Sha256_is_vectorized) {
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
} }
ret = (*Transform_Sha256_Len_p)(S, D, L); ret = (*Transform_Sha256_Len_p)(S, D, L);
if (Transform_Sha256_is_vectorized) if (Transform_Sha256_is_vectorized)

View File

@ -449,8 +449,8 @@ static int InitSha512_256(wc_Sha512* sha512)
static WC_INLINE int Transform_Sha512(wc_Sha512 *sha512) { static WC_INLINE int Transform_Sha512(wc_Sha512 *sha512) {
int ret; int ret;
if (Transform_Sha512_is_vectorized) { if (Transform_Sha512_is_vectorized) {
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
} }
ret = (*Transform_Sha512_p)(sha512); ret = (*Transform_Sha512_p)(sha512);
if (Transform_Sha512_is_vectorized) if (Transform_Sha512_is_vectorized)
@ -460,8 +460,8 @@ static int InitSha512_256(wc_Sha512* sha512)
static WC_INLINE int Transform_Sha512_Len(wc_Sha512 *sha512, word32 len) { static WC_INLINE int Transform_Sha512_Len(wc_Sha512 *sha512, word32 len) {
int ret; int ret;
if (Transform_Sha512_is_vectorized) { if (Transform_Sha512_is_vectorized) {
if (SAVE_VECTOR_REGISTERS() != 0) if ((ret = SAVE_VECTOR_REGISTERS()) != 0)
return BAD_STATE_E; return ret;
} }
ret = (*Transform_Sha512_Len_p)(sha512, len); ret = (*Transform_Sha512_Len_p)(sha512, len);
if (Transform_Sha512_is_vectorized) if (Transform_Sha512_is_vectorized)

View File

@ -2454,7 +2454,11 @@ time_t time(time_t * timer)
ret = ts.tv_sec; ret = ts.tv_sec;
#else #else
struct timespec64 ts; struct timespec64 ts;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
ts = current_kernel_time64();
#else
ktime_get_coarse_real_ts64(&ts); ktime_get_coarse_real_ts64(&ts);
#endif
ret = ts.tv_sec; ret = ts.tv_sec;
#endif #endif
if (timer) if (timer)

View File

@ -320,6 +320,16 @@ decouple library dependencies with standard string, memory and so on.
#define FALL_THROUGH #define FALL_THROUGH
#endif #endif
#ifndef WARN_UNUSED_RESULT
#if defined(WOLFSSL_LINUXKM) && defined(__must_check)
#define WARN_UNUSED_RESULT __must_check
#elif defined(__GNUC__) && (__GNUC__ >= 4)
#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
#define WARN_UNUSED_RESULT
#endif
#endif /* WARN_UNUSED_RESULT */
/* Micrium will use Visual Studio for compilation but not the Win32 API */ /* Micrium will use Visual Studio for compilation but not the Win32 API */
#if defined(_WIN32) && !defined(MICRIUM) && !defined(FREERTOS) && \ #if defined(_WIN32) && !defined(MICRIUM) && !defined(FREERTOS) && \
!defined(FREERTOS_TCP) && !defined(EBSNET) && \ !defined(FREERTOS_TCP) && !defined(EBSNET) && \

View File

@ -133,7 +133,9 @@
#else #else
#include <asm/simd.h> #include <asm/simd.h>
#endif #endif
#include <asm/fpu/internal.h> #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
#include <asm/fpu/internal.h>
#endif
#ifndef SAVE_VECTOR_REGISTERS #ifndef SAVE_VECTOR_REGISTERS
#define SAVE_VECTOR_REGISTERS() save_vector_registers_x86() #define SAVE_VECTOR_REGISTERS() save_vector_registers_x86()
#endif #endif
@ -263,7 +265,13 @@
typeof(kmalloc_order_trace) *kmalloc_order_trace; typeof(kmalloc_order_trace) *kmalloc_order_trace;
typeof(get_random_bytes) *get_random_bytes; typeof(get_random_bytes) *get_random_bytes;
typeof(ktime_get_coarse_real_ts64) *ktime_get_coarse_real_ts64; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
typeof(getnstimeofday) *getnstimeofday;
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
typeof(current_kernel_time64) *current_kernel_time64;
#else
typeof(ktime_get_coarse_real_ts64) *ktime_get_coarse_real_ts64;
#endif
struct task_struct *(*get_current)(void); struct task_struct *(*get_current)(void);
int (*preempt_count)(void); int (*preempt_count)(void);
@ -296,8 +304,15 @@
#endif /* WOLFSSL_LINUXKM_SIMD_X86 */ #endif /* WOLFSSL_LINUXKM_SIMD_X86 */
typeof(__mutex_init) *__mutex_init; typeof(__mutex_init) *__mutex_init;
typeof(mutex_lock) *mutex_lock; #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
typeof(mutex_lock_nested) *mutex_lock_nested;
#else
typeof(mutex_lock) *mutex_lock;
#endif
typeof(mutex_unlock) *mutex_unlock; typeof(mutex_unlock) *mutex_unlock;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
typeof(mutex_destroy) *mutex_destroy;
#endif
#ifdef HAVE_FIPS #ifdef HAVE_FIPS
typeof(wolfCrypt_FIPS_first) *wolfCrypt_FIPS_first; typeof(wolfCrypt_FIPS_first) *wolfCrypt_FIPS_first;
@ -369,7 +384,13 @@
#define kmalloc_order_trace (wolfssl_linuxkm_get_pie_redirect_table()->kmalloc_order_trace) #define kmalloc_order_trace (wolfssl_linuxkm_get_pie_redirect_table()->kmalloc_order_trace)
#define get_random_bytes (wolfssl_linuxkm_get_pie_redirect_table()->get_random_bytes) #define get_random_bytes (wolfssl_linuxkm_get_pie_redirect_table()->get_random_bytes)
#define ktime_get_coarse_real_ts64 (wolfssl_linuxkm_get_pie_redirect_table()->ktime_get_coarse_real_ts64) #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
#define getnstimeofday (wolfssl_linuxkm_get_pie_redirect_table()->getnstimeofday)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
#define current_kernel_time64 (wolfssl_linuxkm_get_pie_redirect_table()->current_kernel_time64)
#else
#define ktime_get_coarse_real_ts64 (wolfssl_linuxkm_get_pie_redirect_table()->ktime_get_coarse_real_ts64)
#endif
#undef get_current #undef get_current
#define get_current (wolfssl_linuxkm_get_pie_redirect_table()->get_current) #define get_current (wolfssl_linuxkm_get_pie_redirect_table()->get_current)
@ -399,8 +420,15 @@
#endif #endif
#define __mutex_init (wolfssl_linuxkm_get_pie_redirect_table()->__mutex_init) #define __mutex_init (wolfssl_linuxkm_get_pie_redirect_table()->__mutex_init)
#define mutex_lock (wolfssl_linuxkm_get_pie_redirect_table()->mutex_lock) #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
#define mutex_lock_nested (wolfssl_linuxkm_get_pie_redirect_table()->mutex_lock_nested)
#else
#define mutex_lock (wolfssl_linuxkm_get_pie_redirect_table()->mutex_lock)
#endif
#define mutex_unlock (wolfssl_linuxkm_get_pie_redirect_table()->mutex_unlock) #define mutex_unlock (wolfssl_linuxkm_get_pie_redirect_table()->mutex_unlock)
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
#define mutex_destroy (wolfssl_linuxkm_get_pie_redirect_table()->mutex_destroy)
#endif
/* per linux/ctype.h, tolower() and toupper() are macros bound to static inlines /* per linux/ctype.h, tolower() and toupper() are macros bound to static inlines
* that use macros that bring in the _ctype global. for __PIE__, this needs to * that use macros that bring in the _ctype global. for __PIE__, this needs to
@ -432,35 +460,37 @@
extern __must_check int save_vector_registers_x86(void); extern __must_check int save_vector_registers_x86(void);
extern void restore_vector_registers_x86(void); extern void restore_vector_registers_x86(void);
#else /* !WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED */ #else /* !WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED */
static __must_check inline int save_vector_registers_x86(void) { #define save_vector_registers_x86() ({ \
preempt_disable(); int _ret; \
if (! irq_fpu_usable()) { preempt_disable(); \
preempt_enable(); if (! irq_fpu_usable()) { \
return EPERM; preempt_enable(); \
} else { _ret = BAD_STATE_E; \
kernel_fpu_begin(); } else { \
preempt_enable(); /* kernel_fpu_begin() does its own preempt_disable(). decrement ours. */ kernel_fpu_begin(); \
return 0; preempt_enable(); /* kernel_fpu_begin() does its own preempt_disable(). decrement ours. */ \
} _ret = 0; \
} } \
static inline void restore_vector_registers_x86(void) { _ret; \
kernel_fpu_end(); })
} #define restore_vector_registers_x86() kernel_fpu_end()
#endif /* !WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED */ #endif /* !WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED */
#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) #elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
static __must_check inline int save_vector_registers_arm(void) { static WARN_UNUSED_RESULT inline int save_vector_registers_arm(void)
{
preempt_disable(); preempt_disable();
if (! may_use_simd()) { if (! may_use_simd()) {
preempt_enable(); preempt_enable();
return EPERM; return BAD_STATE_E;
} else { } else {
fpsimd_preserve_current_state(); fpsimd_preserve_current_state();
return 0; return 0;
} }
} }
static inline void restore_vector_registers_arm(void) { static inline void restore_vector_registers_arm(void)
{
fpsimd_restore_current_state(); fpsimd_restore_current_state();
preempt_enable(); preempt_enable();
} }
@ -469,7 +499,6 @@
#endif /* WOLFSSL_LINUXKM_SIMD */ #endif /* WOLFSSL_LINUXKM_SIMD */
/* Linux headers define these using C expressions, but we need /* Linux headers define these using C expressions, but we need
* them to be evaluable by the preprocessor, for use in sp_int.h. * them to be evaluable by the preprocessor, for use in sp_int.h.
*/ */
@ -565,8 +594,6 @@
extern void fipsEntry(void); extern void fipsEntry(void);
#endif #endif
#endif /* BUILDING_WOLFSSL */
/* needed to suppress inclusion of stdio.h in wolfssl/wolfcrypt/types.h */ /* needed to suppress inclusion of stdio.h in wolfssl/wolfcrypt/types.h */
#define XSNPRINTF snprintf #define XSNPRINTF snprintf
@ -580,6 +607,16 @@
(int)_xatoi_res; \ (int)_xatoi_res; \
}) })
/* suppress false-positive "writing 1 byte into a region of size 0" warnings
* building old kernels with new gcc:
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)
_Pragma("GCC diagnostic ignored \"-Wstringop-overflow\"");
#endif
#endif /* BUILDING_WOLFSSL */
#else /* ! WOLFSSL_LINUXKM */ #else /* ! WOLFSSL_LINUXKM */
#ifndef SAVE_VECTOR_REGISTERS #ifndef SAVE_VECTOR_REGISTERS