diff --git a/linuxkm/module_hooks.c b/linuxkm/module_hooks.c index 2e46463bf..5b3f48d74 100644 --- a/linuxkm/module_hooks.c +++ b/linuxkm/module_hooks.c @@ -123,6 +123,17 @@ static int wolfssl_init(void) #endif #ifdef HAVE_LINUXKM_PIE_SUPPORT + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) + #define THIS_MODULE_BASE (THIS_MODULE->core_layout.base) + #define THIS_MODULE_TEXT_SIZE (THIS_MODULE->core_layout.text_size) + #define THIS_MODULE_RO_SIZE (THIS_MODULE->core_layout.ro_size) +#else + #define THIS_MODULE_BASE (THIS_MODULE->module_core) + #define THIS_MODULE_TEXT_SIZE (THIS_MODULE->core_text_size) + #define THIS_MODULE_RO_SIZE (THIS_MODULE->core_ro_size) +#endif + { char *pie_text_start = (char *)wolfCrypt_PIE_first_function; char *pie_text_end = (char *)wolfCrypt_PIE_last_function; @@ -131,8 +142,8 @@ static int wolfssl_init(void) unsigned int text_hash, rodata_hash; if ((pie_text_start < pie_text_end) && - (pie_text_start >= (char *)(THIS_MODULE->core_layout.base)) && - (pie_text_end - (char *)(THIS_MODULE->core_layout.base) <= THIS_MODULE->core_layout.text_size)) + (pie_text_start >= (char *)THIS_MODULE_BASE) && + (pie_text_end - (char *)THIS_MODULE_BASE <= THIS_MODULE_TEXT_SIZE)) { text_hash = hash_span(pie_text_start, pie_text_end); } else { @@ -141,14 +152,14 @@ static int wolfssl_init(void) pie_text_start, pie_text_end, pie_text_end-pie_text_start, - THIS_MODULE->core_layout.base, - (char *)(THIS_MODULE->core_layout.base) + THIS_MODULE->core_layout.text_size); + THIS_MODULE_BASE, + (char *)THIS_MODULE_BASE + THIS_MODULE_TEXT_SIZE); text_hash = 0; } if ((pie_rodata_start < pie_rodata_end) && - (pie_rodata_start >= (char *)(THIS_MODULE->core_layout.base) + THIS_MODULE->core_layout.text_size) && - (pie_rodata_end - (char *)(THIS_MODULE->core_layout.base) <= THIS_MODULE->core_layout.ro_size)) + (pie_rodata_start >= (char *)THIS_MODULE_BASE + THIS_MODULE_TEXT_SIZE) && + (pie_rodata_end - (char *)THIS_MODULE_BASE <= THIS_MODULE_RO_SIZE)) { rodata_hash = hash_span(pie_rodata_start, pie_rodata_end); } else { @@ -157,8 +168,8 @@ static int wolfssl_init(void) pie_rodata_start, pie_rodata_end, pie_rodata_end-pie_rodata_start, - (char *)(THIS_MODULE->core_layout.base) + THIS_MODULE->core_layout.text_size, - (char *)(THIS_MODULE->core_layout.base) + THIS_MODULE->core_layout.ro_size); + (char *)THIS_MODULE_BASE + THIS_MODULE_TEXT_SIZE, + (char *)THIS_MODULE_BASE + THIS_MODULE_RO_SIZE); rodata_hash = 0; } @@ -169,7 +180,7 @@ static int wolfssl_init(void) pr_info("wolfCrypt container hashes (spans): %x (%lu) %x (%lu), module base %pK\n", text_hash, pie_text_end-pie_text_start, rodata_hash, pie_rodata_end-pie_rodata_start, - THIS_MODULE->core_layout.base); + THIS_MODULE_BASE); } #endif /* HAVE_LINUXKM_PIE_SUPPORT */ @@ -349,8 +360,16 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) { kmalloc_order_trace; wolfssl_linuxkm_pie_redirect_table.get_random_bytes = get_random_bytes; - wolfssl_linuxkm_pie_redirect_table.ktime_get_coarse_real_ts64 = - ktime_get_coarse_real_ts64; + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + wolfssl_linuxkm_pie_redirect_table.getnstimeofday = + getnstimeofday; + #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) + wolfssl_linuxkm_pie_redirect_table.current_kernel_time64 = + current_kernel_time64; + #else + wolfssl_linuxkm_pie_redirect_table.ktime_get_coarse_real_ts64 = + ktime_get_coarse_real_ts64; + #endif wolfssl_linuxkm_pie_redirect_table.get_current = my_get_current_thread; wolfssl_linuxkm_pie_redirect_table.preempt_count = my_preempt_count; @@ 
-380,8 +399,15 @@ static int set_up_wolfssl_linuxkm_pie_redirect_table(void) { #endif wolfssl_linuxkm_pie_redirect_table.__mutex_init = __mutex_init; - wolfssl_linuxkm_pie_redirect_table.mutex_lock = mutex_lock; + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + wolfssl_linuxkm_pie_redirect_table.mutex_lock_nested = mutex_lock_nested; + #else + wolfssl_linuxkm_pie_redirect_table.mutex_lock = mutex_lock; + #endif wolfssl_linuxkm_pie_redirect_table.mutex_unlock = mutex_unlock; + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + wolfssl_linuxkm_pie_redirect_table.mutex_destroy = mutex_destroy; + #endif #ifdef HAVE_FIPS wolfssl_linuxkm_pie_redirect_table.wolfCrypt_FIPS_first = diff --git a/src/internal.c b/src/internal.c index c5b99086f..5e9b43e83 100644 --- a/src/internal.c +++ b/src/internal.c @@ -8269,11 +8269,9 @@ ProtocolVersion MakeDTLSv1_2(void) } #elif defined(WOLFSSL_LINUXKM) - #include - #include word32 LowResTimer(void) { - return (word32)ktime_get_real_ns(); + return (word32)time(NULL); } #else diff --git a/src/tls13.c b/src/tls13.c index bcea4f261..2e5980e60 100644 --- a/src/tls13.c +++ b/src/tls13.c @@ -1558,19 +1558,25 @@ end: return (word32)(uTaskerSystemTick / (TICK_RESOLUTION / 1000)); } #elif defined(WOLFSSL_LINUXKM) - /* The time in milliseconds. - * Used for tickets to represent difference between when first seen and when - * sending. - * - * returns the time in milliseconds as a 32-bit value. - */ word32 TimeNowInMilliseconds(void) { - #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) - return (word32)(ktime_get_real_ns() / (s64)1000000); - #else - return (word32)(ktime_get_real_ns() / (ktime_t)1000000); - #endif + s64 t; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + struct timespec ts; + getnstimeofday(&ts); + t = ts.tv_sec * (s64)1000; + t += ts.tv_nsec / (s64)1000000; +#else + struct timespec64 ts; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) + ts = current_kernel_time64(); +#else + ktime_get_coarse_real_ts64(&ts); +#endif + t = ts.tv_sec * 1000L; + t += ts.tv_nsec / 1000000L; +#endif + return (word32)t; } #elif defined(WOLFSSL_QNX_CAAM) word32 TimeNowInMilliseconds(void) diff --git a/wolfcrypt/src/aes.c b/wolfcrypt/src/aes.c index b916b7263..f81d08594 100644 --- a/wolfcrypt/src/aes.c +++ b/wolfcrypt/src/aes.c @@ -775,6 +775,7 @@ block cipher mechanism that uses n-bit binary string parameter key with 128-bits const int bits, Aes* aes) { int nr; + int ret; #ifdef WOLFSSL_SMALL_STACK Aes *temp_key; #else @@ -805,11 +806,11 @@ block cipher mechanism that uses n-bit binary string parameter key with 128-bits nr = temp_key->rounds; aes->rounds = nr; - if (SAVE_VECTOR_REGISTERS() != 0) { + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) { #ifdef WOLFSSL_SMALL_STACK XFREE(temp_key, aes->heap, DYNAMIC_TYPE_AES); #endif - return BAD_STATE_E; + return ret; } Key_Schedule[nr] = Temp_Key_Schedule[0]; @@ -3066,8 +3067,9 @@ int wc_AesSetIV(Aes* aes, const byte* iv) __must_check int wc_AesEncryptDirect(Aes* aes, byte* out, const byte* in) { if (haveAESNI && aes->use_aesni) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; } wc_AesEncrypt(aes, in, out); if (haveAESNI && aes->use_aesni) @@ -3084,8 +3086,9 @@ int wc_AesSetIV(Aes* aes, const byte* iv) __must_check int wc_AesDecryptDirect(Aes* aes, byte* out, const byte* in) { if (haveAESNI && aes->use_aesni) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; } wc_AesDecrypt(aes, 
in, out); if (haveAESNI && aes->use_aesni) @@ -3869,6 +3872,7 @@ int wc_AesSetIV(Aes* aes, const byte* iv) #ifdef WOLFSSL_AESNI if (haveAESNI) { + int ret; #ifdef DEBUG_AESNI printf("about to aes cbc encrypt\n"); printf("in = %p\n", in); @@ -3889,9 +3893,9 @@ int wc_AesSetIV(Aes* aes, const byte* iv) tmp_align = tmp + (AESNI_ALIGN - ((wc_ptr_t)tmp % AESNI_ALIGN)); XMEMCPY(tmp_align, in, sz); - if (SAVE_VECTOR_REGISTERS() != 0) { + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) { XFREE(tmp, aes->heap, DYNAMIC_TYPE_TMP_BUFFER); - return BAD_STATE_E; + return ret; } AES_CBC_encrypt(tmp_align, tmp_align, (byte*)aes->reg, sz, (byte*)aes->key, aes->rounds); @@ -3908,8 +3912,8 @@ int wc_AesSetIV(Aes* aes, const byte* iv) #endif } - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; AES_CBC_encrypt(in, out, (byte*)aes->reg, sz, (byte*)aes->key, aes->rounds); RESTORE_VECTOR_REGISTERS(); @@ -3994,6 +3998,7 @@ int wc_AesSetIV(Aes* aes, const byte* iv) #ifdef WOLFSSL_AESNI if (haveAESNI) { + int ret; #ifdef DEBUG_AESNI printf("about to aes cbc decrypt\n"); printf("in = %p\n", in); @@ -4006,8 +4011,8 @@ int wc_AesSetIV(Aes* aes, const byte* iv) /* if input and output same will overwrite input iv */ XMEMCPY(aes->tmp, in + sz - AES_BLOCK_SIZE, AES_BLOCK_SIZE); - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; #if defined(WOLFSSL_AESNI_BY4) AES_CBC_decrypt_by4(in, out, (byte*)aes->reg, sz, (byte*)aes->key, aes->rounds); @@ -7579,8 +7584,9 @@ int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz, #ifdef WOLFSSL_AESNI #ifdef HAVE_INTEL_AVX2 if (IS_INTEL_AVX2(intel_flags)) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; AES_GCM_encrypt_avx2(in, out, authIn, iv, authTag, sz, authInSz, ivSz, authTagSz, (const byte*)aes->key, aes->rounds); RESTORE_VECTOR_REGISTERS(); @@ -7590,8 +7596,9 @@ int wc_AesGcmEncrypt(Aes* aes, byte* out, const byte* in, word32 sz, #endif #ifdef HAVE_INTEL_AVX1 if (IS_INTEL_AVX1(intel_flags)) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; AES_GCM_encrypt_avx1(in, out, authIn, iv, authTag, sz, authInSz, ivSz, authTagSz, (const byte*)aes->key, aes->rounds); RESTORE_VECTOR_REGISTERS(); @@ -8103,8 +8110,9 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz, #ifdef WOLFSSL_AESNI #ifdef HAVE_INTEL_AVX2 if (IS_INTEL_AVX2(intel_flags)) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; AES_GCM_decrypt_avx2(in, out, authIn, iv, authTag, sz, authInSz, ivSz, authTagSz, (byte*)aes->key, aes->rounds, &res); RESTORE_VECTOR_REGISTERS(); @@ -8116,8 +8124,9 @@ int wc_AesGcmDecrypt(Aes* aes, byte* out, const byte* in, word32 sz, #endif #ifdef HAVE_INTEL_AVX1 if (IS_INTEL_AVX1(intel_flags)) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; AES_GCM_decrypt_avx1(in, out, authIn, iv, authTag, sz, authInSz, ivSz, authTagSz, (byte*)aes->key, aes->rounds, &res); RESTORE_VECTOR_REGISTERS(); @@ -8373,8 +8382,9 @@ static int AesGcmInit_aesni(Aes* aes, const byte* iv, word32 ivSz) #ifdef HAVE_INTEL_AVX2 if (IS_INTEL_AVX2(intel_flags)) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return 
ret; AES_GCM_init_avx2((byte*)aes->key, aes->rounds, iv, ivSz, aes->H, AES_COUNTER(aes), AES_INITCTR(aes)); RESTORE_VECTOR_REGISTERS(); @@ -8383,8 +8393,9 @@ static int AesGcmInit_aesni(Aes* aes, const byte* iv, word32 ivSz) #endif #ifdef HAVE_INTEL_AVX1 if (IS_INTEL_AVX1(intel_flags)) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; AES_GCM_init_avx1((byte*)aes->key, aes->rounds, iv, ivSz, aes->H, AES_COUNTER(aes), AES_INITCTR(aes)); RESTORE_VECTOR_REGISTERS(); @@ -8392,7 +8403,8 @@ static int AesGcmInit_aesni(Aes* aes, const byte* iv, word32 ivSz) else #endif { - if (SAVE_VECTOR_REGISTERS() != 0) + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) return BAD_STATE_E; AES_GCM_init_aesni((byte*)aes->key, aes->rounds, iv, ivSz, aes->H, AES_COUNTER(aes), AES_INITCTR(aes)); @@ -8532,8 +8544,9 @@ static int AesGcmEncryptUpdate_aesni(Aes* aes, byte* c, const byte* p, word32 blocks; int partial; - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; /* Hash in A, the Authentication Data */ AesGcmAadUpdate_aesni(aes, a, aSz, (cSz > 0) && (c != NULL)); @@ -8659,8 +8672,9 @@ static int AesGcmEncryptFinal_aesni(Aes* aes, byte* authTag, word32 authTagSz) /* AAD block incomplete when > 0 */ byte over = aes->aOver; - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; if (aes->cOver > 0) { /* Cipher text block incomplete. */ over = aes->cOver; @@ -8758,8 +8772,9 @@ static int AesGcmDecryptUpdate_aesni(Aes* aes, byte* p, const byte* c, word32 blocks; int partial; - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; /* Hash in A, the Authentication Data */ AesGcmAadUpdate_aesni(aes, a, aSz, (cSz > 0) && (c != NULL)); @@ -8893,8 +8908,8 @@ static int AesGcmDecryptFinal_aesni(Aes* aes, const byte* authTag, byte over = aes->aOver; byte *lastBlock = AES_LASTGBLOCK(aes); - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; if (aes->cOver > 0) { /* Cipher text block incomplete. 
*/ over = aes->cOver; @@ -9893,8 +9908,9 @@ int wc_AesCcmEncrypt(Aes* aes, byte* out, const byte* in, word32 inSz, B[15] = 1; #ifdef WOLFSSL_AESNI if (haveAESNI && aes->use_aesni) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; while (inSz >= AES_BLOCK_SIZE * 4) { AesCcmCtrIncSet4(B, lenSz); @@ -9979,8 +9995,9 @@ int wc_AesCcmDecrypt(Aes* aes, byte* out, const byte* in, word32 inSz, #ifdef WOLFSSL_AESNI if (haveAESNI && aes->use_aesni) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; while (oSz >= AES_BLOCK_SIZE * 4) { AesCcmCtrIncSet4(B, lenSz); @@ -10392,8 +10409,8 @@ int wc_AesEcbEncrypt(Aes* aes, byte* out, const byte* in, word32 sz) if ((in == NULL) || (out == NULL) || (aes == NULL)) return BAD_FUNC_ARG; - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; ret = _AesEcbEncrypt(aes, out, in, sz); RESTORE_VECTOR_REGISTERS(); @@ -10407,8 +10424,8 @@ int wc_AesEcbDecrypt(Aes* aes, byte* out, const byte* in, word32 sz) if ((in == NULL) || (out == NULL) || (aes == NULL)) return BAD_FUNC_ARG; - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; ret = _AesEcbDecrypt(aes, out, in, sz); RESTORE_VECTOR_REGISTERS(); @@ -10437,6 +10454,7 @@ static int wc_AesFeedbackEncrypt(Aes* aes, byte* out, const byte* in, #ifdef WOLFSSL_AES_CFB byte* reg = NULL; #endif + int ret; if (aes == NULL || out == NULL || in == NULL) { return BAD_FUNC_ARG; @@ -10462,8 +10480,8 @@ static int wc_AesFeedbackEncrypt(Aes* aes, byte* out, const byte* in, sz--; } - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; while (sz >= AES_BLOCK_SIZE) { /* Using aes->tmp here for inline case i.e. in=out */ @@ -10533,6 +10551,7 @@ static int wc_AesFeedbackDecrypt(Aes* aes, byte* out, const byte* in, word32 sz, byte mode) { byte* tmp; + int ret; if (aes == NULL || out == NULL || in == NULL) { return BAD_FUNC_ARG; @@ -10554,8 +10573,8 @@ static int wc_AesFeedbackDecrypt(Aes* aes, byte* out, const byte* in, word32 sz, sz--; } - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; while (sz > AES_BLOCK_SIZE) { /* Using aes->tmp here for inline case i.e. 
in=out */ @@ -10673,6 +10692,7 @@ static int wc_AesFeedbackCFB8(Aes* aes, byte* out, const byte* in, word32 sz, byte dir) { byte *pt; + int ret; if (aes == NULL || out == NULL || in == NULL) { return BAD_FUNC_ARG; @@ -10682,8 +10702,8 @@ static int wc_AesFeedbackCFB8(Aes* aes, byte* out, const byte* in, return 0; } - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; while (sz > 0) { wc_AesEncryptDirect(aes, (byte*)aes->tmp, (byte*)aes->reg); @@ -10727,6 +10747,7 @@ static int wc_AesFeedbackCFB1(Aes* aes, byte* out, const byte* in, byte cur = 0; /* hold current work in order to handle inline in=out */ byte* pt; int bit = 7; + int ret; if (aes == NULL || out == NULL || in == NULL) { return BAD_FUNC_ARG; @@ -10736,8 +10757,8 @@ static int wc_AesFeedbackCFB1(Aes* aes, byte* out, const byte* in, return 0; } - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; while (sz > 0) { wc_AesEncryptDirect(aes, (byte*)aes->tmp, (byte*)aes->reg); @@ -10942,6 +10963,8 @@ int wc_AesKeyWrap_ex(Aes *aes, const byte* in, word32 inSz, byte* out, byte t[KEYWRAP_BLOCK_SIZE]; byte tmp[AES_BLOCK_SIZE]; + int ret; + /* n must be at least 2 64-bit blocks, output size is (n + 1) 8 bytes (64-bit) */ if (aes == NULL || in == NULL || inSz < 2*KEYWRAP_BLOCK_SIZE || out == NULL || outSz < (inSz + KEYWRAP_BLOCK_SIZE)) @@ -10962,8 +10985,8 @@ int wc_AesKeyWrap_ex(Aes *aes, const byte* in, word32 inSz, byte* out, XMEMCPY(tmp, iv, KEYWRAP_BLOCK_SIZE); } - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; for (j = 0; j <= 5; j++) { for (i = 1; i <= inSz / KEYWRAP_BLOCK_SIZE; i++) { @@ -11048,6 +11071,8 @@ int wc_AesKeyUnWrap_ex(Aes *aes, const byte* in, word32 inSz, byte* out, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6 }; + int ret; + if (aes == NULL || in == NULL || inSz < 3 * KEYWRAP_BLOCK_SIZE || out == NULL || outSz < (inSz - KEYWRAP_BLOCK_SIZE)) return BAD_FUNC_ARG; @@ -11067,8 +11092,8 @@ int wc_AesKeyUnWrap_ex(Aes *aes, const byte* in, word32 inSz, byte* out, XMEMCPY(out, in + KEYWRAP_BLOCK_SIZE, inSz - KEYWRAP_BLOCK_SIZE); XMEMSET(t, 0, sizeof(t)); - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; /* initialize counter to 6n */ n = (inSz - 1) / KEYWRAP_BLOCK_SIZE; @@ -11351,8 +11376,8 @@ int wc_AesXtsEncrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz, XMEMSET(tmp, 0, AES_BLOCK_SIZE); /* set to 0's in case of improper AES * key setup passed to encrypt direct*/ - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; wc_AesEncryptDirect(tweak, tmp, i); @@ -11467,8 +11492,8 @@ int wc_AesXtsDecrypt(XtsAes* xaes, byte* out, const byte* in, word32 sz, XMEMSET(tmp, 0, AES_BLOCK_SIZE); /* set to 0's in case of improper AES * key setup passed to decrypt direct*/ - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; wc_AesEncryptDirect(tweak, tmp, i); diff --git a/wolfcrypt/src/chacha.c b/wolfcrypt/src/chacha.c index 9415de658..cc5a5a7d1 100644 --- a/wolfcrypt/src/chacha.c +++ b/wolfcrypt/src/chacha.c @@ -418,16 +418,18 @@ int wc_Chacha_Process(ChaCha* ctx, byte* output, const byte* input, #ifdef HAVE_INTEL_AVX2 if (IS_INTEL_AVX2(cpuidFlags)) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret 
!= 0) + return ret; chacha_encrypt_avx2(ctx, input, output, msglen); RESTORE_VECTOR_REGISTERS(); return 0; } #endif if (IS_INTEL_AVX1(cpuidFlags)) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; chacha_encrypt_avx1(ctx, input, output, msglen); RESTORE_VECTOR_REGISTERS(); return 0; diff --git a/wolfcrypt/src/cmac.c b/wolfcrypt/src/cmac.c index 71ec650d0..8cb9366c7 100644 --- a/wolfcrypt/src/cmac.c +++ b/wolfcrypt/src/cmac.c @@ -190,10 +190,10 @@ int wc_CmacUpdate(Cmac* cmac, const byte* in, word32 inSz) #endif cmac->totalSz += AES_BLOCK_SIZE; cmac->bufferSz = 0; - } #ifdef WOLFSSL_LINUXKM - } + } #endif + } } return ret; diff --git a/wolfcrypt/src/curve25519.c b/wolfcrypt/src/curve25519.c index fc2db01d3..6dc402cdd 100644 --- a/wolfcrypt/src/curve25519.c +++ b/wolfcrypt/src/curve25519.c @@ -128,8 +128,8 @@ int wc_curve25519_make_pub(int public_size, byte* pub, int private_size, #else fe_init(); - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; ret = curve25519(pub, priv, kCurve25519BasePoint); @@ -171,8 +171,8 @@ int wc_curve25519_generic(int public_size, byte* pub, fe_init(); - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; ret = curve25519(pub, priv, basepoint); @@ -289,8 +289,8 @@ int wc_curve25519_shared_secret_ex(curve25519_key* private_key, ret = nxp_ltc_curve25519(&o, private_key->k, &public_key->p, kLTC_Curve25519); #else - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; ret = curve25519(o.point, private_key->k, public_key->p.point); diff --git a/wolfcrypt/src/memory.c b/wolfcrypt/src/memory.c index 12e42b54e..a5ebeaf40 100644 --- a/wolfcrypt/src/memory.c +++ b/wolfcrypt/src/memory.c @@ -1139,42 +1139,60 @@ union fpregs_state **wolfcrypt_irq_fpu_states = NULL; #if defined(WOLFSSL_LINUXKM_SIMD_X86) && defined(WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED) - static __must_check inline int am_in_hard_interrupt_handler(void) { + static WARN_UNUSED_RESULT inline int am_in_hard_interrupt_handler(void) + { return (preempt_count() & (NMI_MASK | HARDIRQ_MASK)) != 0; } - __must_check int allocate_wolfcrypt_irq_fpu_states(void) { - wolfcrypt_irq_fpu_states = (union fpregs_state **)kzalloc(nr_cpu_ids * sizeof(struct fpu_state *), GFP_KERNEL); + WARN_UNUSED_RESULT int allocate_wolfcrypt_irq_fpu_states(void) + { + wolfcrypt_irq_fpu_states = + (union fpregs_state **)kzalloc(nr_cpu_ids + * sizeof(struct fpu_state *), + GFP_KERNEL); if (! wolfcrypt_irq_fpu_states) { - pr_err("warning, allocation of %lu bytes for wolfcrypt_irq_fpu_states failed.\n", nr_cpu_ids * sizeof(struct fpu_state *)); + pr_err("warning, allocation of %lu bytes for " + "wolfcrypt_irq_fpu_states failed.\n", + nr_cpu_ids * sizeof(struct fpu_state *)); return MEMORY_E; } { - unsigned int i; + typeof(nr_cpu_ids) i; for (i=0; istate, which has - * stringent alignment requirements (64 byte cache line), but takes - * a pointer to the parent struct. work around this. + /* save_fpregs_to_fpstate() only accesses fpu->state, which + * has stringent alignment requirements (64 byte cache + * line), but takes a pointer to the parent struct. work + * around this. 
*/ - struct fpu *fake_fpu_pointer = (struct fpu *)(((char *)wolfcrypt_irq_fpu_states[processor_id]) - offsetof(struct fpu, state)); + struct fpu *fake_fpu_pointer = + (struct fpu *)(((char *)wolfcrypt_irq_fpu_states[processor_id]) + - offsetof(struct fpu, state)); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) copy_fpregs_to_fpstate(fake_fpu_pointer); #else save_fpregs_to_fpstate(fake_fpu_pointer); #endif } - ((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] = 1; /* mark the slot as used. */ - /* note, not preempt_enable()ing, mirroring kernel_fpu_begin() semantics. */ + /* mark the slot as used. */ + ((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] = 1; + /* note, not preempt_enable()ing, mirroring kernel_fpu_begin() + * semantics. + */ return 0; } preempt_enable(); - return EPERM; + return BAD_STATE_E; } else { kernel_fpu_begin(); - preempt_enable(); /* kernel_fpu_begin() does its own preempt_disable(). decrement ours. */ + preempt_enable(); /* kernel_fpu_begin() does its own + * preempt_disable(). decrement ours. + */ return 0; } } - void restore_vector_registers_x86(void) { + void restore_vector_registers_x86(void) + { if (am_in_hard_interrupt_handler()) { int processor_id = __smp_processor_id(); - if (((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1]) { + if ((wolfcrypt_irq_fpu_states == NULL) || + (wolfcrypt_irq_fpu_states[processor_id] == NULL) || + (((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] == 0)) + { + pr_err("restore_vector_registers_x86 called for cpu id %d " + "without saved context.\n", processor_id); + preempt_enable(); /* just in case */ + return; + } else { #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 14, 0) copy_kernel_to_fpregs(wolfcrypt_irq_fpu_states[processor_id]); #else - __restore_fpregs_from_fpstate(wolfcrypt_irq_fpu_states[processor_id], xfeatures_mask_all); + __restore_fpregs_from_fpstate(wolfcrypt_irq_fpu_states[processor_id], + xfeatures_mask_all); #endif ((char *)wolfcrypt_irq_fpu_states[processor_id])[PAGE_SIZE-1] = 0; preempt_enable(); return; - } else { - pr_err("restore_vector_registers_x86 called for cpu id %d without saved context.\n", processor_id); - preempt_enable(); /* just in case */ - return; } } kernel_fpu_end(); diff --git a/wolfcrypt/src/poly1305.c b/wolfcrypt/src/poly1305.c index c49d098e4..29e5c7dc5 100644 --- a/wolfcrypt/src/poly1305.c +++ b/wolfcrypt/src/poly1305.c @@ -267,8 +267,9 @@ static int poly1305_blocks(Poly1305* ctx, const unsigned char *m, { #ifdef USE_INTEL_SPEEDUP /* AVX2 is handled in wc_Poly1305Update. */ - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; poly1305_blocks_avx(ctx, m, bytes); RESTORE_VECTOR_REGISTERS(); return 0; @@ -402,8 +403,9 @@ static int poly1305_block(Poly1305* ctx, const unsigned char *m) { #ifdef USE_INTEL_SPEEDUP /* No call to poly1305_block when AVX2, AVX2 does 4 blocks at a time. 
*/ - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret= SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; poly1305_block_avx(ctx, m); RESTORE_VECTOR_REGISTERS(); return 0; @@ -442,8 +444,11 @@ int wc_Poly1305SetKey(Poly1305* ctx, const byte* key, word32 keySz) intel_flags = cpuid_get_flags(); cpu_flags_set = 1; } - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + { + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; + } #ifdef HAVE_INTEL_AVX2 if (IS_INTEL_AVX2(intel_flags)) poly1305_setkey_avx2(ctx, key); @@ -506,6 +511,7 @@ int wc_Poly1305SetKey(Poly1305* ctx, const byte* key, word32 keySz) int wc_Poly1305Final(Poly1305* ctx, byte* mac) { #ifdef USE_INTEL_SPEEDUP + int ret; #elif defined(POLY130564) word64 h0,h1,h2,c; @@ -525,8 +531,8 @@ int wc_Poly1305Final(Poly1305* ctx, byte* mac) return BAD_FUNC_ARG; #ifdef USE_INTEL_SPEEDUP - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; #ifdef HAVE_INTEL_AVX2 if (IS_INTEL_AVX2(intel_flags)) poly1305_final_avx2(ctx, mac); @@ -714,9 +720,9 @@ int wc_Poly1305Update(Poly1305* ctx, const byte* m, word32 bytes) #ifdef USE_INTEL_SPEEDUP #ifdef HAVE_INTEL_AVX2 if (IS_INTEL_AVX2(intel_flags)) { - - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + int ret = SAVE_VECTOR_REGISTERS(); + if (ret != 0) + return ret; /* handle leftover */ diff --git a/wolfcrypt/src/pwdbased.c b/wolfcrypt/src/pwdbased.c index cd3fa2491..66cc63ac5 100644 --- a/wolfcrypt/src/pwdbased.c +++ b/wolfcrypt/src/pwdbased.c @@ -368,11 +368,17 @@ int wc_PKCS12_PBKDF_ex(byte* output, const byte* passwd, int passLen, byte* buffer = staticBuffer; #ifdef WOLFSSL_SMALL_STACK - byte* Ai; - byte* B; + byte* Ai = NULL; + byte* B = NULL; + mp_int *B1 = NULL; + mp_int *i1 = NULL; + mp_int *res = NULL; #else byte Ai[WC_MAX_DIGEST_SIZE]; byte B[WC_MAX_BLOCK_SIZE]; + mp_int B1[1]; + mp_int i1[1]; + mp_int res[1]; #endif enum wc_HashType hashT; @@ -449,9 +455,20 @@ int wc_PKCS12_PBKDF_ex(byte* output, const byte* passwd, int passLen, for (i = 0; i < (int)pLen; i++) P[i] = passwd[i % passLen]; +#ifdef WOLFSSL_SMALL_STACK + if (((B1 = (mp_int *)XMALLOC(sizeof(*B1), heap, DYNAMIC_TYPE_TMP_BUFFER)) + == NULL) || + ((i1 = (mp_int *)XMALLOC(sizeof(*i1), heap, DYNAMIC_TYPE_TMP_BUFFER)) + == NULL) || + ((res = (mp_int *)XMALLOC(sizeof(*res), heap, DYNAMIC_TYPE_TMP_BUFFER)) + == NULL)) { + ret = MEMORY_E; + goto out; + } +#endif + while (kLen > 0) { word32 currentLen; - mp_int B1; ret = DoPKCS12Hash(hashType, buffer, totalLen, Ai, u, iterations); if (ret < 0) @@ -460,55 +477,53 @@ int wc_PKCS12_PBKDF_ex(byte* output, const byte* passwd, int passLen, for (i = 0; i < (int)v; i++) B[i] = Ai[i % u]; - if (mp_init(&B1) != MP_OKAY) + if (mp_init(B1) != MP_OKAY) ret = MP_INIT_E; - else if (mp_read_unsigned_bin(&B1, B, v) != MP_OKAY) + else if (mp_read_unsigned_bin(B1, B, v) != MP_OKAY) ret = MP_READ_E; - else if (mp_add_d(&B1, (mp_digit)1, &B1) != MP_OKAY) + else if (mp_add_d(B1, (mp_digit)1, B1) != MP_OKAY) ret = MP_ADD_E; if (ret != 0) { - mp_clear(&B1); + mp_clear(B1); break; } for (i = 0; i < (int)iLen; i += v) { int outSz; - mp_int i1; - mp_int res; - if (mp_init_multi(&i1, &res, NULL, NULL, NULL, NULL) != MP_OKAY) { + if (mp_init_multi(i1, res, NULL, NULL, NULL, NULL) != MP_OKAY) { ret = MP_INIT_E; break; } - if (mp_read_unsigned_bin(&i1, I + i, v) != MP_OKAY) + if (mp_read_unsigned_bin(i1, I + i, v) != MP_OKAY) ret = MP_READ_E; - else if (mp_add(&i1, &B1, &res) != MP_OKAY) + else if (mp_add(i1, 
B1, res) != MP_OKAY) ret = MP_ADD_E; - else if ( (outSz = mp_unsigned_bin_size(&res)) < 0) + else if ( (outSz = mp_unsigned_bin_size(res)) < 0) ret = MP_TO_E; else { if (outSz > (int)v) { /* take off MSB */ byte tmp[WC_MAX_BLOCK_SIZE + 1]; - ret = mp_to_unsigned_bin(&res, tmp); + ret = mp_to_unsigned_bin(res, tmp); XMEMCPY(I + i, tmp + 1, v); } else if (outSz < (int)v) { XMEMSET(I + i, 0, v - outSz); - ret = mp_to_unsigned_bin(&res, I + i + v - outSz); + ret = mp_to_unsigned_bin(res, I + i + v - outSz); } else - ret = mp_to_unsigned_bin(&res, I + i); + ret = mp_to_unsigned_bin(res, I + i); } - mp_clear(&i1); - mp_clear(&res); + mp_clear(i1); + mp_clear(res); if (ret < 0) break; } if (ret < 0) { - mp_clear(&B1); + mp_clear(B1); break; } @@ -516,16 +531,27 @@ int wc_PKCS12_PBKDF_ex(byte* output, const byte* passwd, int passLen, XMEMCPY(output, Ai, currentLen); output += currentLen; kLen -= currentLen; - mp_clear(&B1); + mp_clear(B1); } - if (dynamic) XFREE(buffer, heap, DYNAMIC_TYPE_KEY); - #ifdef WOLFSSL_SMALL_STACK - XFREE(Ai, heap, DYNAMIC_TYPE_TMP_BUFFER); - XFREE(B, heap, DYNAMIC_TYPE_TMP_BUFFER); + out: + + if (Ai) + XFREE(Ai, heap, DYNAMIC_TYPE_TMP_BUFFER); + if (B) + XFREE(B, heap, DYNAMIC_TYPE_TMP_BUFFER); + if (B1) + XFREE(B1, heap, DYNAMIC_TYPE_TMP_BUFFER); + if (i1) + XFREE(i1, heap, DYNAMIC_TYPE_TMP_BUFFER); + if (res) + XFREE(res, heap, DYNAMIC_TYPE_TMP_BUFFER); #endif + if (dynamic) + XFREE(buffer, heap, DYNAMIC_TYPE_KEY); + return ret; } diff --git a/wolfcrypt/src/rsa.c b/wolfcrypt/src/rsa.c index 96be07262..a85a7b8da 100644 --- a/wolfcrypt/src/rsa.c +++ b/wolfcrypt/src/rsa.c @@ -4097,7 +4097,11 @@ int wc_CheckProbablePrime_ex(const byte* pRaw, word32 pRawSz, const byte* eRaw, word32 eRawSz, int nlen, int* isPrime, WC_RNG* rng) { - mp_int p, q, e; +#ifdef WOLFSSL_SMALL_STACK + mp_int *p = NULL, *q = NULL, *e = NULL; +#else + mp_int p[1], q[1], e[1]; +#endif mp_int* Q = NULL; int ret; @@ -4111,30 +4115,54 @@ int wc_CheckProbablePrime_ex(const byte* pRaw, word32 pRawSz, if ((qRaw != NULL && qRawSz == 0) || (qRaw == NULL && qRawSz != 0)) return BAD_FUNC_ARG; - ret = mp_init_multi(&p, &q, &e, NULL, NULL, NULL); +#ifdef WOLFSSL_SMALL_STACK + if (((p = (mp_int *)XMALLOC(sizeof(*p), NULL, DYNAMIC_TYPE_RSA_BUFFER)) == NULL) || + ((q = (mp_int *)XMALLOC(sizeof(*q), NULL, DYNAMIC_TYPE_RSA_BUFFER)) == NULL) || + ((e = (mp_int *)XMALLOC(sizeof(*e), NULL, DYNAMIC_TYPE_RSA_BUFFER)) == NULL)) + ret = MEMORY_E; + else + ret = 0; + if (ret == 0) +#endif + ret = mp_init_multi(p, q, e, NULL, NULL, NULL); if (ret == MP_OKAY) - ret = mp_read_unsigned_bin(&p, pRaw, pRawSz); + ret = mp_read_unsigned_bin(p, pRaw, pRawSz); if (ret == MP_OKAY) { if (qRaw != NULL) { - ret = mp_read_unsigned_bin(&q, qRaw, qRawSz); + ret = mp_read_unsigned_bin(q, qRaw, qRawSz); if (ret == MP_OKAY) - Q = &q; + Q = q; } } if (ret == MP_OKAY) - ret = mp_read_unsigned_bin(&e, eRaw, eRawSz); + ret = mp_read_unsigned_bin(e, eRaw, eRawSz); if (ret == MP_OKAY) - ret = _CheckProbablePrime(&p, Q, &e, nlen, isPrime, rng); + ret = _CheckProbablePrime(p, Q, e, nlen, isPrime, rng); ret = (ret == MP_OKAY) ? 
0 : PRIME_GEN_E; - mp_clear(&p); - mp_clear(&q); - mp_clear(&e); +#ifdef WOLFSSL_SMALL_STACK + if (p) { + mp_clear(p); + XFREE(p, NULL, DYNAMIC_TYPE_RSA_BUFFER); + } + if (q) { + mp_clear(q); + XFREE(q, NULL, DYNAMIC_TYPE_RSA_BUFFER); + } + if (e) { + mp_clear(e); + XFREE(e, NULL, DYNAMIC_TYPE_RSA_BUFFER); + } +#else + mp_clear(p); + mp_clear(q); + mp_clear(e); +#endif return ret; } diff --git a/wolfcrypt/src/sha256.c b/wolfcrypt/src/sha256.c index aed2d2c60..5b18da588 100644 --- a/wolfcrypt/src/sha256.c +++ b/wolfcrypt/src/sha256.c @@ -320,8 +320,8 @@ static int InitSha256(wc_Sha256* sha256) static WC_INLINE int inline_XTRANSFORM(wc_Sha256* S, const byte* D) { int ret; if (Transform_Sha256_is_vectorized) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; } ret = (*Transform_Sha256_p)(S, D); if (Transform_Sha256_is_vectorized) @@ -333,8 +333,8 @@ static int InitSha256(wc_Sha256* sha256) static WC_INLINE int inline_XTRANSFORM_LEN(wc_Sha256* S, const byte* D, word32 L) { int ret; if (Transform_Sha256_is_vectorized) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; } ret = (*Transform_Sha256_Len_p)(S, D, L); if (Transform_Sha256_is_vectorized) diff --git a/wolfcrypt/src/sha512.c b/wolfcrypt/src/sha512.c index 2b428c3c1..802d50bc6 100644 --- a/wolfcrypt/src/sha512.c +++ b/wolfcrypt/src/sha512.c @@ -449,8 +449,8 @@ static int InitSha512_256(wc_Sha512* sha512) static WC_INLINE int Transform_Sha512(wc_Sha512 *sha512) { int ret; if (Transform_Sha512_is_vectorized) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; } ret = (*Transform_Sha512_p)(sha512); if (Transform_Sha512_is_vectorized) @@ -460,8 +460,8 @@ static int InitSha512_256(wc_Sha512* sha512) static WC_INLINE int Transform_Sha512_Len(wc_Sha512 *sha512, word32 len) { int ret; if (Transform_Sha512_is_vectorized) { - if (SAVE_VECTOR_REGISTERS() != 0) - return BAD_STATE_E; + if ((ret = SAVE_VECTOR_REGISTERS()) != 0) + return ret; } ret = (*Transform_Sha512_Len_p)(sha512, len); if (Transform_Sha512_is_vectorized) diff --git a/wolfcrypt/src/wc_port.c b/wolfcrypt/src/wc_port.c index 8e1afb46e..f9a490448 100644 --- a/wolfcrypt/src/wc_port.c +++ b/wolfcrypt/src/wc_port.c @@ -2454,7 +2454,11 @@ time_t time(time_t * timer) ret = ts.tv_sec; #else struct timespec64 ts; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) + ts = current_kernel_time64(); +#else ktime_get_coarse_real_ts64(&ts); +#endif ret = ts.tv_sec; #endif if (timer) diff --git a/wolfssl/wolfcrypt/types.h b/wolfssl/wolfcrypt/types.h index 0a2af8fab..5ecb1949b 100644 --- a/wolfssl/wolfcrypt/types.h +++ b/wolfssl/wolfcrypt/types.h @@ -320,6 +320,16 @@ decouple library dependencies with standard string, memory and so on. 
#define FALL_THROUGH #endif + #ifndef WARN_UNUSED_RESULT + #if defined(WOLFSSL_LINUXKM) && defined(__must_check) + #define WARN_UNUSED_RESULT __must_check + #elif defined(__GNUC__) && (__GNUC__ >= 4) + #define WARN_UNUSED_RESULT __attribute__((warn_unused_result)) + #else + #define WARN_UNUSED_RESULT + #endif + #endif /* WARN_UNUSED_RESULT */ + /* Micrium will use Visual Studio for compilation but not the Win32 API */ #if defined(_WIN32) && !defined(MICRIUM) && !defined(FREERTOS) && \ !defined(FREERTOS_TCP) && !defined(EBSNET) && \ diff --git a/wolfssl/wolfcrypt/wc_port.h b/wolfssl/wolfcrypt/wc_port.h index f0cffdcb3..9f5683b63 100644 --- a/wolfssl/wolfcrypt/wc_port.h +++ b/wolfssl/wolfcrypt/wc_port.h @@ -133,7 +133,9 @@ #else #include #endif - #include + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) + #include + #endif #ifndef SAVE_VECTOR_REGISTERS #define SAVE_VECTOR_REGISTERS() save_vector_registers_x86() #endif @@ -263,7 +265,13 @@ typeof(kmalloc_order_trace) *kmalloc_order_trace; typeof(get_random_bytes) *get_random_bytes; - typeof(ktime_get_coarse_real_ts64) *ktime_get_coarse_real_ts64; + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + typeof(getnstimeofday) *getnstimeofday; + #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) + typeof(current_kernel_time64) *current_kernel_time64; + #else + typeof(ktime_get_coarse_real_ts64) *ktime_get_coarse_real_ts64; + #endif struct task_struct *(*get_current)(void); int (*preempt_count)(void); @@ -296,8 +304,15 @@ #endif /* WOLFSSL_LINUXKM_SIMD_X86 */ typeof(__mutex_init) *__mutex_init; - typeof(mutex_lock) *mutex_lock; + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + typeof(mutex_lock_nested) *mutex_lock_nested; + #else + typeof(mutex_lock) *mutex_lock; + #endif typeof(mutex_unlock) *mutex_unlock; + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + typeof(mutex_destroy) *mutex_destroy; + #endif #ifdef HAVE_FIPS typeof(wolfCrypt_FIPS_first) *wolfCrypt_FIPS_first; @@ -369,7 +384,13 @@ #define kmalloc_order_trace (wolfssl_linuxkm_get_pie_redirect_table()->kmalloc_order_trace) #define get_random_bytes (wolfssl_linuxkm_get_pie_redirect_table()->get_random_bytes) - #define ktime_get_coarse_real_ts64 (wolfssl_linuxkm_get_pie_redirect_table()->ktime_get_coarse_real_ts64) + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + #define getnstimeofday (wolfssl_linuxkm_get_pie_redirect_table()->getnstimeofday) + #elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) + #define current_kernel_time64 (wolfssl_linuxkm_get_pie_redirect_table()->current_kernel_time64) + #else + #define ktime_get_coarse_real_ts64 (wolfssl_linuxkm_get_pie_redirect_table()->ktime_get_coarse_real_ts64) + #endif #undef get_current #define get_current (wolfssl_linuxkm_get_pie_redirect_table()->get_current) @@ -399,8 +420,15 @@ #endif #define __mutex_init (wolfssl_linuxkm_get_pie_redirect_table()->__mutex_init) - #define mutex_lock (wolfssl_linuxkm_get_pie_redirect_table()->mutex_lock) + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + #define mutex_lock_nested (wolfssl_linuxkm_get_pie_redirect_table()->mutex_lock_nested) + #else + #define mutex_lock (wolfssl_linuxkm_get_pie_redirect_table()->mutex_lock) + #endif #define mutex_unlock (wolfssl_linuxkm_get_pie_redirect_table()->mutex_unlock) + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + #define mutex_destroy (wolfssl_linuxkm_get_pie_redirect_table()->mutex_destroy) + #endif /* per linux/ctype.h, tolower() and toupper() are macros bound to static inlines * that use macros that bring in the _ctype global. 
for __PIE__, this needs to @@ -432,35 +460,37 @@ extern __must_check int save_vector_registers_x86(void); extern void restore_vector_registers_x86(void); #else /* !WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED */ - static __must_check inline int save_vector_registers_x86(void) { - preempt_disable(); - if (! irq_fpu_usable()) { - preempt_enable(); - return EPERM; - } else { - kernel_fpu_begin(); - preempt_enable(); /* kernel_fpu_begin() does its own preempt_disable(). decrement ours. */ - return 0; - } - } - static inline void restore_vector_registers_x86(void) { - kernel_fpu_end(); - } + #define save_vector_registers_x86() ({ \ + int _ret; \ + preempt_disable(); \ + if (! irq_fpu_usable()) { \ + preempt_enable(); \ + _ret = BAD_STATE_E; \ + } else { \ + kernel_fpu_begin(); \ + preempt_enable(); /* kernel_fpu_begin() does its own preempt_disable(). decrement ours. */ \ + _ret = 0; \ + } \ + _ret; \ + }) + #define restore_vector_registers_x86() kernel_fpu_end() #endif /* !WOLFSSL_LINUXKM_SIMD_X86_IRQ_ALLOWED */ #elif defined(CONFIG_ARM) || defined(CONFIG_ARM64) - static __must_check inline int save_vector_registers_arm(void) { + static WARN_UNUSED_RESULT inline int save_vector_registers_arm(void) + { preempt_disable(); if (! may_use_simd()) { preempt_enable(); - return EPERM; + return BAD_STATE_E; } else { fpsimd_preserve_current_state(); return 0; } } - static inline void restore_vector_registers_arm(void) { + static inline void restore_vector_registers_arm(void) + { fpsimd_restore_current_state(); preempt_enable(); } @@ -469,7 +499,6 @@ #endif /* WOLFSSL_LINUXKM_SIMD */ - /* Linux headers define these using C expressions, but we need * them to be evaluable by the preprocessor, for use in sp_int.h. */ @@ -565,8 +594,6 @@ extern void fipsEntry(void); #endif - #endif /* BUILDING_WOLFSSL */ - /* needed to suppress inclusion of stdio.h in wolfssl/wolfcrypt/types.h */ #define XSNPRINTF snprintf @@ -580,6 +607,16 @@ (int)_xatoi_res; \ }) + + /* suppress false-positive "writing 1 byte into a region of size 0" warnings + * building old kernels with new gcc: + */ + #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) + _Pragma("GCC diagnostic ignored \"-Wstringop-overflow\""); + #endif + + #endif /* BUILDING_WOLFSSL */ + #else /* ! WOLFSSL_LINUXKM */ #ifndef SAVE_VECTOR_REGISTERS
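The notes below summarize the recurring patterns in this patch, each with a reduced standalone sketch in C; any identifier in a sketch that does not appear in the diff above is an illustrative stand-in, not a wolfSSL or kernel API.

The first hunk wraps struct module's layout fields because kernel 4.5 moved module_core/core_text_size/core_ro_size into the core_layout sub-struct. Below is a minimal sketch of how the wrappers are used for the text-span bounds check in wolfssl_init(); field names are per the kernel versions the patch targets (still-newer kernels reorganize these fields again, which neither the patch nor this sketch covers).

#include <linux/module.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
    /* 4.5+: layout fields live inside THIS_MODULE->core_layout */
    #define THIS_MODULE_BASE      (THIS_MODULE->core_layout.base)
    #define THIS_MODULE_TEXT_SIZE (THIS_MODULE->core_layout.text_size)
#else
    /* older kernels: flat fields directly in struct module */
    #define THIS_MODULE_BASE      (THIS_MODULE->module_core)
    #define THIS_MODULE_TEXT_SIZE (THIS_MODULE->core_text_size)
#endif

/* Nonzero when [start, end) lies inside this module's text segment;
 * this mirrors the pre-hashing check wolfssl_init() performs on the
 * wolfCrypt PIE container before computing text_hash. */
static inline int span_in_module_text(const char *start, const char *end)
{
    return (start < end) &&
           (start >= (const char *)THIS_MODULE_BASE) &&
           (end - (const char *)THIS_MODULE_BASE <= THIS_MODULE_TEXT_SIZE);
}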
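For HAVE_LINUXKM_PIE_SUPPORT the wolfCrypt objects are built position independent and reach kernel services only through a function-pointer table that set_up_wolfssl_linuxkm_pie_redirect_table() fills in; the version gates added here simply pick which timekeeping and mutex symbols get stored (getnstimeofday before 4.0, current_kernel_time64 before 5.0, ktime_get_coarse_real_ts64 afterwards). Below is a reduced user-space model of the mechanism, with an invented two-entry table rather than the real wolfssl_linuxkm_pie_redirect_table layout.

#include <stdio.h>
#include <time.h>

/* The PIE side never references external symbols directly; it calls
 * through pointers that the non-PIE loader side installs. */
struct pie_redirect_table {
    time_t (*time_fn)(time_t *);
    int    (*printf_fn)(const char *fmt, ...);
};

static struct pie_redirect_table redirect_table;

/* Loader side: store whichever symbol exists for the target kernel
 * (the patch does this for the timekeeping and mutex entries). */
static void setup_redirect_table(void)
{
    redirect_table.time_fn   = time;
    redirect_table.printf_fn = printf;
}

/* "PIE" side: only the table is used. */
static void pie_side_report(void)
{
    redirect_table.printf_fn("now: %ld\n", (long)redirect_table.time_fn(NULL));
}

int main(void)
{
    setup_redirect_table();
    pie_side_report();
    return 0;
}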
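The bulk of the changes in aes.c, chacha.c, poly1305.c, sha256.c, sha512.c and curve25519.c replace "if (SAVE_VECTOR_REGISTERS() != 0) return BAD_STATE_E;" with code that captures and propagates the routine's actual return value, so the caller sees the real error code rather than a generic BAD_STATE_E. Below is a minimal sketch of the before/after shape; save_vector_registers()/restore_vector_registers() and the error value are stand-ins for the real SAVE_VECTOR_REGISTERS()/RESTORE_VECTOR_REGISTERS() macros and wolfSSL's error-crypt.h codes.

#include <stdio.h>

#define BAD_STATE_E (-1)   /* placeholder value */

static int  save_vector_registers(void)    { return 0; /* or an error code */ }
static void restore_vector_registers(void) { }

/* Before: every failure was collapsed to BAD_STATE_E. */
static int do_work_old(void)
{
    if (save_vector_registers() != 0)
        return BAD_STATE_E;
    /* ... vectorized work ... */
    restore_vector_registers();
    return 0;
}

/* After: whatever error save_vector_registers() reported reaches the
 * caller unchanged. */
static int do_work_new(void)
{
    int ret = save_vector_registers();
    if (ret != 0)
        return ret;
    /* ... vectorized work ... */
    restore_vector_registers();
    return 0;
}

int main(void)
{
    printf("%d %d\n", do_work_old(), do_work_new());
    return 0;
}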
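memory.c keeps one page-sized scratch slot per CPU so vector state can be saved even from a hard-interrupt handler, with the slot's last byte doubling as an "in use" flag; the patch adds a guard so restore_vector_registers_x86() logs an error instead of dereferencing a missing or unsaved slot. Below is a reduced user-space model of that bookkeeping (NR_CPUS, SLOT_SIZE and the function names here are illustrative).

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS    4
#define SLOT_SIZE  4096
#define MEMORY_E   (-2)   /* placeholder value */

static unsigned char *irq_fpu_states[NR_CPUS];

static int allocate_irq_fpu_states(void)
{
    int i;
    for (i = 0; i < NR_CPUS; i++) {
        irq_fpu_states[i] = calloc(1, SLOT_SIZE);
        if (irq_fpu_states[i] == NULL)
            return MEMORY_E;
    }
    return 0;
}

static int save_state(int cpu)
{
    if (irq_fpu_states[cpu][SLOT_SIZE - 1])
        return -1;                              /* already saved on this CPU */
    /* ... copy the register state into the slot ... */
    irq_fpu_states[cpu][SLOT_SIZE - 1] = 1;     /* mark the slot as used */
    return 0;
}

static int restore_state(int cpu)
{
    /* The patch adds exactly this kind of guard before restoring. */
    if ((irq_fpu_states[cpu] == NULL) ||
        (irq_fpu_states[cpu][SLOT_SIZE - 1] == 0)) {
        fprintf(stderr, "restore without saved context on cpu %d\n", cpu);
        return -1;
    }
    /* ... copy the register state back out of the slot ... */
    irq_fpu_states[cpu][SLOT_SIZE - 1] = 0;
    return 0;
}

int main(void)
{
    if (allocate_irq_fpu_states() != 0)
        return 1;
    save_state(0);
    restore_state(0);
    restore_state(1);   /* exercises the "no saved context" path */
    return 0;
}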
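wc_PKCS12_PBKDF_ex() and wc_CheckProbablePrime_ex() move their mp_int temporaries off the stack when WOLFSSL_SMALL_STACK is defined, which matters on the small fixed-size kernel stack, and funnel cleanup through a single exit point. Below is a reduced sketch of the idiom, with a placeholder big_int type and plain malloc/free standing in for XMALLOC/XFREE with heap hints and DYNAMIC_TYPE_* tags.

#include <stdlib.h>
#include <string.h>

#define MEMORY_E (-2)   /* placeholder value */

typedef struct { unsigned char d[128]; } big_int;   /* stand-in for mp_int */

static int use_big_ints(void)
{
#ifdef WOLFSSL_SMALL_STACK
    big_int *b1 = NULL, *i1 = NULL, *res = NULL;
#else
    big_int b1[1], i1[1], res[1];
#endif
    int ret = 0;

#ifdef WOLFSSL_SMALL_STACK
    /* Allocate each temporary once, up front; bail out on failure. */
    if (((b1  = malloc(sizeof(*b1)))  == NULL) ||
        ((i1  = malloc(sizeof(*i1)))  == NULL) ||
        ((res = malloc(sizeof(*res))) == NULL)) {
        ret = MEMORY_E;
        goto out;
    }
#endif

    /* ... the arithmetic is unchanged; only the &x spellings become
     * plain pointers ... */
    memset(b1,  0, sizeof(*b1));
    memset(i1,  0, sizeof(*i1));
    memset(res, 0, sizeof(*res));

#ifdef WOLFSSL_SMALL_STACK
  out:
    /* Single exit path frees whatever was actually allocated. */
    free(b1);
    free(i1);
    free(res);
#endif
    return ret;
}

int main(void)
{
    return use_big_ints() == 0 ? 0 : 1;
}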
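types.h gains a WARN_UNUSED_RESULT macro so memory.c can annotate allocate_wolfcrypt_irq_fpu_states() and friends without relying on the kernel-only __must_check. Below is a compact standalone illustration of the same cascade; must_check_me() is an invented example.

#include <stdio.h>

#ifndef WARN_UNUSED_RESULT
  #if defined(WOLFSSL_LINUXKM) && defined(__must_check)
    #define WARN_UNUSED_RESULT __must_check
  #elif defined(__GNUC__) && (__GNUC__ >= 4)
    #define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
  #else
    #define WARN_UNUSED_RESULT
  #endif
#endif

static WARN_UNUSED_RESULT int must_check_me(void)
{
    return -1;
}

int main(void)
{
    /* must_check_me();  <- gcc -Wall would warn about the ignored result */
    if (must_check_me() != 0)
        printf("caller handled the result\n");
    return 0;
}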
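Finally, the non-IRQ save_vector_registers_x86() in wc_port.h becomes a GCC statement-expression macro instead of a static inline, and its failure value changes from the errno-style EPERM to a wolfCrypt error code; expanding at the call site also means BAD_STATE_E is resolved wherever the macro is used rather than where the header defines it (a plausible motivation, not stated in the patch). Below is a small standalone demonstration of the ({ ... }) idiom, with try_acquire() standing in for the preempt_disable()/irq_fpu_usable()/kernel_fpu_begin() sequence.

#include <stdio.h>

#define BAD_STATE_E (-1)   /* placeholder value */

static int fpu_usable = 1;

static int try_acquire(void) { return fpu_usable; }

/* GNU statement expression: the block is evaluated and the value of its
 * last expression (_ret) becomes the value of the macro invocation. */
#define save_registers() ({        \
    int _ret;                      \
    if (!try_acquire())            \
        _ret = BAD_STATE_E;        \
    else                           \
        _ret = 0;                  \
    _ret;                          \
})

int main(void)
{
    printf("usable: %d\n", save_registers());
    fpu_usable = 0;
    printf("not usable: %d\n", save_registers());
    return 0;
}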