From 26ba6344f24d3d270bf0992c8b2771dacfba81f1 Mon Sep 17 00:00:00 2001 From: Daniel Pouzzner Date: Thu, 13 Nov 2025 16:25:49 -0600 Subject: [PATCH 1/3] add wolfSSL_Atomic_Ptr_CompareExchange(); mitigate race on ctx->privateKeyPKey in wolfSSL_CTX_get0_privatekey(). --- src/ssl.c | 17 ++++++++++++-- wolfcrypt/src/wc_port.c | 44 +++++++++++++++++++++++++++++++++++++ wolfssl/wolfcrypt/wc_port.h | 14 ++++++++++++ 3 files changed, 73 insertions(+), 2 deletions(-) diff --git a/src/ssl.c b/src/ssl.c index c201e30b3..d02f2d322 100644 --- a/src/ssl.c +++ b/src/ssl.c @@ -7605,12 +7605,25 @@ WOLFSSL_EVP_PKEY* wolfSSL_CTX_get0_privatekey(const WOLFSSL_CTX* ctx) #ifdef WOLFSSL_BLIND_PRIVATE_KEY wolfssl_priv_der_unblind(ctx->privateKey, ctx->privateKeyMask); #endif - res = wolfSSL_d2i_PrivateKey(type, - (WOLFSSL_EVP_PKEY**)&ctx->privateKeyPKey, &key, + res = wolfSSL_d2i_PrivateKey(type, NULL, &key, (long)ctx->privateKey->length); #ifdef WOLFSSL_BLIND_PRIVATE_KEY wolfssl_priv_der_unblind(ctx->privateKey, ctx->privateKeyMask); #endif + if (res) { +#ifdef WOLFSSL_ATOMIC_OPS + WOLFSSL_EVP_PKEY *current_pkey = NULL; + if (! 
wolfSSL_Atomic_Ptr_CompareExchange( + (void **)&ctx->privateKeyPKey, + (void **)&current_pkey, res)) + { + wolfSSL_EVP_PKEY_free(res); + res = current_pkey; + } +#else + ctx->privateKeyPKey = res; +#endif + } } return res; diff --git a/wolfcrypt/src/wc_port.c b/wolfcrypt/src/wc_port.c index df888c0d7..6ebae390f 100644 --- a/wolfcrypt/src/wc_port.c +++ b/wolfcrypt/src/wc_port.c @@ -1357,6 +1357,13 @@ int wolfSSL_Atomic_Uint_CompareExchange( c, expected_i, new_i, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); } +int wolfSSL_Atomic_Ptr_CompareExchange( + void **c, void **expected_ptr, void *new_ptr) +{ + return __atomic_compare_exchange_n( + c, expected_ptr, new_ptr, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); +} + #else /* Default C Implementation */ @@ -1444,6 +1451,17 @@ int wolfSSL_Atomic_Uint_CompareExchange( c, expected_i, new_i, memory_order_seq_cst, memory_order_acquire); } +int wolfSSL_Atomic_Ptr_CompareExchange( + void **c, void **expected_ptr, void *new_ptr) +{ + /* use gcc-built-in __atomic_compare_exchange_n(), not + * atomic_compare_exchange_strong_explicit(), to sidestep _Atomic type + * requirements. 
+ */ + return __atomic_compare_exchange_n( + c, expected_ptr, new_ptr, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); +} + #endif /* __cplusplus */ #elif defined(_MSC_VER) @@ -1538,6 +1556,32 @@ int wolfSSL_Atomic_Uint_CompareExchange( } } +int wolfSSL_Atomic_Uint_CompareExchange( + void ** c, void **expected_ptr, void *new_ptr) +{ +#ifdef _WIN64 + LONG64 actual_ptr = InterlockedCompareExchange64 + ((LONG64 *)c, (LONG64)new_i, (LONG64)*expected_i); + if (actual_ptr == (LONG64)*expected_i) { + return 1; + } + else { + *expected_i = (void *)actual_ptr; + return 0; + } +#else /* !_WIN64 */ + LONG actual_ptr = InterlockedCompareExchange + ((LONG *)c, (LONG)new_i, (LONG)*expected_i); + if (actual_ptr == (LONG)*expected_i) { + return 1; + } + else { + *expected_i = (void *)actual_ptr; + return 0; + } +#endif /* !_WIN64 */ +} + #endif #endif /* WOLFSSL_ATOMIC_OPS */ diff --git a/wolfssl/wolfcrypt/wc_port.h b/wolfssl/wolfcrypt/wc_port.h index 91b5bc466..640c6b99b 100644 --- a/wolfssl/wolfcrypt/wc_port.h +++ b/wolfssl/wolfcrypt/wc_port.h @@ -586,6 +586,8 @@ wolfSSL_Atomic_Uint* c, unsigned int i); WOLFSSL_API int wolfSSL_Atomic_Uint_CompareExchange( wolfSSL_Atomic_Uint* c, unsigned int *expected_i, unsigned int new_i); + WOLFSSL_API int wolfSSL_Atomic_Ptr_CompareExchange( + void** c, void **expected_ptr, void *new_ptr); #else /* Code using these fallback implementations in non-SINGLE_THREADED builds * needs to arrange its own explicit fallback to int for wolfSSL_Atomic_Int @@ -623,6 +625,18 @@ return 0; } } + static WC_INLINE int wolfSSL_Atomic_Ptr_CompareExchange( + void **c, void *expected_ptr, void *new_ptr) + { + if (*(char **)c == *(char **)expected_ptr) { + *(char **)c = (char *)new_ptr; + return 1; + } + else { + *(char **)expected_ptr = *(char **)c; + return 0; + } + } static WC_INLINE unsigned int wolfSSL_Atomic_Uint_FetchAdd( unsigned int *c, unsigned int i) { From c430cc75eafa0eed36da169dc515607b4f7616d7 Mon Sep 17 00:00:00 2001 From: Daniel Pouzzner Date: 
Thu, 13 Nov 2025 17:11:52 -0600 Subject: [PATCH 2/3] src/ssl.c and wolfssl/ssl.h: fix signature on wolfSSL_CTX_get0_privatekey() -- ctx is not const; wolfcrypt/src/wc_port.c and wolfssl/wolfcrypt/wc_port.h: tweak gates on atomic implementations to maximize availability within currently supported targets; fix some whitespace. --- .wolfssl_known_macro_extras | 1 + src/ssl.c | 2 +- wolfcrypt/src/wc_port.c | 209 ++++++++++++++++++------------------ wolfssl/ssl.h | 2 +- wolfssl/wolfcrypt/wc_port.h | 48 ++++----- 5 files changed, 129 insertions(+), 133 deletions(-) diff --git a/.wolfssl_known_macro_extras b/.wolfssl_known_macro_extras index 477c265b3..1c7ed51d2 100644 --- a/.wolfssl_known_macro_extras +++ b/.wolfssl_known_macro_extras @@ -969,6 +969,7 @@ __ARCH_STRSTR_NO_REDIRECT __ARM_ARCH_7M__ __ARM_FEATURE_CRYPTO __ASSEMBLER__ +__ATOMIC_CONSUME __ATOMIC_RELAXED __AVR_ARCH__ __AVR__ diff --git a/src/ssl.c b/src/ssl.c index d02f2d322..cd9035de3 100644 --- a/src/ssl.c +++ b/src/ssl.c @@ -7559,7 +7559,7 @@ int wolfSSL_CTX_check_private_key(const WOLFSSL_CTX* ctx) * Return the private key of the WOLFSSL_CTX struct * @return WOLFSSL_EVP_PKEY* The caller doesn *NOT*` free the returned object. 
*/ -WOLFSSL_EVP_PKEY* wolfSSL_CTX_get0_privatekey(const WOLFSSL_CTX* ctx) +WOLFSSL_EVP_PKEY* wolfSSL_CTX_get0_privatekey(WOLFSSL_CTX* ctx) { WOLFSSL_EVP_PKEY* res; const unsigned char *key; diff --git a/wolfcrypt/src/wc_port.c b/wolfcrypt/src/wc_port.c index 6ebae390f..93ca4bebe 100644 --- a/wolfcrypt/src/wc_port.c +++ b/wolfcrypt/src/wc_port.c @@ -1273,98 +1273,9 @@ char* wc_strdup_ex(const char *src, int memType) { #if defined(WOLFSSL_ATOMIC_OPS) && !defined(SINGLE_THREADED) -#ifdef HAVE_C___ATOMIC -/* Atomic ops using standard C lib */ -#ifdef __cplusplus -/* C++ using direct calls to compiler built-in functions */ -void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i) -{ - *c = i; -} +#if defined(WOLFSSL_USER_DEFINED_ATOMICS) -void wolfSSL_Atomic_Uint_Init(wolfSSL_Atomic_Uint* c, unsigned int i) -{ - *c = i; -} - -int wolfSSL_Atomic_Int_FetchAdd(wolfSSL_Atomic_Int* c, int i) -{ - return __atomic_fetch_add(c, i, __ATOMIC_RELAXED); -} - -int wolfSSL_Atomic_Int_FetchSub(wolfSSL_Atomic_Int* c, int i) -{ - return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED); -} - -int wolfSSL_Atomic_Int_AddFetch(wolfSSL_Atomic_Int* c, int i) -{ - return __atomic_add_fetch(c, i, __ATOMIC_RELAXED); -} - -int wolfSSL_Atomic_Int_SubFetch(wolfSSL_Atomic_Int* c, int i) -{ - return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED); -} - -int wolfSSL_Atomic_Int_CompareExchange(wolfSSL_Atomic_Int* c, int *expected_i, - int new_i) -{ - /* For the success path, use full synchronization with barriers -- - * "Sequentially-consistent ordering" -- so that all threads see the same - * "single total modification order of all atomic operations" -- but on - * failure we just need to be sure we acquire the value that changed out - * from under us. 
- */ - return __atomic_compare_exchange_n(c, expected_i, new_i, 0 /* weak */, - __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); -} - -unsigned int wolfSSL_Atomic_Uint_FetchAdd(wolfSSL_Atomic_Uint* c, - unsigned int i) -{ - return __atomic_fetch_add(c, i, __ATOMIC_RELAXED); -} - -unsigned int wolfSSL_Atomic_Uint_FetchSub(wolfSSL_Atomic_Uint* c, - unsigned int i) -{ - return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED); -} - -unsigned int wolfSSL_Atomic_Uint_AddFetch(wolfSSL_Atomic_Uint* c, - unsigned int i) -{ - return __atomic_add_fetch(c, i, __ATOMIC_RELAXED); -} - -unsigned int wolfSSL_Atomic_Uint_SubFetch(wolfSSL_Atomic_Uint* c, - unsigned int i) -{ - return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED); -} - -int wolfSSL_Atomic_Uint_CompareExchange( - wolfSSL_Atomic_Uint* c, unsigned int *expected_i, unsigned int new_i) -{ - /* For the success path, use full synchronization with barriers -- - * "Sequentially-consistent ordering" -- so that all threads see the same - * "single total modification order of all atomic operations" -- but on - * failure we just need to be sure we acquire the value that changed out - * from under us. - */ - return __atomic_compare_exchange_n( - c, expected_i, new_i, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); -} - -int wolfSSL_Atomic_Ptr_CompareExchange( - void **c, void **expected_ptr, void *new_ptr) -{ - return __atomic_compare_exchange_n( - c, expected_ptr, new_ptr, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); -} - -#else +#elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H) /* Default C Implementation */ void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i) @@ -1459,12 +1370,102 @@ int wolfSSL_Atomic_Ptr_CompareExchange( * requirements. 
*/ return __atomic_compare_exchange_n( - c, expected_ptr, new_ptr, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); + c, expected_ptr, new_ptr, 0 /* weak */, + __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); } -#endif /* __cplusplus */ +#elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) +/* direct calls using gcc-style compiler built-ins */ -#elif defined(_MSC_VER) +void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i) +{ + *c = i; +} + +void wolfSSL_Atomic_Uint_Init(wolfSSL_Atomic_Uint* c, unsigned int i) +{ + *c = i; +} + +int wolfSSL_Atomic_Int_FetchAdd(wolfSSL_Atomic_Int* c, int i) +{ + return __atomic_fetch_add(c, i, __ATOMIC_RELAXED); +} + +int wolfSSL_Atomic_Int_FetchSub(wolfSSL_Atomic_Int* c, int i) +{ + return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED); +} + +int wolfSSL_Atomic_Int_AddFetch(wolfSSL_Atomic_Int* c, int i) +{ + return __atomic_add_fetch(c, i, __ATOMIC_RELAXED); +} + +int wolfSSL_Atomic_Int_SubFetch(wolfSSL_Atomic_Int* c, int i) +{ + return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED); +} + +int wolfSSL_Atomic_Int_CompareExchange(wolfSSL_Atomic_Int* c, int *expected_i, + int new_i) +{ + /* For the success path, use full synchronization with barriers -- + * "Sequentially-consistent ordering" -- so that all threads see the same + * "single total modification order of all atomic operations" -- but on + * failure we just need to be sure we acquire the value that changed out + * from under us. 
+ */ + return __atomic_compare_exchange_n(c, expected_i, new_i, 0 /* weak */, + __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); +} + +unsigned int wolfSSL_Atomic_Uint_FetchAdd(wolfSSL_Atomic_Uint* c, + unsigned int i) +{ + return __atomic_fetch_add(c, i, __ATOMIC_RELAXED); +} + +unsigned int wolfSSL_Atomic_Uint_FetchSub(wolfSSL_Atomic_Uint* c, + unsigned int i) +{ + return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED); +} + +unsigned int wolfSSL_Atomic_Uint_AddFetch(wolfSSL_Atomic_Uint* c, + unsigned int i) +{ + return __atomic_add_fetch(c, i, __ATOMIC_RELAXED); +} + +unsigned int wolfSSL_Atomic_Uint_SubFetch(wolfSSL_Atomic_Uint* c, + unsigned int i) +{ + return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED); +} + +int wolfSSL_Atomic_Uint_CompareExchange( + wolfSSL_Atomic_Uint* c, unsigned int *expected_i, unsigned int new_i) +{ + /* For the success path, use full synchronization with barriers -- + * "Sequentially-consistent ordering" -- so that all threads see the same + * "single total modification order of all atomic operations" -- but on + * failure we just need to be sure we acquire the value that changed out + * from under us. 
+ */ + return __atomic_compare_exchange_n( + c, expected_i, new_i, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); +} + +int wolfSSL_Atomic_Ptr_CompareExchange( + void **c, void **expected_ptr, void *new_ptr) +{ + return __atomic_compare_exchange_n( + c, expected_ptr, new_ptr, 0 /* weak */, + __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); +} + +#elif defined(_MSC_VER) && !defined(WOLFSSL_NOT_WINDOWS_API) void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i) { @@ -1545,8 +1546,8 @@ unsigned int wolfSSL_Atomic_Uint_SubFetch(wolfSSL_Atomic_Uint* c, int wolfSSL_Atomic_Uint_CompareExchange( wolfSSL_Atomic_Uint* c, unsigned int *expected_i, unsigned int new_i) { - long actual_i = InterlockedCompareExchange - ((wolfSSL_Atomic_Int *)c, (long)new_i, (long)*expected_i); + long actual_i = InterlockedCompareExchange( + (wolfSSL_Atomic_Int *)c, (long)new_i, (long)*expected_i); if (actual_i == (long)*expected_i) { return 1; } @@ -1560,23 +1561,23 @@ int wolfSSL_Atomic_Uint_CompareExchange( void ** c, void **expected_ptr, void *new_ptr) { #ifdef _WIN64 - LONG64 actual_ptr = InterlockedCompareExchange64 - ((LONG64 *)c, (LONG64)new_i, (LONG64)*expected_i); - if (actual_ptr == (LONG64)*expected_i) { + LONG64 actual_ptr = InterlockedCompareExchange64( + (LONG64 *)c, (LONG64)new_ptr, (LONG64)*expected_ptr); + if (actual_ptr == (LONG64)*expected_ptr) { return 1; } else { - *expected_i = (void *)actual_ptr; + *expected_ptr = (void *)actual_ptr; return 0; } #else /* !_WIN64 */ - LONG actual_ptr = InterlockedCompareExchange - ((LONG *)c, (LONG)new_i, (LONG)*expected_i); - if (actual_ptr == (LONG)*expected_i) { + LONG actual_ptr = InterlockedCompareExchange( + (LONG *)c, (LONG)new_ptr, (LONG)*expected_ptr); + if (actual_ptr == (LONG)*expected_ptr) { return 1; } else { - *expected_i = (void *)actual_ptr; + *expected_ptr = (void *)actual_ptr; return 0; } #endif /* !_WIN64 */ diff --git a/wolfssl/ssl.h b/wolfssl/ssl.h index ca8f4f81d..3f3a98456 100644 --- a/wolfssl/ssl.h +++ b/wolfssl/ssl.h @@ 
-3228,7 +3228,7 @@ WOLFSSL_API int wolfSSL_want_write(WOLFSSL* ssl); #ifdef OPENSSL_EXTRA WOLFSSL_API int wolfSSL_want(WOLFSSL* ssl); -WOLFSSL_API WOLFSSL_EVP_PKEY* wolfSSL_CTX_get0_privatekey(const WOLFSSL_CTX* ctx); +WOLFSSL_API WOLFSSL_EVP_PKEY* wolfSSL_CTX_get0_privatekey(WOLFSSL_CTX* ctx); #include <stdarg.h> /* var_arg */ WOLFSSL_API int wolfSSL_BIO_vprintf(WOLFSSL_BIO* bio, const char* format, diff --git a/wolfssl/wolfcrypt/wc_port.h b/wolfssl/wolfcrypt/wc_port.h index 640c6b99b..0a56c1cd0 100644 --- a/wolfssl/wolfcrypt/wc_port.h +++ b/wolfssl/wolfcrypt/wc_port.h @@ -494,31 +494,25 @@ #define WOLFSSL_ATOMIC_LOAD(x) (x) #define WOLFSSL_ATOMIC_STORE(x, val) (x) = (val) #define WOLFSSL_ATOMIC_OPS - #elif defined(HAVE_C___ATOMIC) - #ifdef __cplusplus - #if defined(__GNUC__) && defined(__ATOMIC_RELAXED) - /* C++ using direct calls to compiler built-in functions */ - typedef volatile int wolfSSL_Atomic_Int; - typedef volatile unsigned int wolfSSL_Atomic_Uint; - #define WOLFSSL_ATOMIC_INITIALIZER(x) (x) - #define WOLFSSL_ATOMIC_LOAD(x) __atomic_load_n(&(x), \ - __ATOMIC_CONSUME) - #define WOLFSSL_ATOMIC_STORE(x, val) __atomic_store_n(&(x), \ - val, __ATOMIC_RELEASE) - #define WOLFSSL_ATOMIC_OPS - #endif - #else - #ifdef WOLFSSL_HAVE_ATOMIC_H - /* Default C Implementation */ - #include <stdatomic.h> - typedef atomic_int wolfSSL_Atomic_Int; - typedef atomic_uint wolfSSL_Atomic_Uint; - #define WOLFSSL_ATOMIC_INITIALIZER(x) (x) - #define WOLFSSL_ATOMIC_LOAD(x) atomic_load(&(x)) - #define WOLFSSL_ATOMIC_STORE(x, val) atomic_store(&(x), val) - #define WOLFSSL_ATOMIC_OPS - #endif /* WOLFSSL_HAVE_ATOMIC_H */ - #endif + #elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H) + /* Default C Implementation */ + #include <stdatomic.h> + typedef atomic_int wolfSSL_Atomic_Int; + typedef atomic_uint wolfSSL_Atomic_Uint; + #define WOLFSSL_ATOMIC_INITIALIZER(x) (x) + #define WOLFSSL_ATOMIC_LOAD(x) atomic_load(&(x)) + #define WOLFSSL_ATOMIC_STORE(x, val) atomic_store(&(x), val) + #define WOLFSSL_ATOMIC_OPS + 
#elif defined(__GNUC__) && defined(__ATOMIC_CONSUME) + /* direct calls using gcc-style compiler built-ins */ + typedef volatile int wolfSSL_Atomic_Int; + typedef volatile unsigned int wolfSSL_Atomic_Uint; + #define WOLFSSL_ATOMIC_INITIALIZER(x) (x) + #define WOLFSSL_ATOMIC_LOAD(x) __atomic_load_n(&(x), \ + __ATOMIC_CONSUME) + #define WOLFSSL_ATOMIC_STORE(x, val) __atomic_store_n(&(x), \ + val, __ATOMIC_RELEASE) + #define WOLFSSL_ATOMIC_OPS #elif defined(_MSC_VER) && !defined(WOLFSSL_NOT_WINDOWS_API) /* Use MSVC compiler intrinsics for atomic ops */ #ifdef _WIN32_WCE @@ -534,8 +528,8 @@ #define WOLFSSL_ATOMIC_OPS #endif - #ifndef WOLFSSL_ATOMIC_INITIALIZER - /* If we weren't able to implement atomics above, disable them here. */ + /* If we weren't able to implement atomics above, disable them here. */ + #ifndef WOLFSSL_ATOMIC_OPS #define WOLFSSL_NO_ATOMICS #endif #endif From 7916db78e816bac2a0b3d4cd29680221a748e3d4 Mon Sep 17 00:00:00 2001 From: Daniel Pouzzner Date: Thu, 13 Nov 2025 17:28:19 -0600 Subject: [PATCH 3/3] wolfcrypt/src/wc_port.c and wolfssl/wolfcrypt/wc_port.h: change precedence of atomic implementations, and don't use the stdatomic.h in C++ builds (not compatible); fix the name of the wolfSSL_Atomic_Ptr_CompareExchange() implementation in the _MSC_VER code path. 
--- wolfcrypt/src/wc_port.c | 191 ++++++++++++++++++------------------ wolfssl/wolfcrypt/wc_port.h | 19 ++-- 2 files changed, 107 insertions(+), 103 deletions(-) diff --git a/wolfcrypt/src/wc_port.c b/wolfcrypt/src/wc_port.c index 93ca4bebe..35a0f83d0 100644 --- a/wolfcrypt/src/wc_port.c +++ b/wolfcrypt/src/wc_port.c @@ -1271,11 +1271,105 @@ char* wc_strdup_ex(const char *src, int memType) { } #endif -#if defined(WOLFSSL_ATOMIC_OPS) && !defined(SINGLE_THREADED) +#ifdef WOLFSSL_ATOMIC_OPS #if defined(WOLFSSL_USER_DEFINED_ATOMICS) -#elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H) +#elif defined(SINGLE_THREADED) + +#elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) +/* direct calls using gcc-style compiler built-ins */ + +void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i) +{ + *c = i; +} + +void wolfSSL_Atomic_Uint_Init(wolfSSL_Atomic_Uint* c, unsigned int i) +{ + *c = i; +} + +int wolfSSL_Atomic_Int_FetchAdd(wolfSSL_Atomic_Int* c, int i) +{ + return __atomic_fetch_add(c, i, __ATOMIC_RELAXED); +} + +int wolfSSL_Atomic_Int_FetchSub(wolfSSL_Atomic_Int* c, int i) +{ + return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED); +} + +int wolfSSL_Atomic_Int_AddFetch(wolfSSL_Atomic_Int* c, int i) +{ + return __atomic_add_fetch(c, i, __ATOMIC_RELAXED); +} + +int wolfSSL_Atomic_Int_SubFetch(wolfSSL_Atomic_Int* c, int i) +{ + return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED); +} + +int wolfSSL_Atomic_Int_CompareExchange(wolfSSL_Atomic_Int* c, int *expected_i, + int new_i) +{ + /* For the success path, use full synchronization with barriers -- + * "Sequentially-consistent ordering" -- so that all threads see the same + * "single total modification order of all atomic operations" -- but on + * failure we just need to be sure we acquire the value that changed out + * from under us. 
+ */ + return __atomic_compare_exchange_n(c, expected_i, new_i, 0 /* weak */, + __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); +} + +unsigned int wolfSSL_Atomic_Uint_FetchAdd(wolfSSL_Atomic_Uint* c, + unsigned int i) +{ + return __atomic_fetch_add(c, i, __ATOMIC_RELAXED); +} + +unsigned int wolfSSL_Atomic_Uint_FetchSub(wolfSSL_Atomic_Uint* c, + unsigned int i) +{ + return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED); +} + +unsigned int wolfSSL_Atomic_Uint_AddFetch(wolfSSL_Atomic_Uint* c, + unsigned int i) +{ + return __atomic_add_fetch(c, i, __ATOMIC_RELAXED); +} + +unsigned int wolfSSL_Atomic_Uint_SubFetch(wolfSSL_Atomic_Uint* c, + unsigned int i) +{ + return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED); +} + +int wolfSSL_Atomic_Uint_CompareExchange( + wolfSSL_Atomic_Uint* c, unsigned int *expected_i, unsigned int new_i) +{ + /* For the success path, use full synchronization with barriers -- + * "Sequentially-consistent ordering" -- so that all threads see the same + * "single total modification order of all atomic operations" -- but on + * failure we just need to be sure we acquire the value that changed out + * from under us. 
+ */ + return __atomic_compare_exchange_n( + c, expected_i, new_i, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); +} + +int wolfSSL_Atomic_Ptr_CompareExchange( + void **c, void **expected_ptr, void *new_ptr) +{ + return __atomic_compare_exchange_n( + c, expected_ptr, new_ptr, 0 /* weak */, + __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); +} + +#elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H) && \ + !defined(__cplusplus) /* Default C Implementation */ void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i) @@ -1374,97 +1468,6 @@ int wolfSSL_Atomic_Ptr_CompareExchange( __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); } -#elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) -/* direct calls using gcc-style compiler built-ins */ - -void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i) -{ - *c = i; -} - -void wolfSSL_Atomic_Uint_Init(wolfSSL_Atomic_Uint* c, unsigned int i) -{ - *c = i; -} - -int wolfSSL_Atomic_Int_FetchAdd(wolfSSL_Atomic_Int* c, int i) -{ - return __atomic_fetch_add(c, i, __ATOMIC_RELAXED); -} - -int wolfSSL_Atomic_Int_FetchSub(wolfSSL_Atomic_Int* c, int i) -{ - return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED); -} - -int wolfSSL_Atomic_Int_AddFetch(wolfSSL_Atomic_Int* c, int i) -{ - return __atomic_add_fetch(c, i, __ATOMIC_RELAXED); -} - -int wolfSSL_Atomic_Int_SubFetch(wolfSSL_Atomic_Int* c, int i) -{ - return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED); -} - -int wolfSSL_Atomic_Int_CompareExchange(wolfSSL_Atomic_Int* c, int *expected_i, - int new_i) -{ - /* For the success path, use full synchronization with barriers -- - * "Sequentially-consistent ordering" -- so that all threads see the same - * "single total modification order of all atomic operations" -- but on - * failure we just need to be sure we acquire the value that changed out - * from under us. 
- */ - return __atomic_compare_exchange_n(c, expected_i, new_i, 0 /* weak */, - __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); -} - -unsigned int wolfSSL_Atomic_Uint_FetchAdd(wolfSSL_Atomic_Uint* c, - unsigned int i) -{ - return __atomic_fetch_add(c, i, __ATOMIC_RELAXED); -} - -unsigned int wolfSSL_Atomic_Uint_FetchSub(wolfSSL_Atomic_Uint* c, - unsigned int i) -{ - return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED); -} - -unsigned int wolfSSL_Atomic_Uint_AddFetch(wolfSSL_Atomic_Uint* c, - unsigned int i) -{ - return __atomic_add_fetch(c, i, __ATOMIC_RELAXED); -} - -unsigned int wolfSSL_Atomic_Uint_SubFetch(wolfSSL_Atomic_Uint* c, - unsigned int i) -{ - return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED); -} - -int wolfSSL_Atomic_Uint_CompareExchange( - wolfSSL_Atomic_Uint* c, unsigned int *expected_i, unsigned int new_i) -{ - /* For the success path, use full synchronization with barriers -- - * "Sequentially-consistent ordering" -- so that all threads see the same - * "single total modification order of all atomic operations" -- but on - * failure we just need to be sure we acquire the value that changed out - * from under us. 
- */ - return __atomic_compare_exchange_n( - c, expected_i, new_i, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); -} - -int wolfSSL_Atomic_Ptr_CompareExchange( - void **c, void **expected_ptr, void *new_ptr) -{ - return __atomic_compare_exchange_n( - c, expected_ptr, new_ptr, 0 /* weak */, - __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE); -} - #elif defined(_MSC_VER) && !defined(WOLFSSL_NOT_WINDOWS_API) void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i) @@ -1557,7 +1560,7 @@ int wolfSSL_Atomic_Uint_CompareExchange( } } -int wolfSSL_Atomic_Uint_CompareExchange( +int wolfSSL_Atomic_Ptr_CompareExchange( void ** c, void **expected_ptr, void *new_ptr) { #ifdef _WIN64 diff --git a/wolfssl/wolfcrypt/wc_port.h b/wolfssl/wolfcrypt/wc_port.h index 0a56c1cd0..382f4f3ec 100644 --- a/wolfssl/wolfcrypt/wc_port.h +++ b/wolfssl/wolfcrypt/wc_port.h @@ -494,15 +494,6 @@ #define WOLFSSL_ATOMIC_LOAD(x) (x) #define WOLFSSL_ATOMIC_STORE(x, val) (x) = (val) #define WOLFSSL_ATOMIC_OPS - #elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H) - /* Default C Implementation */ - #include <stdatomic.h> - typedef atomic_int wolfSSL_Atomic_Int; - typedef atomic_uint wolfSSL_Atomic_Uint; - #define WOLFSSL_ATOMIC_INITIALIZER(x) (x) - #define WOLFSSL_ATOMIC_LOAD(x) atomic_load(&(x)) - #define WOLFSSL_ATOMIC_STORE(x, val) atomic_store(&(x), val) - #define WOLFSSL_ATOMIC_OPS #elif defined(__GNUC__) && defined(__ATOMIC_CONSUME) /* direct calls using gcc-style compiler built-ins */ typedef volatile int wolfSSL_Atomic_Int; @@ -513,6 +504,16 @@ #define WOLFSSL_ATOMIC_STORE(x, val) __atomic_store_n(&(x), \ val, __ATOMIC_RELEASE) #define WOLFSSL_ATOMIC_OPS + #elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H) && \ + !defined(__cplusplus) + /* Default C Implementation */ + #include <stdatomic.h> + typedef atomic_int wolfSSL_Atomic_Int; + typedef atomic_uint wolfSSL_Atomic_Uint; + #define WOLFSSL_ATOMIC_INITIALIZER(x) (x) + #define WOLFSSL_ATOMIC_LOAD(x) atomic_load(&(x)) + #define WOLFSSL_ATOMIC_STORE(x, val) 
atomic_store(&(x), val) + #define WOLFSSL_ATOMIC_OPS #elif defined(_MSC_VER) && !defined(WOLFSSL_NOT_WINDOWS_API) /* Use MSVC compiler intrinsics for atomic ops */ #ifdef _WIN32_WCE