wolfssl/wolfcrypt/wc_port.h and wolfcrypt/src/wc_port.c: use the stdatomic.h implementation as the C default when available, as before, for proper type annotation of atomic objects.

Author: Daniel Pouzzner
Date: 2025-11-14 07:54:14 -06:00
parent 10a60fc41b
commit 135bb66352
2 changed files with 101 additions and 101 deletions
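
For context on the subject line, a sketch of the difference the branch ordering makes (this illustration is not part of the commit; the stdatomic-branch typedef and the _default/_fallback names are assumptions for illustration, while the volatile typedef matches the header diff below). With <stdatomic.h> the atomicity is part of the object's type; the gcc-built-ins fallback leaves the object a plain volatile int and relies on every access site using the built-ins.

/* Sketch only: the type annotation each branch gives the same object. */
#include <stdatomic.h>

/* Default C implementation (assumed typedef): the _Atomic qualifier
 * travels with the object, so the compiler and analyzers can reject
 * plain non-atomic misuse of it. */
typedef atomic_int wolfSSL_Atomic_Int_default;

/* gcc-built-ins fallback (typedef as in the header diff below): atomicity
 * is a property of the __atomic_* call sites, not of the object's type. */
typedef volatile int wolfSSL_Atomic_Int_fallback;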

wolfcrypt/src/wc_port.c

@@ -1277,97 +1277,6 @@ char* wc_strdup_ex(const char *src, int memType) {
#elif defined(SINGLE_THREADED)
#elif defined(__GNUC__) && defined(__ATOMIC_RELAXED)
/* direct calls using gcc-style compiler built-ins */
void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i)
{
    *c = i;
}
void wolfSSL_Atomic_Uint_Init(wolfSSL_Atomic_Uint* c, unsigned int i)
{
    *c = i;
}
int wolfSSL_Atomic_Int_FetchAdd(wolfSSL_Atomic_Int* c, int i)
{
    return __atomic_fetch_add(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_FetchSub(wolfSSL_Atomic_Int* c, int i)
{
    return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_AddFetch(wolfSSL_Atomic_Int* c, int i)
{
    return __atomic_add_fetch(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_SubFetch(wolfSSL_Atomic_Int* c, int i)
{
    return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_CompareExchange(wolfSSL_Atomic_Int* c, int *expected_i,
                                       int new_i)
{
    /* For the success path, use full synchronization with barriers --
     * "Sequentially-consistent ordering" -- so that all threads see the same
     * "single total modification order of all atomic operations" -- but on
     * failure we just need to be sure we acquire the value that changed out
     * from under us.
     */
    return __atomic_compare_exchange_n(c, expected_i, new_i, 0 /* weak */,
                                       __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
unsigned int wolfSSL_Atomic_Uint_FetchAdd(wolfSSL_Atomic_Uint* c,
                                          unsigned int i)
{
    return __atomic_fetch_add(c, i, __ATOMIC_RELAXED);
}
unsigned int wolfSSL_Atomic_Uint_FetchSub(wolfSSL_Atomic_Uint* c,
                                          unsigned int i)
{
    return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED);
}
unsigned int wolfSSL_Atomic_Uint_AddFetch(wolfSSL_Atomic_Uint* c,
                                          unsigned int i)
{
    return __atomic_add_fetch(c, i, __ATOMIC_RELAXED);
}
unsigned int wolfSSL_Atomic_Uint_SubFetch(wolfSSL_Atomic_Uint* c,
                                          unsigned int i)
{
    return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Uint_CompareExchange(
    wolfSSL_Atomic_Uint* c, unsigned int *expected_i, unsigned int new_i)
{
    /* For the success path, use full synchronization with barriers --
     * "Sequentially-consistent ordering" -- so that all threads see the same
     * "single total modification order of all atomic operations" -- but on
     * failure we just need to be sure we acquire the value that changed out
     * from under us.
     */
    return __atomic_compare_exchange_n(
        c, expected_i, new_i, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
int wolfSSL_Atomic_Ptr_CompareExchange(
    void **c, void **expected_ptr, void *new_ptr)
{
    return __atomic_compare_exchange_n(
        c, expected_ptr, new_ptr, 0 /* weak */,
        __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
#elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H) && \
!defined(__cplusplus)
@@ -1468,6 +1377,97 @@ int wolfSSL_Atomic_Ptr_CompareExchange(
__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
#elif defined(__GNUC__) && defined(__ATOMIC_RELAXED)
/* direct calls using gcc-style compiler built-ins */
void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i)
{
    *c = i;
}
void wolfSSL_Atomic_Uint_Init(wolfSSL_Atomic_Uint* c, unsigned int i)
{
    *c = i;
}
int wolfSSL_Atomic_Int_FetchAdd(wolfSSL_Atomic_Int* c, int i)
{
    return __atomic_fetch_add(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_FetchSub(wolfSSL_Atomic_Int* c, int i)
{
    return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_AddFetch(wolfSSL_Atomic_Int* c, int i)
{
    return __atomic_add_fetch(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_SubFetch(wolfSSL_Atomic_Int* c, int i)
{
    return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_CompareExchange(wolfSSL_Atomic_Int* c, int *expected_i,
                                       int new_i)
{
    /* For the success path, use full synchronization with barriers --
     * "Sequentially-consistent ordering" -- so that all threads see the same
     * "single total modification order of all atomic operations" -- but on
     * failure we just need to be sure we acquire the value that changed out
     * from under us.
     */
    return __atomic_compare_exchange_n(c, expected_i, new_i, 0 /* weak */,
                                       __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
unsigned int wolfSSL_Atomic_Uint_FetchAdd(wolfSSL_Atomic_Uint* c,
                                          unsigned int i)
{
    return __atomic_fetch_add(c, i, __ATOMIC_RELAXED);
}
unsigned int wolfSSL_Atomic_Uint_FetchSub(wolfSSL_Atomic_Uint* c,
                                          unsigned int i)
{
    return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED);
}
unsigned int wolfSSL_Atomic_Uint_AddFetch(wolfSSL_Atomic_Uint* c,
                                          unsigned int i)
{
    return __atomic_add_fetch(c, i, __ATOMIC_RELAXED);
}
unsigned int wolfSSL_Atomic_Uint_SubFetch(wolfSSL_Atomic_Uint* c,
                                          unsigned int i)
{
    return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Uint_CompareExchange(
    wolfSSL_Atomic_Uint* c, unsigned int *expected_i, unsigned int new_i)
{
    /* For the success path, use full synchronization with barriers --
     * "Sequentially-consistent ordering" -- so that all threads see the same
     * "single total modification order of all atomic operations" -- but on
     * failure we just need to be sure we acquire the value that changed out
     * from under us.
     */
    return __atomic_compare_exchange_n(
        c, expected_i, new_i, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
int wolfSSL_Atomic_Ptr_CompareExchange(
    void **c, void **expected_ptr, void *new_ptr)
{
    return __atomic_compare_exchange_n(
        c, expected_ptr, new_ptr, 0 /* weak */,
        __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
#elif defined(_MSC_VER) && !defined(WOLFSSL_NOT_WINDOWS_API)
void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i)

wolfssl/wolfcrypt/wc_port.h

@@ -494,16 +494,6 @@
#define WOLFSSL_ATOMIC_LOAD(x) (x)
#define WOLFSSL_ATOMIC_STORE(x, val) (x) = (val)
#define WOLFSSL_ATOMIC_OPS
#elif defined(__GNUC__) && defined(__ATOMIC_CONSUME)
/* direct calls using gcc-style compiler built-ins */
typedef volatile int wolfSSL_Atomic_Int;
typedef volatile unsigned int wolfSSL_Atomic_Uint;
#define WOLFSSL_ATOMIC_INITIALIZER(x) (x)
#define WOLFSSL_ATOMIC_LOAD(x) __atomic_load_n(&(x), \
            __ATOMIC_CONSUME)
#define WOLFSSL_ATOMIC_STORE(x, val) __atomic_store_n(&(x), \
            val, __ATOMIC_RELEASE)
#define WOLFSSL_ATOMIC_OPS
#elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H) && \
!defined(__cplusplus)
/* Default C Implementation */
@@ -514,6 +504,16 @@
#define WOLFSSL_ATOMIC_LOAD(x) atomic_load(&(x))
#define WOLFSSL_ATOMIC_STORE(x, val) atomic_store(&(x), val)
#define WOLFSSL_ATOMIC_OPS
#elif defined(__GNUC__) && defined(__ATOMIC_CONSUME)
/* direct calls using gcc-style compiler built-ins */
typedef volatile int wolfSSL_Atomic_Int;
typedef volatile unsigned int wolfSSL_Atomic_Uint;
#define WOLFSSL_ATOMIC_INITIALIZER(x) (x)
#define WOLFSSL_ATOMIC_LOAD(x) __atomic_load_n(&(x), \
            __ATOMIC_CONSUME)
#define WOLFSSL_ATOMIC_STORE(x, val) __atomic_store_n(&(x), \
            val, __ATOMIC_RELEASE)
#define WOLFSSL_ATOMIC_OPS
#elif defined(_MSC_VER) && !defined(WOLFSSL_NOT_WINDOWS_API)
/* Use MSVC compiler intrinsics for atomic ops */
#ifdef _WIN32_WCE
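
For orientation, a sketch of how the WOLFSSL_ATOMIC_* macros above are meant to be used from client code, with the same spelling regardless of which branch the preprocessor selects (publish/is_ready are hypothetical; WOLFSSL_ATOMIC_INITIALIZER is assumed to reduce to (x) in the stdatomic branch as it does in the gcc branch shown above):

/* Hypothetical user of the portability macros in wc_port.h. */
static wolfSSL_Atomic_Int ready = WOLFSSL_ATOMIC_INITIALIZER(0);

void publish(void)
{
    /* seq_cst store on the stdatomic branch, release store on the gcc
     * branch, plain assignment on the no-atomics branch shown earlier. */
    WOLFSSL_ATOMIC_STORE(ready, 1);
}

int is_ready(void)
{
    /* seq_cst load (stdatomic), consume load (gcc), or plain read. */
    return WOLFSSL_ATOMIC_LOAD(ready);
}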