src/ssl.c and wolfssl/ssl.h: fix signature on wolfSSL_CTX_get0_privatekey() -- ctx is not const;

wolfcrypt/src/wc_port.c and wolfssl/wolfcrypt/wc_port.h: tweak gates on atomic implementations to maximize availability within currently supported targets;

fix some whitespace.
This commit is contained in:
Daniel Pouzzner
2025-11-13 17:11:52 -06:00
parent 26ba6344f2
commit c430cc75ea
5 changed files with 129 additions and 133 deletions

View File

@@ -969,6 +969,7 @@ __ARCH_STRSTR_NO_REDIRECT
__ARM_ARCH_7M__
__ARM_FEATURE_CRYPTO
__ASSEMBLER__
__ATOMIC_CONSUME
__ATOMIC_RELAXED
__AVR_ARCH__
__AVR__

View File

@@ -7559,7 +7559,7 @@ int wolfSSL_CTX_check_private_key(const WOLFSSL_CTX* ctx)
* Return the private key of the WOLFSSL_CTX struct
 * @return WOLFSSL_EVP_PKEY* The caller does *NOT* free the returned object.
*/
WOLFSSL_EVP_PKEY* wolfSSL_CTX_get0_privatekey(const WOLFSSL_CTX* ctx)
WOLFSSL_EVP_PKEY* wolfSSL_CTX_get0_privatekey(WOLFSSL_CTX* ctx)
{
WOLFSSL_EVP_PKEY* res;
const unsigned char *key;

View File

@@ -1273,98 +1273,9 @@ char* wc_strdup_ex(const char *src, int memType) {
#if defined(WOLFSSL_ATOMIC_OPS) && !defined(SINGLE_THREADED)
#ifdef HAVE_C___ATOMIC
/* Atomic ops using standard C lib */
#ifdef __cplusplus
/* C++ using direct calls to compiler built-in functions */
void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i)
{
*c = i;
}
#if defined(WOLFSSL_USER_DEFINED_ATOMICS)
void wolfSSL_Atomic_Uint_Init(wolfSSL_Atomic_Uint* c, unsigned int i)
{
*c = i;
}
int wolfSSL_Atomic_Int_FetchAdd(wolfSSL_Atomic_Int* c, int i)
{
return __atomic_fetch_add(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_FetchSub(wolfSSL_Atomic_Int* c, int i)
{
return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_AddFetch(wolfSSL_Atomic_Int* c, int i)
{
return __atomic_add_fetch(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_SubFetch(wolfSSL_Atomic_Int* c, int i)
{
return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_CompareExchange(wolfSSL_Atomic_Int* c, int *expected_i,
int new_i)
{
/* For the success path, use full synchronization with barriers --
* "Sequentially-consistent ordering" -- so that all threads see the same
* "single total modification order of all atomic operations" -- but on
* failure we just need to be sure we acquire the value that changed out
* from under us.
*/
return __atomic_compare_exchange_n(c, expected_i, new_i, 0 /* weak */,
__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
unsigned int wolfSSL_Atomic_Uint_FetchAdd(wolfSSL_Atomic_Uint* c,
unsigned int i)
{
return __atomic_fetch_add(c, i, __ATOMIC_RELAXED);
}
unsigned int wolfSSL_Atomic_Uint_FetchSub(wolfSSL_Atomic_Uint* c,
unsigned int i)
{
return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED);
}
unsigned int wolfSSL_Atomic_Uint_AddFetch(wolfSSL_Atomic_Uint* c,
unsigned int i)
{
return __atomic_add_fetch(c, i, __ATOMIC_RELAXED);
}
unsigned int wolfSSL_Atomic_Uint_SubFetch(wolfSSL_Atomic_Uint* c,
unsigned int i)
{
return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Uint_CompareExchange(
wolfSSL_Atomic_Uint* c, unsigned int *expected_i, unsigned int new_i)
{
/* For the success path, use full synchronization with barriers --
* "Sequentially-consistent ordering" -- so that all threads see the same
* "single total modification order of all atomic operations" -- but on
* failure we just need to be sure we acquire the value that changed out
* from under us.
*/
return __atomic_compare_exchange_n(
c, expected_i, new_i, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
int wolfSSL_Atomic_Ptr_CompareExchange(
void **c, void **expected_ptr, void *new_ptr)
{
return __atomic_compare_exchange_n(
c, expected_ptr, new_ptr, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
#else
#elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H)
/* Default C Implementation */
void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i)
@@ -1459,12 +1370,102 @@ int wolfSSL_Atomic_Ptr_CompareExchange(
* requirements.
*/
return __atomic_compare_exchange_n(
c, expected_ptr, new_ptr, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
c, expected_ptr, new_ptr, 0 /* weak */,
__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
#endif /* __cplusplus */
#elif defined(__GNUC__) && defined(__ATOMIC_RELAXED)
/* direct calls using gcc-style compiler built-ins */
#elif defined(_MSC_VER)
void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i)
{
*c = i;
}
void wolfSSL_Atomic_Uint_Init(wolfSSL_Atomic_Uint* c, unsigned int i)
{
*c = i;
}
int wolfSSL_Atomic_Int_FetchAdd(wolfSSL_Atomic_Int* c, int i)
{
return __atomic_fetch_add(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_FetchSub(wolfSSL_Atomic_Int* c, int i)
{
return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_AddFetch(wolfSSL_Atomic_Int* c, int i)
{
return __atomic_add_fetch(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_SubFetch(wolfSSL_Atomic_Int* c, int i)
{
return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Int_CompareExchange(wolfSSL_Atomic_Int* c, int *expected_i,
int new_i)
{
/* For the success path, use full synchronization with barriers --
* "Sequentially-consistent ordering" -- so that all threads see the same
* "single total modification order of all atomic operations" -- but on
* failure we just need to be sure we acquire the value that changed out
* from under us.
*/
return __atomic_compare_exchange_n(c, expected_i, new_i, 0 /* weak */,
__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
unsigned int wolfSSL_Atomic_Uint_FetchAdd(wolfSSL_Atomic_Uint* c,
unsigned int i)
{
return __atomic_fetch_add(c, i, __ATOMIC_RELAXED);
}
unsigned int wolfSSL_Atomic_Uint_FetchSub(wolfSSL_Atomic_Uint* c,
unsigned int i)
{
return __atomic_fetch_sub(c, i, __ATOMIC_RELAXED);
}
unsigned int wolfSSL_Atomic_Uint_AddFetch(wolfSSL_Atomic_Uint* c,
unsigned int i)
{
return __atomic_add_fetch(c, i, __ATOMIC_RELAXED);
}
unsigned int wolfSSL_Atomic_Uint_SubFetch(wolfSSL_Atomic_Uint* c,
unsigned int i)
{
return __atomic_sub_fetch(c, i, __ATOMIC_RELAXED);
}
int wolfSSL_Atomic_Uint_CompareExchange(
wolfSSL_Atomic_Uint* c, unsigned int *expected_i, unsigned int new_i)
{
/* For the success path, use full synchronization with barriers --
* "Sequentially-consistent ordering" -- so that all threads see the same
* "single total modification order of all atomic operations" -- but on
* failure we just need to be sure we acquire the value that changed out
* from under us.
*/
return __atomic_compare_exchange_n(
c, expected_i, new_i, 0 /* weak */, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
int wolfSSL_Atomic_Ptr_CompareExchange(
void **c, void **expected_ptr, void *new_ptr)
{
return __atomic_compare_exchange_n(
c, expected_ptr, new_ptr, 0 /* weak */,
__ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
}
#elif defined(_MSC_VER) && !defined(WOLFSSL_NOT_WINDOWS_API)
void wolfSSL_Atomic_Int_Init(wolfSSL_Atomic_Int* c, int i)
{
@@ -1545,8 +1546,8 @@ unsigned int wolfSSL_Atomic_Uint_SubFetch(wolfSSL_Atomic_Uint* c,
int wolfSSL_Atomic_Uint_CompareExchange(
wolfSSL_Atomic_Uint* c, unsigned int *expected_i, unsigned int new_i)
{
long actual_i = InterlockedCompareExchange
((wolfSSL_Atomic_Int *)c, (long)new_i, (long)*expected_i);
long actual_i = InterlockedCompareExchange(
(wolfSSL_Atomic_Int *)c, (long)new_i, (long)*expected_i);
if (actual_i == (long)*expected_i) {
return 1;
}
@@ -1560,23 +1561,23 @@ int wolfSSL_Atomic_Uint_CompareExchange(
void ** c, void **expected_ptr, void *new_ptr)
{
#ifdef _WIN64
LONG64 actual_ptr = InterlockedCompareExchange64
((LONG64 *)c, (LONG64)new_i, (LONG64)*expected_i);
if (actual_ptr == (LONG64)*expected_i) {
LONG64 actual_ptr = InterlockedCompareExchange64(
(LONG64 *)c, (LONG64)new_ptr, (LONG64)*expected_ptr);
if (actual_ptr == (LONG64)*expected_ptr) {
return 1;
}
else {
*expected_i = (void *)actual_ptr;
*expected_ptr = (void *)actual_ptr;
return 0;
}
#else /* !_WIN64 */
LONG actual_ptr = InterlockedCompareExchange
((LONG *)c, (LONG)new_i, (LONG)*expected_i);
if (actual_ptr == (LONG)*expected_i) {
LONG actual_ptr = InterlockedCompareExchange(
(LONG *)c, (LONG)new_ptr, (LONG)*expected_ptr);
if (actual_ptr == (LONG)*expected_ptr) {
return 1;
}
else {
*expected_i = (void *)actual_ptr;
*expected_ptr = (void *)actual_ptr;
return 0;
}
#endif /* !_WIN64 */

View File

@@ -3228,7 +3228,7 @@ WOLFSSL_API int wolfSSL_want_write(WOLFSSL* ssl);
#ifdef OPENSSL_EXTRA
WOLFSSL_API int wolfSSL_want(WOLFSSL* ssl);
WOLFSSL_API WOLFSSL_EVP_PKEY* wolfSSL_CTX_get0_privatekey(const WOLFSSL_CTX* ctx);
WOLFSSL_API WOLFSSL_EVP_PKEY* wolfSSL_CTX_get0_privatekey(WOLFSSL_CTX* ctx);
#include <stdarg.h> /* var_arg */
WOLFSSL_API int wolfSSL_BIO_vprintf(WOLFSSL_BIO* bio, const char* format,

View File

@@ -494,31 +494,25 @@
#define WOLFSSL_ATOMIC_LOAD(x) (x)
#define WOLFSSL_ATOMIC_STORE(x, val) (x) = (val)
#define WOLFSSL_ATOMIC_OPS
#elif defined(HAVE_C___ATOMIC)
#ifdef __cplusplus
#if defined(__GNUC__) && defined(__ATOMIC_RELAXED)
/* C++ using direct calls to compiler built-in functions */
typedef volatile int wolfSSL_Atomic_Int;
typedef volatile unsigned int wolfSSL_Atomic_Uint;
#define WOLFSSL_ATOMIC_INITIALIZER(x) (x)
#define WOLFSSL_ATOMIC_LOAD(x) __atomic_load_n(&(x), \
__ATOMIC_CONSUME)
#define WOLFSSL_ATOMIC_STORE(x, val) __atomic_store_n(&(x), \
val, __ATOMIC_RELEASE)
#define WOLFSSL_ATOMIC_OPS
#endif
#else
#ifdef WOLFSSL_HAVE_ATOMIC_H
/* Default C Implementation */
#include <stdatomic.h>
typedef atomic_int wolfSSL_Atomic_Int;
typedef atomic_uint wolfSSL_Atomic_Uint;
#define WOLFSSL_ATOMIC_INITIALIZER(x) (x)
#define WOLFSSL_ATOMIC_LOAD(x) atomic_load(&(x))
#define WOLFSSL_ATOMIC_STORE(x, val) atomic_store(&(x), val)
#define WOLFSSL_ATOMIC_OPS
#endif /* WOLFSSL_HAVE_ATOMIC_H */
#endif
#elif defined(HAVE_C___ATOMIC) && defined(WOLFSSL_HAVE_ATOMIC_H)
/* Default C Implementation */
#include <stdatomic.h>
typedef atomic_int wolfSSL_Atomic_Int;
typedef atomic_uint wolfSSL_Atomic_Uint;
#define WOLFSSL_ATOMIC_INITIALIZER(x) (x)
#define WOLFSSL_ATOMIC_LOAD(x) atomic_load(&(x))
#define WOLFSSL_ATOMIC_STORE(x, val) atomic_store(&(x), val)
#define WOLFSSL_ATOMIC_OPS
#elif defined(__GNUC__) && defined(__ATOMIC_CONSUME)
/* direct calls using gcc-style compiler built-ins */
typedef volatile int wolfSSL_Atomic_Int;
typedef volatile unsigned int wolfSSL_Atomic_Uint;
#define WOLFSSL_ATOMIC_INITIALIZER(x) (x)
#define WOLFSSL_ATOMIC_LOAD(x) __atomic_load_n(&(x), \
__ATOMIC_CONSUME)
#define WOLFSSL_ATOMIC_STORE(x, val) __atomic_store_n(&(x), \
val, __ATOMIC_RELEASE)
#define WOLFSSL_ATOMIC_OPS
#elif defined(_MSC_VER) && !defined(WOLFSSL_NOT_WINDOWS_API)
/* Use MSVC compiler intrinsics for atomic ops */
#ifdef _WIN32_WCE
@@ -534,8 +528,8 @@
#define WOLFSSL_ATOMIC_OPS
#endif
#ifndef WOLFSSL_ATOMIC_INITIALIZER
/* If we weren't able to implement atomics above, disable them here. */
/* If we weren't able to implement atomics above, disable them here. */
#ifndef WOLFSSL_ATOMIC_OPS
#define WOLFSSL_NO_ATOMICS
#endif
#endif