feat(newlib): add option for placing newlib lock API into flash

This commit is contained in:
Marius Vikhammer
2025-03-10 17:45:24 +08:00
parent 2fc73a05b3
commit 20bbd2af42
4 changed files with 39 additions and 26 deletions

View File

@@ -11,6 +11,14 @@ menu "LibC"
depends on !IDF_TOOLCHAIN_CLANG && IDF_EXPERIMENTAL_FEATURES depends on !IDF_TOOLCHAIN_CLANG && IDF_EXPERIMENTAL_FEATURES
endchoice endchoice
config LIBC_LOCKS_PLACE_IN_IRAM
bool "Place lock API in IRAM"
default y
depends on LIBC_NEWLIB
help
Enable this option to be able to call the lock API from
code that runs while cache is disabled, e.g. IRAM interrupts.
choice LIBC_STDOUT_LINE_ENDING choice LIBC_STDOUT_LINE_ENDING
prompt "Line ending for console output" prompt "Line ending for console output"
default LIBC_STDOUT_LINE_ENDING_CRLF default LIBC_STDOUT_LINE_ENDING_CRLF

View File

@@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@@ -16,6 +16,12 @@
#include "sdkconfig.h" #include "sdkconfig.h"
#if CONFIG_LIBC_LOCKS_PLACE_IN_IRAM
#define NEWLIB_LOCKS_IRAM_ATTR IRAM_ATTR
#else
#define NEWLIB_LOCKS_IRAM_ATTR
#endif
/* Notes on our newlib lock implementation: /* Notes on our newlib lock implementation:
* *
* - Use FreeRTOS mutex semaphores as locks. * - Use FreeRTOS mutex semaphores as locks.
@@ -44,7 +50,7 @@ static portMUX_TYPE lock_init_spinlock = portMUX_INITIALIZER_UNLOCKED;
Called by _lock_init*, also called by _lock_acquire* to lazily initialize locks that might have Called by _lock_init*, also called by _lock_acquire* to lazily initialize locks that might have
been initialised (to zero only) before the RTOS scheduler started. been initialised (to zero only) before the RTOS scheduler started.
*/ */
static void IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type) static void NEWLIB_LOCKS_IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type)
{ {
portENTER_CRITICAL(&lock_init_spinlock); portENTER_CRITICAL(&lock_init_spinlock);
if (*lock) { if (*lock) {
@@ -75,13 +81,13 @@ static void IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type)
portEXIT_CRITICAL(&lock_init_spinlock); portEXIT_CRITICAL(&lock_init_spinlock);
} }
void IRAM_ATTR _lock_init(_lock_t *lock) void NEWLIB_LOCKS_IRAM_ATTR _lock_init(_lock_t *lock)
{ {
*lock = 0; // In case lock's memory is uninitialized *lock = 0; // In case lock's memory is uninitialized
lock_init_generic(lock, queueQUEUE_TYPE_MUTEX); lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
} }
void IRAM_ATTR _lock_init_recursive(_lock_t *lock) void NEWLIB_LOCKS_IRAM_ATTR _lock_init_recursive(_lock_t *lock)
{ {
*lock = 0; // In case lock's memory is uninitialized *lock = 0; // In case lock's memory is uninitialized
lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX); lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
@@ -97,7 +103,7 @@ void IRAM_ATTR _lock_init_recursive(_lock_t *lock)
re-initialised if it is used again. Caller has to avoid doing re-initialised if it is used again. Caller has to avoid doing
this! this!
*/ */
void IRAM_ATTR _lock_close(_lock_t *lock) void NEWLIB_LOCKS_IRAM_ATTR _lock_close(_lock_t *lock)
{ {
portENTER_CRITICAL(&lock_init_spinlock); portENTER_CRITICAL(&lock_init_spinlock);
if (*lock) { if (*lock) {
@@ -116,7 +122,7 @@ void _lock_close_recursive(_lock_t *lock) __attribute__((alias("_lock_close")));
/* Acquire the mutex semaphore for lock. wait up to delay ticks. /* Acquire the mutex semaphore for lock. wait up to delay ticks.
mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX
*/ */
static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t mutex_type) static int NEWLIB_LOCKS_IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t mutex_type)
{ {
SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock); SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock);
if (!h) { if (!h) {
@@ -158,22 +164,22 @@ static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t
return (success == pdTRUE) ? 0 : -1; return (success == pdTRUE) ? 0 : -1;
} }
void IRAM_ATTR _lock_acquire(_lock_t *lock) void NEWLIB_LOCKS_IRAM_ATTR _lock_acquire(_lock_t *lock)
{ {
lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_MUTEX); lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_MUTEX);
} }
void IRAM_ATTR _lock_acquire_recursive(_lock_t *lock) void NEWLIB_LOCKS_IRAM_ATTR _lock_acquire_recursive(_lock_t *lock)
{ {
lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_RECURSIVE_MUTEX); lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_RECURSIVE_MUTEX);
} }
int IRAM_ATTR _lock_try_acquire(_lock_t *lock) int NEWLIB_LOCKS_IRAM_ATTR _lock_try_acquire(_lock_t *lock)
{ {
return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_MUTEX); return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_MUTEX);
} }
int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock) int NEWLIB_LOCKS_IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock)
{ {
return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_RECURSIVE_MUTEX); return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_RECURSIVE_MUTEX);
} }
@@ -181,7 +187,7 @@ int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock)
/* Release the mutex semaphore for lock. /* Release the mutex semaphore for lock.
mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX
*/ */
static void IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type) static void NEWLIB_LOCKS_IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type)
{ {
if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) { if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
return; /* locking is a no-op before scheduler is up */ return; /* locking is a no-op before scheduler is up */
@@ -207,12 +213,12 @@ static void IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type)
} }
} }
void IRAM_ATTR _lock_release(_lock_t *lock) void NEWLIB_LOCKS_IRAM_ATTR _lock_release(_lock_t *lock)
{ {
lock_release_generic(lock, queueQUEUE_TYPE_MUTEX); lock_release_generic(lock, queueQUEUE_TYPE_MUTEX);
} }
void IRAM_ATTR _lock_release_recursive(_lock_t *lock) void NEWLIB_LOCKS_IRAM_ATTR _lock_release_recursive(_lock_t *lock)
{ {
lock_release_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX); lock_release_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
} }
@@ -285,69 +291,69 @@ static StaticSemaphore_t s_common_recursive_mutex;
#define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead) #define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead)
#endif // ROM_NEEDS_MUTEX_OVERRIDE #endif // ROM_NEEDS_MUTEX_OVERRIDE
void IRAM_ATTR __retarget_lock_init(_LOCK_T *lock) void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_init(_LOCK_T *lock)
{ {
*lock = NULL; /* In case lock's memory is uninitialized */ *lock = NULL; /* In case lock's memory is uninitialized */
lock_init_generic(lock, queueQUEUE_TYPE_MUTEX); lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
} }
void IRAM_ATTR __retarget_lock_init_recursive(_LOCK_T *lock) void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_init_recursive(_LOCK_T *lock)
{ {
*lock = NULL; /* In case lock's memory is uninitialized */ *lock = NULL; /* In case lock's memory is uninitialized */
lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX); lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
} }
void IRAM_ATTR __retarget_lock_close(_LOCK_T lock) void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_close(_LOCK_T lock)
{ {
_lock_close(&lock); _lock_close(&lock);
} }
void IRAM_ATTR __retarget_lock_close_recursive(_LOCK_T lock) void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_close_recursive(_LOCK_T lock)
{ {
_lock_close_recursive(&lock); _lock_close_recursive(&lock);
} }
/* Separate function, to prevent generating multiple assert strings */ /* Separate function, to prevent generating multiple assert strings */
static void IRAM_ATTR check_lock_nonzero(_LOCK_T lock) static void NEWLIB_LOCKS_IRAM_ATTR check_lock_nonzero(_LOCK_T lock)
{ {
assert(lock != NULL && "Uninitialized lock used"); assert(lock != NULL && "Uninitialized lock used");
} }
void IRAM_ATTR __retarget_lock_acquire(_LOCK_T lock) void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_acquire(_LOCK_T lock)
{ {
check_lock_nonzero(lock); check_lock_nonzero(lock);
MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex); MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
_lock_acquire(&lock); _lock_acquire(&lock);
} }
void IRAM_ATTR __retarget_lock_acquire_recursive(_LOCK_T lock) void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_acquire_recursive(_LOCK_T lock)
{ {
check_lock_nonzero(lock); check_lock_nonzero(lock);
MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex); MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
_lock_acquire_recursive(&lock); _lock_acquire_recursive(&lock);
} }
int IRAM_ATTR __retarget_lock_try_acquire(_LOCK_T lock) int NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_try_acquire(_LOCK_T lock)
{ {
check_lock_nonzero(lock); check_lock_nonzero(lock);
MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex); MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
return _lock_try_acquire(&lock); return _lock_try_acquire(&lock);
} }
int IRAM_ATTR __retarget_lock_try_acquire_recursive(_LOCK_T lock) int NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{ {
check_lock_nonzero(lock); check_lock_nonzero(lock);
MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex); MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
return _lock_try_acquire_recursive(&lock); return _lock_try_acquire_recursive(&lock);
} }
void IRAM_ATTR __retarget_lock_release(_LOCK_T lock) void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_release(_LOCK_T lock)
{ {
check_lock_nonzero(lock); check_lock_nonzero(lock);
_lock_release(&lock); _lock_release(&lock);
} }
void IRAM_ATTR __retarget_lock_release_recursive(_LOCK_T lock) void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_release_recursive(_LOCK_T lock)
{ {
check_lock_nonzero(lock); check_lock_nonzero(lock);
_lock_release_recursive(&lock); _lock_release_recursive(&lock);

View File

@@ -174,8 +174,6 @@ WEAK_UNLESS_TIMEFUNC_IMPL int _gettimeofday_r(struct _reent *r, struct timeval *
tv->tv_sec = microseconds / 1000000; tv->tv_sec = microseconds / 1000000;
tv->tv_usec = microseconds % 1000000; tv->tv_usec = microseconds % 1000000;
} }
__errno_r(r) = ENOSYS;
return 0; return 0;
#else #else
__errno_r(r) = ENOSYS; __errno_r(r) = ENOSYS;

View File

@@ -193,6 +193,7 @@ The following options will reduce IRAM usage of some ESP-IDF features:
- Refer to the sdkconfig menu ``Auto-detect Flash chips``, and you can disable flash drivers which you do not need to save some IRAM. - Refer to the sdkconfig menu ``Auto-detect Flash chips``, and you can disable flash drivers which you do not need to save some IRAM.
:SOC_GPSPI_SUPPORTED: - Enable :ref:`CONFIG_HEAP_PLACE_FUNCTION_INTO_FLASH`. Provided that :ref:`CONFIG_SPI_MASTER_ISR_IN_IRAM` is not enabled and the heap functions are not incorrectly used from ISRs, this option is safe to enable in all configurations. :SOC_GPSPI_SUPPORTED: - Enable :ref:`CONFIG_HEAP_PLACE_FUNCTION_INTO_FLASH`. Provided that :ref:`CONFIG_SPI_MASTER_ISR_IN_IRAM` is not enabled and the heap functions are not incorrectly used from ISRs, this option is safe to enable in all configurations.
:esp32c2: - Enable :ref:`CONFIG_BT_RELEASE_IRAM`. Release BT text section and merge BT data, bss & text into a large free heap region when ``esp_bt_mem_release`` is called. This makes Bluetooth unavailable until the next restart, but saving ~22 KB or more of IRAM. :esp32c2: - Enable :ref:`CONFIG_BT_RELEASE_IRAM`. Release BT text section and merge BT data, bss & text into a large free heap region when ``esp_bt_mem_release`` is called. This makes Bluetooth unavailable until the next restart, but saving ~22 KB or more of IRAM.
- Disable :ref:`CONFIG_LIBC_LOCKS_PLACE_IN_IRAM` if no ISRs that run while cache is disabled (i.e. IRAM ISRs) are using libc lock APIs.
.. only:: esp32 .. only:: esp32