Merge branch 'feature/newlib_iram_cleanup' into 'master'

feat(newlib): optimize IRAM usage

Closes IDF-7146

See merge request espressif/esp-idf!37647
Marius Vikhammer
2025-03-14 10:28:45 +08:00
5 changed files with 43 additions and 28 deletions


@@ -11,6 +11,14 @@ menu "LibC"
depends on !IDF_TOOLCHAIN_CLANG && IDF_EXPERIMENTAL_FEATURES
endchoice
+config LIBC_LOCKS_PLACE_IN_IRAM
+    bool "Place lock API in IRAM"
+    default y
+    depends on LIBC_NEWLIB
+    help
+        Enable this option to be able to call the lock API from
+        code that runs while cache is disabled, e.g. IRAM interrupts.
choice LIBC_STDOUT_LINE_ENDING
prompt "Line ending for console output"
default LIBC_STDOUT_LINE_ENDING_CRLF
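For context, a minimal sketch of the scenario the new option's help text describes: an interrupt allocated with ESP_INTR_FLAG_IRAM, whose handler may run while flash cache is disabled. The handler, install function, and interrupt source below are illustrative, not part of this commit.

#include "esp_attr.h"
#include "esp_intr_alloc.h"
#include "soc/interrupts.h"

/* Placed in IRAM so it can execute while flash cache is disabled. */
static void IRAM_ATTR my_isr(void *arg)
{
    /* Any libc call made here that implicitly takes a newlib lock relies on
     * LIBC_LOCKS_PLACE_IN_IRAM=y, since the lock functions would otherwise
     * live in flash. */
}

void install_iram_isr(void)
{
    /* ESP_INTR_FLAG_IRAM tells the allocator the handler is IRAM-safe. */
    esp_intr_alloc(ETS_GPIO_INTR_SOURCE, ESP_INTR_FLAG_IRAM, my_isr, NULL, NULL);
}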


@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -16,6 +16,12 @@
#include "sdkconfig.h"
+#if CONFIG_LIBC_LOCKS_PLACE_IN_IRAM
+#define NEWLIB_LOCKS_IRAM_ATTR IRAM_ATTR
+#else
+#define NEWLIB_LOCKS_IRAM_ATTR
+#endif
/* Notes on our newlib lock implementation:
*
* - Use FreeRTOS mutex semaphores as locks.
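The #if/#define shim added above is a common ESP-IDF pattern for making IRAM placement configurable per Kconfig option. A minimal sketch of the same pattern applied to a hypothetical component (the option and function names are invented for illustration):

#include "sdkconfig.h"
#include "esp_attr.h"

/* CONFIG_MY_FEATURE_IN_IRAM is an assumed option, not part of this commit. */
#if CONFIG_MY_FEATURE_IN_IRAM
#define MY_FEATURE_IRAM_ATTR IRAM_ATTR  /* place the symbol in IRAM */
#else
#define MY_FEATURE_IRAM_ATTR            /* empty: default flash placement */
#endif

void MY_FEATURE_IRAM_ATTR my_feature_poll(void)
{
    /* Callable while cache is disabled only when the option is enabled. */
}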
@@ -44,7 +50,7 @@ static portMUX_TYPE lock_init_spinlock = portMUX_INITIALIZER_UNLOCKED;
Called by _lock_init*, also called by _lock_acquire* to lazily initialize locks that might have
been initialised (to zero only) before the RTOS scheduler started.
*/
-static void IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type)
+static void NEWLIB_LOCKS_IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type)
{
portENTER_CRITICAL(&lock_init_spinlock);
if (*lock) {
@@ -75,13 +81,13 @@ static void IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type)
portEXIT_CRITICAL(&lock_init_spinlock);
}
-void IRAM_ATTR _lock_init(_lock_t *lock)
+void NEWLIB_LOCKS_IRAM_ATTR _lock_init(_lock_t *lock)
{
*lock = 0; // In case lock's memory is uninitialized
lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
}
-void IRAM_ATTR _lock_init_recursive(_lock_t *lock)
+void NEWLIB_LOCKS_IRAM_ATTR _lock_init_recursive(_lock_t *lock)
{
*lock = 0; // In case lock's memory is uninitialized
lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
@@ -97,7 +103,7 @@ void IRAM_ATTR _lock_init_recursive(_lock_t *lock)
re-initialised if it is used again. Caller has to avoid doing
this!
*/
-void IRAM_ATTR _lock_close(_lock_t *lock)
+void NEWLIB_LOCKS_IRAM_ATTR _lock_close(_lock_t *lock)
{
portENTER_CRITICAL(&lock_init_spinlock);
if (*lock) {
@@ -116,7 +122,7 @@ void _lock_close_recursive(_lock_t *lock) __attribute__((alias("_lock_close")));
/* Acquire the mutex semaphore for lock. wait up to delay ticks.
mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX
*/
-static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t mutex_type)
+static int NEWLIB_LOCKS_IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t mutex_type)
{
SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock);
if (!h) {
@@ -158,22 +164,22 @@ static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t
return (success == pdTRUE) ? 0 : -1;
}
-void IRAM_ATTR _lock_acquire(_lock_t *lock)
+void NEWLIB_LOCKS_IRAM_ATTR _lock_acquire(_lock_t *lock)
{
lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_MUTEX);
}
-void IRAM_ATTR _lock_acquire_recursive(_lock_t *lock)
+void NEWLIB_LOCKS_IRAM_ATTR _lock_acquire_recursive(_lock_t *lock)
{
lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
-int IRAM_ATTR _lock_try_acquire(_lock_t *lock)
+int NEWLIB_LOCKS_IRAM_ATTR _lock_try_acquire(_lock_t *lock)
{
return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_MUTEX);
}
-int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock)
+int NEWLIB_LOCKS_IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock)
{
return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
@@ -181,7 +187,7 @@ int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock)
/* Release the mutex semaphore for lock.
mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX
*/
-static void IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type)
+static void NEWLIB_LOCKS_IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type)
{
if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) {
return; /* locking is a no-op before scheduler is up */
@@ -207,12 +213,12 @@ static void IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type)
}
}
-void IRAM_ATTR _lock_release(_lock_t *lock)
+void NEWLIB_LOCKS_IRAM_ATTR _lock_release(_lock_t *lock)
{
lock_release_generic(lock, queueQUEUE_TYPE_MUTEX);
}
-void IRAM_ATTR _lock_release_recursive(_lock_t *lock)
+void NEWLIB_LOCKS_IRAM_ATTR _lock_release_recursive(_lock_t *lock)
{
lock_release_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
@@ -285,69 +291,69 @@ static StaticSemaphore_t s_common_recursive_mutex;
#define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead)
#endif // ROM_NEEDS_MUTEX_OVERRIDE
-void IRAM_ATTR __retarget_lock_init(_LOCK_T *lock)
+void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_init(_LOCK_T *lock)
{
*lock = NULL; /* In case lock's memory is uninitialized */
lock_init_generic(lock, queueQUEUE_TYPE_MUTEX);
}
-void IRAM_ATTR __retarget_lock_init_recursive(_LOCK_T *lock)
+void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_init_recursive(_LOCK_T *lock)
{
*lock = NULL; /* In case lock's memory is uninitialized */
lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX);
}
-void IRAM_ATTR __retarget_lock_close(_LOCK_T lock)
+void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_close(_LOCK_T lock)
{
_lock_close(&lock);
}
-void IRAM_ATTR __retarget_lock_close_recursive(_LOCK_T lock)
+void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_close_recursive(_LOCK_T lock)
{
_lock_close_recursive(&lock);
}
/* Separate function, to prevent generating multiple assert strings */
-static void IRAM_ATTR check_lock_nonzero(_LOCK_T lock)
+static void NEWLIB_LOCKS_IRAM_ATTR check_lock_nonzero(_LOCK_T lock)
{
assert(lock != NULL && "Uninitialized lock used");
}
-void IRAM_ATTR __retarget_lock_acquire(_LOCK_T lock)
+void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_acquire(_LOCK_T lock)
{
check_lock_nonzero(lock);
MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
_lock_acquire(&lock);
}
-void IRAM_ATTR __retarget_lock_acquire_recursive(_LOCK_T lock)
+void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_acquire_recursive(_LOCK_T lock)
{
check_lock_nonzero(lock);
MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
_lock_acquire_recursive(&lock);
}
-int IRAM_ATTR __retarget_lock_try_acquire(_LOCK_T lock)
+int NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_try_acquire(_LOCK_T lock)
{
check_lock_nonzero(lock);
MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex);
return _lock_try_acquire(&lock);
}
-int IRAM_ATTR __retarget_lock_try_acquire_recursive(_LOCK_T lock)
+int NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_try_acquire_recursive(_LOCK_T lock)
{
check_lock_nonzero(lock);
MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex);
return _lock_try_acquire_recursive(&lock);
}
-void IRAM_ATTR __retarget_lock_release(_LOCK_T lock)
+void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_release(_LOCK_T lock)
{
check_lock_nonzero(lock);
_lock_release(&lock);
}
-void IRAM_ATTR __retarget_lock_release_recursive(_LOCK_T lock)
+void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_release_recursive(_LOCK_T lock)
{
check_lock_nonzero(lock);
_lock_release_recursive(&lock);
}
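For reference, a minimal usage sketch of the _lock_* API retargeted above, assuming the declarations in the toolchain's sys/lock.h (as shipped with ESP-IDF's newlib). The lazy initialization matches the comment on lock_init_generic: a zero-initialized lock is created on first acquire once the scheduler is running.

#include <sys/lock.h>

static _lock_t s_state_lock;  /* zero-initialized; mutex allocated lazily */

void update_shared_state(void)
{
    _lock_acquire(&s_state_lock);   /* creates the FreeRTOS mutex on first use */
    /* ... mutate state shared between tasks ... */
    _lock_release(&s_state_lock);
}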


@@ -19,7 +19,7 @@
* to the streams of the global struct _reent, which are initialized in
* startup code.
*/
-void IRAM_ATTR esp_reent_init(struct _reent* r)
+void esp_reent_init(struct _reent* r)
{
memset(r, 0, sizeof(*r));
_REENT_STDIN(r) = _REENT_STDIN(_GLOBAL_REENT);


@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -152,7 +152,7 @@ WEAK_UNLESS_TIMEFUNC_IMPL int adjtime(const struct timeval *delta, struct timeva
#endif
}
-clock_t IRAM_ATTR _times_r(struct _reent *r, struct tms *ptms)
+clock_t _times_r(struct _reent *r, struct tms *ptms)
{
clock_t t = xTaskGetTickCount() * (portTICK_PERIOD_MS * CLK_TCK / 1000);
ptms->tms_cstime = 0;
@@ -164,7 +164,7 @@ clock_t IRAM_ATTR _times_r(struct _reent *r, struct tms *ptms)
return (clock_t) tv.tv_sec;
}
-WEAK_UNLESS_TIMEFUNC_IMPL int IRAM_ATTR _gettimeofday_r(struct _reent *r, struct timeval *tv, void *tz)
+WEAK_UNLESS_TIMEFUNC_IMPL int _gettimeofday_r(struct _reent *r, struct timeval *tv, void *tz)
{
(void) tz;
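Dropping IRAM_ATTR here is transparent to callers. A sketch of the usual call path into these reentrant functions, assuming newlib's standard routing of the public APIs:

#include <sys/time.h>
#include <time.h>

void query_time(void)
{
    struct timeval now;
    gettimeofday(&now, NULL);  /* newlib forwards to _gettimeofday_r(_REENT, ...) */

    clock_t ticks = clock();   /* ultimately backed by _times_r() */
    (void)ticks;
}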


@@ -193,6 +193,7 @@ The following options will reduce IRAM usage of some ESP-IDF features:
- Refer to the sdkconfig menu ``Auto-detect Flash chips``; you can disable flash drivers which you do not need in order to save some IRAM.
:SOC_GPSPI_SUPPORTED: - Enable :ref:`CONFIG_HEAP_PLACE_FUNCTION_INTO_FLASH`. Provided that :ref:`CONFIG_SPI_MASTER_ISR_IN_IRAM` is not enabled and the heap functions are not incorrectly used from ISRs, this option is safe to enable in all configurations.
:esp32c2: - Enable :ref:`CONFIG_BT_RELEASE_IRAM`. Release BT text section and merge BT data, bss & text into a large free heap region when ``esp_bt_mem_release`` is called. This makes Bluetooth unavailable until the next restart, but saves ~22 KB or more of IRAM.
+- Disable :ref:`CONFIG_LIBC_LOCKS_PLACE_IN_IRAM` if no ISRs that run while cache is disabled (i.e. IRAM ISRs) use the libc lock APIs.
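For example, assuming no IRAM ISR in the application uses libc locks, the new option can be switched off in ``sdkconfig.defaults`` (the default remains ``y``):

CONFIG_LIBC_LOCKS_PLACE_IN_IRAM=n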
.. only:: esp32