From 20bbd2af42ea3d5583e080d790b3abdfd4e86ec0 Mon Sep 17 00:00:00 2001 From: Marius Vikhammer Date: Mon, 10 Mar 2025 17:45:24 +0800 Subject: [PATCH] feat(newlib): add option for placing newlib lock API into flash --- components/newlib/Kconfig | 8 +++ components/newlib/src/locks.c | 54 +++++++++++--------- components/newlib/src/time.c | 2 - docs/en/api-guides/performance/ram-usage.rst | 1 + 4 files changed, 39 insertions(+), 26 deletions(-) diff --git a/components/newlib/Kconfig b/components/newlib/Kconfig index 23f6b6a9e8..221ef36a94 100644 --- a/components/newlib/Kconfig +++ b/components/newlib/Kconfig @@ -11,6 +11,14 @@ menu "LibC" depends on !IDF_TOOLCHAIN_CLANG && IDF_EXPERIMENTAL_FEATURES endchoice + config LIBC_LOCKS_PLACE_IN_IRAM + bool "Place lock API in IRAM" + default y + depends on LIBC_NEWLIB + help + Enable this option to be able to call the lock API from + code that runs while cache is disabled, e.g. IRAM interrupts. + choice LIBC_STDOUT_LINE_ENDING prompt "Line ending for console output" default LIBC_STDOUT_LINE_ENDING_CRLF diff --git a/components/newlib/src/locks.c b/components/newlib/src/locks.c index a4570b5682..582256dc3d 100644 --- a/components/newlib/src/locks.c +++ b/components/newlib/src/locks.c @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD + * SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Apache-2.0 */ @@ -16,6 +16,12 @@ #include "sdkconfig.h" +#if CONFIG_LIBC_LOCKS_PLACE_IN_IRAM +#define NEWLIB_LOCKS_IRAM_ATTR IRAM_ATTR +#else +#define NEWLIB_LOCKS_IRAM_ATTR +#endif + /* Notes on our newlib lock implementation: * * - Use FreeRTOS mutex semaphores as locks. * @@ -44,7 +50,7 @@ static portMUX_TYPE lock_init_spinlock = portMUX_INITIALIZER_UNLOCKED; Called by _lock_init*, also called by _lock_acquire* to lazily initialize locks that might have been initialised (to zero only) before the RTOS scheduler started. 
*/ -static void IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type) +static void NEWLIB_LOCKS_IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type) { portENTER_CRITICAL(&lock_init_spinlock); if (*lock) { @@ -75,13 +81,13 @@ static void IRAM_ATTR lock_init_generic(_lock_t *lock, uint8_t mutex_type) portEXIT_CRITICAL(&lock_init_spinlock); } -void IRAM_ATTR _lock_init(_lock_t *lock) +void NEWLIB_LOCKS_IRAM_ATTR _lock_init(_lock_t *lock) { *lock = 0; // In case lock's memory is uninitialized lock_init_generic(lock, queueQUEUE_TYPE_MUTEX); } -void IRAM_ATTR _lock_init_recursive(_lock_t *lock) +void NEWLIB_LOCKS_IRAM_ATTR _lock_init_recursive(_lock_t *lock) { *lock = 0; // In case lock's memory is uninitialized lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX); @@ -97,7 +103,7 @@ void IRAM_ATTR _lock_init_recursive(_lock_t *lock) re-initialised if it is used again. Caller has to avoid doing this! */ -void IRAM_ATTR _lock_close(_lock_t *lock) +void NEWLIB_LOCKS_IRAM_ATTR _lock_close(_lock_t *lock) { portENTER_CRITICAL(&lock_init_spinlock); if (*lock) { @@ -116,7 +122,7 @@ void _lock_close_recursive(_lock_t *lock) __attribute__((alias("_lock_close"))); /* Acquire the mutex semaphore for lock. wait up to delay ticks. mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX */ -static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t mutex_type) +static int NEWLIB_LOCKS_IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t mutex_type) { SemaphoreHandle_t h = (SemaphoreHandle_t)(*lock); if (!h) { @@ -158,22 +164,22 @@ static int IRAM_ATTR lock_acquire_generic(_lock_t *lock, uint32_t delay, uint8_t return (success == pdTRUE) ? 
0 : -1; } -void IRAM_ATTR _lock_acquire(_lock_t *lock) +void NEWLIB_LOCKS_IRAM_ATTR _lock_acquire(_lock_t *lock) { lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_MUTEX); } -void IRAM_ATTR _lock_acquire_recursive(_lock_t *lock) +void NEWLIB_LOCKS_IRAM_ATTR _lock_acquire_recursive(_lock_t *lock) { lock_acquire_generic(lock, portMAX_DELAY, queueQUEUE_TYPE_RECURSIVE_MUTEX); } -int IRAM_ATTR _lock_try_acquire(_lock_t *lock) +int NEWLIB_LOCKS_IRAM_ATTR _lock_try_acquire(_lock_t *lock) { return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_MUTEX); } -int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock) +int NEWLIB_LOCKS_IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock) { return lock_acquire_generic(lock, 0, queueQUEUE_TYPE_RECURSIVE_MUTEX); } @@ -181,7 +187,7 @@ int IRAM_ATTR _lock_try_acquire_recursive(_lock_t *lock) /* Release the mutex semaphore for lock. mutex_type is queueQUEUE_TYPE_RECURSIVE_MUTEX or queueQUEUE_TYPE_MUTEX */ -static void IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type) +static void NEWLIB_LOCKS_IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type) { if (xTaskGetSchedulerState() == taskSCHEDULER_NOT_STARTED) { return; /* locking is a no-op before scheduler is up */ @@ -207,12 +213,12 @@ static void IRAM_ATTR lock_release_generic(_lock_t *lock, uint8_t mutex_type) } } -void IRAM_ATTR _lock_release(_lock_t *lock) +void NEWLIB_LOCKS_IRAM_ATTR _lock_release(_lock_t *lock) { lock_release_generic(lock, queueQUEUE_TYPE_MUTEX); } -void IRAM_ATTR _lock_release_recursive(_lock_t *lock) +void NEWLIB_LOCKS_IRAM_ATTR _lock_release_recursive(_lock_t *lock) { lock_release_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX); } @@ -285,69 +291,69 @@ static StaticSemaphore_t s_common_recursive_mutex; #define MAYBE_OVERRIDE_LOCK(_lock, _lock_to_use_instead) #endif // ROM_NEEDS_MUTEX_OVERRIDE -void IRAM_ATTR __retarget_lock_init(_LOCK_T *lock) +void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_init(_LOCK_T *lock) { *lock = NULL; /* In 
case lock's memory is uninitialized */ lock_init_generic(lock, queueQUEUE_TYPE_MUTEX); } -void IRAM_ATTR __retarget_lock_init_recursive(_LOCK_T *lock) +void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_init_recursive(_LOCK_T *lock) { *lock = NULL; /* In case lock's memory is uninitialized */ lock_init_generic(lock, queueQUEUE_TYPE_RECURSIVE_MUTEX); } -void IRAM_ATTR __retarget_lock_close(_LOCK_T lock) +void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_close(_LOCK_T lock) { _lock_close(&lock); } -void IRAM_ATTR __retarget_lock_close_recursive(_LOCK_T lock) +void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_close_recursive(_LOCK_T lock) { _lock_close_recursive(&lock); } /* Separate function, to prevent generating multiple assert strings */ -static void IRAM_ATTR check_lock_nonzero(_LOCK_T lock) +static void NEWLIB_LOCKS_IRAM_ATTR check_lock_nonzero(_LOCK_T lock) { assert(lock != NULL && "Uninitialized lock used"); } -void IRAM_ATTR __retarget_lock_acquire(_LOCK_T lock) +void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_acquire(_LOCK_T lock) { check_lock_nonzero(lock); MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex); _lock_acquire(&lock); } -void IRAM_ATTR __retarget_lock_acquire_recursive(_LOCK_T lock) +void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_acquire_recursive(_LOCK_T lock) { check_lock_nonzero(lock); MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex); _lock_acquire_recursive(&lock); } -int IRAM_ATTR __retarget_lock_try_acquire(_LOCK_T lock) +int NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_try_acquire(_LOCK_T lock) { check_lock_nonzero(lock); MAYBE_OVERRIDE_LOCK(lock, &s_common_mutex); return _lock_try_acquire(&lock); } -int IRAM_ATTR __retarget_lock_try_acquire_recursive(_LOCK_T lock) +int NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_try_acquire_recursive(_LOCK_T lock) { check_lock_nonzero(lock); MAYBE_OVERRIDE_LOCK(lock, &s_common_recursive_mutex); return _lock_try_acquire_recursive(&lock); } -void IRAM_ATTR __retarget_lock_release(_LOCK_T lock) +void NEWLIB_LOCKS_IRAM_ATTR 
__retarget_lock_release(_LOCK_T lock) { check_lock_nonzero(lock); _lock_release(&lock); } -void IRAM_ATTR __retarget_lock_release_recursive(_LOCK_T lock) +void NEWLIB_LOCKS_IRAM_ATTR __retarget_lock_release_recursive(_LOCK_T lock) { check_lock_nonzero(lock); _lock_release_recursive(&lock); } diff --git a/components/newlib/src/time.c b/components/newlib/src/time.c index ff67fe8850..1338803af0 100644 --- a/components/newlib/src/time.c +++ b/components/newlib/src/time.c @@ -174,8 +174,6 @@ WEAK_UNLESS_TIMEFUNC_IMPL int _gettimeofday_r(struct _reent *r, struct timeval * tv->tv_sec = microseconds / 1000000; tv->tv_usec = microseconds % 1000000; } - __errno_r(r) = ENOSYS; - return 0; #else __errno_r(r) = ENOSYS; diff --git a/docs/en/api-guides/performance/ram-usage.rst b/docs/en/api-guides/performance/ram-usage.rst index d5435bf9fd..ba1d68fba4 100644 --- a/docs/en/api-guides/performance/ram-usage.rst +++ b/docs/en/api-guides/performance/ram-usage.rst @@ -193,6 +193,7 @@ The following options will reduce IRAM usage of some ESP-IDF features: - Refer to the sdkconfig menu ``Auto-detect Flash chips``, and you can disable flash drivers which you do not need to save some IRAM. :SOC_GPSPI_SUPPORTED: - Enable :ref:`CONFIG_HEAP_PLACE_FUNCTION_INTO_FLASH`. Provided that :ref:`CONFIG_SPI_MASTER_ISR_IN_IRAM` is not enabled and the heap functions are not incorrectly used from ISRs, this option is safe to enable in all configurations. :esp32c2: - Enable :ref:`CONFIG_BT_RELEASE_IRAM`. Release BT text section and merge BT data, bss & text into a large free heap region when ``esp_bt_mem_release`` is called. This makes Bluetooth unavailable until the next restart, but saving ~22 KB or more of IRAM. + - Disable :ref:`CONFIG_LIBC_LOCKS_PLACE_IN_IRAM` if no ISRs that run while cache is disabled (i.e. IRAM ISRs) are using libc lock APIs. .. only:: esp32