Merge branch 'feat/some_xip_psram_related_optimisations_and_fixes' into 'master'

feat(esp_psram): Fix late PSRAM init and add wasted XIP PSRAM alignment memory to the heap

Closes IDF-11463 and IDF-10555

See merge request espressif/esp-idf!37116
Harshal Patil
2025-04-30 00:02:19 +08:00
34 changed files with 896 additions and 112 deletions

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -9,6 +9,9 @@
#include "esp_cpu.h"
#include "esp_fault.h"
#include "esp32c5/rom/rom_layout.h"
#if CONFIG_SPIRAM
#include "esp_private/esp_psram_extram.h"
#endif /* CONFIG_SPIRAM */
#ifdef BOOTLOADER_BUILD
// Without L bit set
@@ -26,6 +29,7 @@
#define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_DOWN_TO_MMU_PAGE_SIZE(addr) ((addr) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_UP(addr, align) (((addr) + ((align) - 1)) & ~((align) - 1))
static void esp_cpu_configure_invalid_regions(void)
{
@@ -178,15 +182,60 @@ void esp_cpu_configure_region_protection(void)
extern int _instruction_reserved_end;
extern int _rodata_reserved_end;
const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
const uint32_t drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
const uint32_t page_aligned_irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
__attribute__((unused)) const uint32_t page_aligned_drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
PMP_ENTRY_CFG_RESET(6);
PMP_ENTRY_SET(6, SOC_IROM_LOW, NONE);
/**
Virtual space layout:
_________ <- SOC_IROM_LOW
| |
|_______| <- _instruction_reserved_end
|_______| <- page_aligned_irom_resv_end
| |
|_______| <- _rodata_reserved_end
|_______| <- page_aligned_drom_resv_end
| |
| |
| |
|_______| <- page_aligned_drom_resv_end + available_psram_heap
| |
| |
| |
| |
|_______| <- SOC_DROM_HIGH
if CONFIG_SPIRAM_FETCH_INSTRUCTIONS: [_instruction_reserved_end, page_aligned_irom_resv_end) in heap (RW)
if CONFIG_SPIRAM_RODATA: [_rodata_reserved_end, page_aligned_drom_resv_end) in heap (RW)
if CONFIG_SPIRAM: [_rodata_reserved_end, page_aligned_drom_resv_end + available_psram_heap] in heap / reserved for mapping (RW)
*/
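/* Editorial summary (not part of the original source) of the top-of-range (TOR) coverage the entries below
 * establish when CONFIG_SPIRAM_FETCH_INSTRUCTIONS, CONFIG_SPIRAM_RODATA and
 * CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION are all enabled:
 *   entry 7:  [SOC_IROM_LOW, _instruction_reserved_end)                RX  (XIP instructions)
 *   entry 8:  [_instruction_reserved_end, page_aligned_irom_resv_end)  RW  (irom alignment gap, reclaimed for heap)
 *   entry 9:  [page_aligned_irom_resv_end, _rodata_reserved_end)       R   (XIP rodata)
 *   entry 10: [_rodata_reserved_end, page_aligned_drom_resv_end + available_psram_heap)  RW  (rodata gap + PSRAM heap,
 *             rounded up to SOC_CPU_PMP_REGION_GRANULARITY)
 */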
PMP_ENTRY_CFG_RESET(7);
PMP_ENTRY_CFG_RESET(8);
PMP_ENTRY_SET(6, SOC_IROM_LOW, NONE);
PMP_ENTRY_SET(7, irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(8, drom_resv_end, PMP_TOR | R);
PMP_ENTRY_CFG_RESET(9);
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
PMP_ENTRY_SET(7, (uint32_t)(&_instruction_reserved_end), PMP_TOR | RX);
PMP_ENTRY_SET(8, page_aligned_irom_resv_end, PMP_TOR | RW);
#else
PMP_ENTRY_SET(7, page_aligned_irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(8, page_aligned_irom_resv_end, NONE);
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#if CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
PMP_ENTRY_SET(9, (uint32_t)(&_rodata_reserved_end), PMP_TOR | R);
#else
PMP_ENTRY_SET(9, page_aligned_drom_resv_end, PMP_TOR | R);
#endif /* CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#if CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
size_t available_psram_heap = esp_psram_get_heap_size_to_protect();
PMP_ENTRY_CFG_RESET(10);
PMP_ENTRY_SET(10, ALIGN_UP(page_aligned_drom_resv_end + available_psram_heap, SOC_CPU_PMP_REGION_GRANULARITY), PMP_TOR | RW);
#endif /* CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#else
const uint32_t pmpaddr6 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
// Add the W attribute in the case of PSRAM
@@ -201,29 +250,31 @@ void esp_cpu_configure_region_protection(void)
/* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits
* Bootloader might have given extra permissions and those won't be cleared
*/
PMP_ENTRY_CFG_RESET(9);
PMP_ENTRY_CFG_RESET(10);
PMP_ENTRY_CFG_RESET(11);
PMP_ENTRY_CFG_RESET(12);
PMP_ENTRY_SET(9, SOC_RTC_IRAM_LOW, NONE);
PMP_ENTRY_CFG_RESET(13);
PMP_ENTRY_CFG_RESET(14);
PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
// First part of LP mem is reserved for ULP coprocessor
#if CONFIG_ESP_SYSTEM_PMP_LP_CORE_RESERVE_MEM_EXECUTABLE
PMP_ENTRY_SET(10, (int)&_rtc_text_start, PMP_TOR | RWX);
PMP_ENTRY_SET(12, (int)&_rtc_text_start, PMP_TOR | RWX);
#else
PMP_ENTRY_SET(10, (int)&_rtc_text_start, PMP_TOR | RW);
PMP_ENTRY_SET(12, (int)&_rtc_text_start, PMP_TOR | RW);
#endif
PMP_ENTRY_SET(11, (int)&_rtc_text_end, PMP_TOR | RX);
PMP_ENTRY_SET(12, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
PMP_ENTRY_SET(13, (int)&_rtc_text_end, PMP_TOR | RX);
PMP_ENTRY_SET(14, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
#else
const uint32_t pmpaddr9 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | CONDITIONAL_RWX);
const uint32_t pmpaddr11 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
PMP_ENTRY_SET(11, pmpaddr11, PMP_NAPOT | CONDITIONAL_RWX);
_Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
#endif
// 6. Peripheral addresses
const uint32_t pmpaddr13 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(13, pmpaddr13, PMP_NAPOT | RW);
PMP_ENTRY_CFG_RESET(15);
const uint32_t pmpaddr15 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(15, pmpaddr15, PMP_NAPOT | RW);
_Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -10,6 +10,9 @@
#include "esp_cpu.h"
#include "esp_fault.h"
#include "esp32c61/rom/rom_layout.h"
#if CONFIG_SPIRAM
#include "esp_private/esp_psram_extram.h"
#endif /* CONFIG_SPIRAM */
#ifdef BOOTLOADER_BUILD
// Without L bit set
@@ -27,6 +30,7 @@
#define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_DOWN_TO_MMU_PAGE_SIZE(addr) ((addr) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_UP(addr, align) (((addr) + ((align) - 1)) & ~((align) - 1))
static void esp_cpu_configure_invalid_regions(void)
{
@@ -172,16 +176,60 @@ void esp_cpu_configure_region_protection(void)
extern int _instruction_reserved_end;
extern int _rodata_reserved_end;
const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
const uint32_t drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
const uint32_t page_aligned_irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
__attribute__((unused)) const uint32_t page_aligned_drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
PMP_ENTRY_CFG_RESET(7);
PMP_ENTRY_SET(7, SOC_IROM_LOW, NONE);
/**
Virtual space layout:
_________ <- SOC_IROM_LOW
| |
|_______| <- _instruction_reserved_end
|_______| <- page_aligned_irom_resv_end
| |
|_______| <- _rodata_reserved_end
|_______| <- page_aligned_drom_resv_end
| |
| |
| |
|_______| <- page_aligned_drom_resv_end + available_psram_heap
| |
| |
| |
| |
|_______| <- SOC_DROM_HIGH
if CONFIG_SPIRAM_FETCH_INSTRUCTIONS: [_instruction_reserved_end, page_aligned_irom_resv_end) in heap (RW)
if CONFIG_SPIRAM_RODATA: [_rodata_reserved_end, page_aligned_drom_resv_end) in heap (RW)
if CONFIG_SPIRAM: [_rodata_reserved_end, page_aligned_drom_resv_end + available_psram_heap] in heap / reserved for mapping (RW)
*/
PMP_ENTRY_CFG_RESET(8);
PMP_ENTRY_CFG_RESET(9);
PMP_ENTRY_CFG_RESET(10);
PMP_ENTRY_SET(7, SOC_IROM_LOW, NONE);
PMP_ENTRY_SET(8, irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(9, drom_resv_end, PMP_TOR | R);
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
PMP_ENTRY_SET(8, (uint32_t)(&_instruction_reserved_end), PMP_TOR | RX);
PMP_ENTRY_SET(9, page_aligned_irom_resv_end, PMP_TOR | RW);
#else
PMP_ENTRY_SET(8, page_aligned_irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(9, page_aligned_irom_resv_end, NONE);
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#if CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
PMP_ENTRY_SET(10, (uint32_t)(&_rodata_reserved_end), PMP_TOR | R);
#else
PMP_ENTRY_SET(10, page_aligned_drom_resv_end, PMP_TOR | R);
#endif /* CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#if CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
size_t available_psram_heap = esp_psram_get_heap_size_to_protect();
PMP_ENTRY_CFG_RESET(11);
PMP_ENTRY_SET(11, ALIGN_UP(page_aligned_drom_resv_end + available_psram_heap, SOC_CPU_PMP_REGION_GRANULARITY), PMP_TOR | RW);
#endif /* CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#else
const uint32_t pmpaddr7 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
// Add the W attribute in the case of PSRAM
@@ -190,7 +238,7 @@ void esp_cpu_configure_region_protection(void)
#endif
// 5. Peripheral addresses
const uint32_t pmpaddr10 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(10, pmpaddr10, PMP_NAPOT | RW);
const uint32_t pmpaddr12 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(12, pmpaddr12, PMP_NAPOT | RW);
_Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -11,6 +11,9 @@
#include "esp_fault.h"
#include "hal/cache_ll.h"
#include "riscv/csr.h"
#if CONFIG_SPIRAM
#include "esp_private/esp_psram_extram.h"
#endif /* CONFIG_SPIRAM */
#ifdef BOOTLOADER_BUILD
// Without L bit set
@@ -30,6 +33,7 @@
#define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_DOWN_TO_MMU_PAGE_SIZE(addr) ((addr) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_UP(addr, align) (((addr) + ((align) - 1)) & ~((align) - 1))
static void esp_cpu_configure_invalid_regions(void)
{
@@ -191,16 +195,34 @@ void esp_cpu_configure_region_protection(void)
extern int _instruction_reserved_end;
extern int _rodata_reserved_end;
const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
const uint32_t drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
const uint32_t page_aligned_irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
__attribute__((unused)) const uint32_t page_aligned_drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
// 5. I_Cache / D_Cache (flash)
#if CONFIG_SPIRAM_XIP_FROM_PSRAM && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
// We could have handled CONFIG_SPIRAM_FETCH_INSTRUCTIONS and CONFIG_SPIRAM_RODATA separately instead of only checking CONFIG_SPIRAM_XIP_FROM_PSRAM,
// but we don't have enough PMP entries to do so, which prevents finer-grained control over these memory regions
PMP_ENTRY_CFG_RESET(6);
PMP_ENTRY_CFG_RESET(7);
PMP_ENTRY_CFG_RESET(8);
PMP_ENTRY_CFG_RESET(9);
PMP_ENTRY_SET(6, SOC_EXTRAM_LOW, NONE);
PMP_ENTRY_SET(7, (uint32_t)(&_instruction_reserved_end), PMP_TOR | RX);
PMP_ENTRY_SET(8, page_aligned_irom_resv_end, PMP_TOR | RW);
PMP_ENTRY_SET(9, (uint32_t)(&_rodata_reserved_end), PMP_TOR | R);
size_t available_psram_heap = esp_psram_get_heap_size_to_protect();
PMP_ENTRY_CFG_RESET(10);
PMP_ENTRY_SET(10, ALIGN_UP(page_aligned_drom_resv_end + available_psram_heap, SOC_CPU_PMP_REGION_GRANULARITY), PMP_TOR | RW);
#else
PMP_ENTRY_CFG_RESET(6);
PMP_ENTRY_CFG_RESET(7);
PMP_ENTRY_CFG_RESET(8);
PMP_ENTRY_SET(6, SOC_IROM_LOW, NONE);
PMP_ENTRY_SET(7, irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(8, drom_resv_end, PMP_TOR | R);
PMP_ENTRY_SET(7, page_aligned_irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(8, page_aligned_drom_resv_end, PMP_TOR | R);
#endif /* CONFIG_SPIRAM_XIP_FROM_PSRAM && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#else
// 5. I_Cache / D_Cache (flash)
const uint32_t pmpaddr6 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
@@ -215,28 +237,28 @@ void esp_cpu_configure_region_protection(void)
/* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits
* Bootloader might have given extra permissions and those won't be cleared
*/
PMP_ENTRY_CFG_RESET(9);
PMP_ENTRY_CFG_RESET(10);
PMP_ENTRY_CFG_RESET(11);
PMP_ENTRY_CFG_RESET(12);
PMP_ENTRY_SET(9, SOC_RTC_IRAM_LOW, NONE);
PMP_ENTRY_CFG_RESET(13);
PMP_ENTRY_CFG_RESET(14);
PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
// First part of LP mem is reserved for RTC reserved mem (shared between bootloader and app)
// as well as memory for ULP coprocessor
#if CONFIG_ESP_SYSTEM_PMP_LP_CORE_RESERVE_MEM_EXECUTABLE
PMP_ENTRY_SET(10, (int)&_rtc_text_start, PMP_TOR | RWX);
PMP_ENTRY_SET(12, (int)&_rtc_text_start, PMP_TOR | RWX);
#else
PMP_ENTRY_SET(10, (int)&_rtc_text_start, PMP_TOR | RW);
PMP_ENTRY_SET(12, (int)&_rtc_text_start, PMP_TOR | RW);
#endif
PMP_ENTRY_SET(11, (int)&_rtc_text_end, PMP_TOR | RX);
PMP_ENTRY_SET(12, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
PMP_ENTRY_SET(13, (int)&_rtc_text_end, PMP_TOR | RX);
PMP_ENTRY_SET(14, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
#else
const uint32_t pmpaddr9 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | CONDITIONAL_RWX);
const uint32_t pmpaddr11 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
PMP_ENTRY_SET(11, pmpaddr11, PMP_NAPOT | CONDITIONAL_RWX);
_Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
#endif
// 7. Peripheral addresses
const uint32_t pmpaddr13 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(13, pmpaddr13, PMP_NAPOT | RW);
const uint32_t pmpaddr15 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(15, pmpaddr15, PMP_NAPOT | RW);
_Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
}

View File

@@ -2,18 +2,53 @@
#
# sourced into the "SPIRAM config" submenu for a specific chip.
config SPIRAM_BOOT_INIT
bool "Initialize SPI RAM during startup"
default "y"
config SPIRAM_BOOT_HW_INIT
bool "Initialise the PSRAM related hardware"
depends on SPIRAM
default "y" if SPIRAM
help
If this is enabled, the SPI RAM will be enabled during initial boot. Unless you
If this is enabled, the PSRAM hardware will be initialized during startup.
Enabling this config does not guarantee that the PSRAM is available for use;
it only initialises the PSRAM hardware.
This is necessary to configure PSRAM memory protection during boot-up.
config SPIRAM_BOOT_INIT
bool "Initialize PSRAM during startup, including the hardware and memory related configurations"
default "y"
depends on SPIRAM_BOOT_HW_INIT
help
If this is enabled, the PSRAM will be enabled during initial boot. Unless you
have specific requirements, you'll want to leave this enabled so memory allocated
during boot-up can also be placed in SPI RAM.
during boot-up can also be placed in PSRAM.
config SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
bool "Pre-configure memory protection for PSRAM"
default y if SPIRAM_BOOT_INIT
default n
depends on SPIRAM
help
If this is enabled, memory protection for the PSRAM will be pre-configured during initial boot.
This configuration takes into account the PSRAM memory layout that is set up by ESP-IDF's default
PSRAM initialization function, esp_psram_init().
The config is therefore enabled by default when SPIRAM_BOOT_INIT is enabled, because esp_psram_init()
is called from the startup code.
If you disable SPIRAM_BOOT_INIT only to delay PSRAM initialization and still plan to call ESP-IDF's
default PSRAM initialization function, esp_psram_init(), from the application code, keep this config
enabled so that memory protection is still applied to the PSRAM.
Note that enabling this config also assumes that the PSRAM memory left over after esp_psram_init()
has performed its memory configuration can be added to the heap with
esp_psram_extram_add_to_heap_allocator(), so this region is configured with heap memory protection (RW).
As an advanced usage, if you plan to configure the PSRAM memory regions manually without using
esp_psram_init(), disable this config to avoid memory protection and usage conflicts.
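For illustration, a minimal application-side sketch of the deferred-init flow this option supports (hypothetical app_main(); assumes CONFIG_SPIRAM_BOOT_HW_INIT=y, CONFIG_SPIRAM_BOOT_INIT=n and this option enabled, and uses only APIs touched elsewhere in this MR):

#include "esp_err.h"
#include "esp_psram.h"
#include "esp_private/esp_psram_extram.h"

void app_main(void)
{
    // PSRAM hardware was already initialised during startup (SPIRAM_BOOT_HW_INIT),
    // so this call only loads the XIP segments and maps the PSRAM memory.
    ESP_ERROR_CHECK(esp_psram_init());

    // Hand the remaining, already PMP-protected, PSRAM over to the heap allocator.
    ESP_ERROR_CHECK(esp_psram_extram_add_to_heap_allocator());
}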
config SPIRAM_IGNORE_NOTFOUND
bool "Ignore PSRAM when not found"
default "n"
depends on SPIRAM_BOOT_INIT && !SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY && !SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
depends on SPIRAM_BOOT_HW_INIT && !SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY && !SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
help
Normally, if psram initialization is enabled during compile time but not found at runtime, it
is seen as an error making the CPU panic. If this is enabled, booting will complete

View File

@@ -63,6 +63,7 @@ menu "PSRAM config"
config SPIRAM_XIP_FROM_PSRAM
bool "Enable Executable in place from (XiP) from PSRAM feature (READ HELP)"
default n
depends on SPIRAM_BOOT_INIT
select SPIRAM_FETCH_INSTRUCTIONS
select SPIRAM_RODATA
select SPIRAM_FLASH_LOAD_TO_PSRAM

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -60,6 +60,29 @@ bool esp_psram_extram_test(void);
*/
void esp_psram_bss_init(void);
/**
* @brief Initialize the PSRAM hardware.
* Only initializes the PSRAM hardware; it does not load the XIP segments or map the PSRAM memory
*
* @return
* - ESP_OK: On success
* - ESP_FAIL: PSRAM wasn't initialized successfully; potential reasons include wrong VDDSDIO, invalid chip ID, etc.
* - ESP_ERR_INVALID_STATE: PSRAM is initialized already
*/
esp_err_t esp_psram_chip_init(void);
/**
* @brief Calculates the effective PSRAM memory that would be / is added into the heap.
*
* @return The size of PSRAM memory that would be / is added into the heap in bytes, or 0 if PSRAM hardware isn't successfully initialized
* @note The function pre-calculates the effective size of the PSRAM memory that would be added to the heap after accounting for
* the XIP, ext bss and ext noinit segments. Thus, even if it is called before esp_psram_init(), it returns the final effective
* size that would be added to the heap once esp_psram_init() has run, rather than the raw PSRAM size.
* This function is only available if CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION is enabled.
*/
size_t esp_psram_get_heap_size_to_protect(void);
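A hedged usage sketch (hypothetical helper; only the two declarations above are assumed to exist) showing how the heap size that needs protection can be queried before the full esp_psram_init() has run:

#include <stddef.h>
#include "esp_err.h"
#include "esp_private/esp_psram_extram.h"

static size_t query_psram_heap_to_protect(void)
{
    // ESP_ERR_INVALID_STATE means the hardware was already initialised
    // (e.g. during boot via CONFIG_SPIRAM_BOOT_HW_INIT), which is fine here.
    esp_err_t err = esp_psram_chip_init();
    if (err != ESP_OK && err != ESP_ERR_INVALID_STATE) {
        return 0;
    }
    // Effective size that will end up in the heap after esp_psram_init(),
    // i.e. the span the RW PMP entry covering the PSRAM heap must include.
    return esp_psram_get_heap_size_to_protect();
}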
#if CONFIG_IDF_TARGET_ESP32
/**
* @brief Force a writeback of the data in the PSRAM cache. This is to be called whenever

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -17,6 +17,7 @@ extern "C" {
/**
* @brief Initialize PSRAM interface/hardware.
* Initializes the PSRAM hardware and loads the XIP segments or maps the PSRAM memory
*
* @return
* - ESP_OK: On success

View File

@@ -20,6 +20,7 @@ entries:
if SPIRAM_FLASH_LOAD_TO_PSRAM = y:
esp_psram_impl_ap_hex (noflash)
mmu_psram_flash_v2 (noflash)
esp_psram: esp_psram_chip_init (noflash)
esp_psram: esp_psram_init (noflash)
esp_psram: s_psram_chip_init (noflash)
esp_psram: s_xip_psram_placement (noflash)

View File

@@ -61,6 +61,14 @@
#define PSRAM_EARLY_LOGI ESP_EARLY_LOGI
#endif
#if CONFIG_SPIRAM_RODATA
extern uint8_t _rodata_reserved_end;
#endif /* CONFIG_SPIRAM_RODATA */
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
extern uint8_t _instruction_reserved_end;
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS */
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
extern uint8_t _ext_ram_bss_start;
extern uint8_t _ext_ram_bss_end;
@@ -71,6 +79,8 @@ extern uint8_t _ext_ram_noinit_start;
extern uint8_t _ext_ram_noinit_end;
#endif //#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
typedef struct {
intptr_t vaddr_start;
intptr_t vaddr_end;
@@ -78,7 +88,8 @@ typedef struct {
} psram_mem_t;
typedef struct {
bool is_initialised;
bool is_chip_initialised; // whether the PSRAM hardware has been initialised
bool is_initialised; // whether the PSRAM has been initialised with memory mapping and is ready to use
/**
* @note 1
* As we can't use heap allocator during this stage, we need to statically declare these regions.
@@ -138,9 +149,9 @@ static void IRAM_ATTR s_mapping(int v_start, int size)
}
#endif //CONFIG_IDF_TARGET_ESP32
static esp_err_t s_psram_chip_init(uint32_t *out_available_size)
static esp_err_t s_psram_chip_init(void)
{
if (s_psram_ctx.is_initialised) {
if (s_psram_ctx.is_chip_initialised) {
return ESP_ERR_INVALID_STATE;
}
@@ -152,7 +163,7 @@ static esp_err_t s_psram_chip_init(uint32_t *out_available_size)
#endif
return ret;
}
s_psram_ctx.is_initialised = true;
s_psram_ctx.is_chip_initialised = true;
uint32_t psram_physical_size = 0;
ret = esp_psram_impl_get_physical_size(&psram_physical_size);
@@ -167,13 +178,6 @@ static esp_err_t s_psram_chip_init(uint32_t *out_available_size)
ESP_EARLY_LOGI(TAG, "PSRAM initialized, cache is in low/high (2-core) mode.");
#endif
#endif
uint32_t psram_available_size = 0;
ret = esp_psram_impl_get_available_size(&psram_available_size);
assert(ret == ESP_OK);
*out_available_size = psram_available_size;
return ESP_OK;
}
@@ -349,15 +353,26 @@ static void s_psram_mapping(uint32_t psram_available_size, uint32_t start_page)
#endif
}
esp_err_t esp_psram_chip_init(void)
{
return s_psram_chip_init();
}
esp_err_t esp_psram_init(void)
{
esp_err_t ret = ESP_FAIL;
uint32_t psram_available_size = 0;
ret = s_psram_chip_init(&psram_available_size);
if (ret != ESP_OK) {
return ret;
if (!s_psram_ctx.is_chip_initialised) {
ret = esp_psram_chip_init();
if (ret != ESP_OK) {
return ret;
}
}
uint32_t psram_available_size = 0;
ret = esp_psram_impl_get_available_size(&psram_available_size);
assert(ret == ESP_OK);
/**
* `start_page` is the psram physical address in MMU page size.
* MMU page size on ESP32S2 is 64KB
@@ -382,6 +397,7 @@ esp_err_t esp_psram_init(void)
cache_register_writeback(&drv);
#endif
s_psram_ctx.is_initialised = true;
return ESP_OK;
}
@@ -411,6 +427,31 @@ esp_err_t esp_psram_extram_add_to_heap_allocator(void)
ESP_EARLY_LOGI(TAG, "Adding pool of %dK of PSRAM memory to heap allocator",
(s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size + s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size) / 1024);
// SOC_MMU_DI_VADDR_SHARED is required here because, on targets with separate data and instruction virtual address spaces,
// the PSRAM gap created by the alignment needed when placing the instruction segment in the instruction virtual address space
// cannot be added to the heap, since that region cannot be configured with write permissions.
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && SOC_MMU_DI_VADDR_SHARED
if ((uint32_t)&_instruction_reserved_end & (CONFIG_MMU_PAGE_SIZE - 1)) {
uint32_t instruction_alignment_gap_heap_start, instruction_alignment_gap_heap_end;
mmu_psram_get_instruction_alignment_gap_info(&instruction_alignment_gap_heap_start, &instruction_alignment_gap_heap_end);
ret = heap_caps_add_region_with_caps(byte_aligned_caps, instruction_alignment_gap_heap_start, instruction_alignment_gap_heap_end);
if (ret == ESP_OK) {
ESP_EARLY_LOGI(TAG, "Adding pool of %dK of PSRAM memory gap generated due to end address alignment of irom to the heap allocator", (instruction_alignment_gap_heap_end - instruction_alignment_gap_heap_start) / 1024);
}
}
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS */
// In the case of ESP32S2, the rodata is mapped to a read-only region (SOC_DROM0_ADDRESS_LOW - SOC_DROM0_ADDRESS_HIGH), thus we cannot add this region to the heap.
#if CONFIG_SPIRAM_RODATA && !CONFIG_IDF_TARGET_ESP32S2
if ((uint32_t)&_rodata_reserved_end & (CONFIG_MMU_PAGE_SIZE - 1)) {
uint32_t rodata_alignment_gap_heap_start, rodata_alignment_gap_heap_end;
mmu_psram_get_rodata_alignment_gap_info(&rodata_alignment_gap_heap_start, &rodata_alignment_gap_heap_end);
ret = heap_caps_add_region_with_caps(byte_aligned_caps, rodata_alignment_gap_heap_start, rodata_alignment_gap_heap_end);
if (ret == ESP_OK) {
ESP_EARLY_LOGI(TAG, "Adding pool of %dK of PSRAM memory gap generated due to end address alignment of drom to the heap allocator", (rodata_alignment_gap_heap_end - rodata_alignment_gap_heap_start) / 1024);
}
}
#endif /* CONFIG_SPIRAM_RODATA */
return ESP_OK;
}
@@ -420,8 +461,24 @@ bool IRAM_ATTR esp_psram_check_ptr_addr(const void *p)
return false;
}
return ((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_end) ||
((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_end);
if (((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_end) ||
((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_end)) {
return true;
}
#if CONFIG_SPIRAM_RODATA && !CONFIG_IDF_TARGET_ESP32S2
if (mmu_psram_check_ptr_addr_in_rodata_alignment_gap(p)) {
return true;
}
#endif /* CONFIG_SPIRAM_RODATA && !CONFIG_IDF_TARGET_ESP32S2 */
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && SOC_MMU_DI_VADDR_SHARED
if (mmu_psram_check_ptr_addr_in_instruction_alignment_gap(p)) {
return true;
}
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS && SOC_MMU_DI_VADDR_SHARED */
return false;
}
esp_err_t esp_psram_extram_reserve_dma_pool(size_t size)
@@ -551,3 +608,94 @@ void esp_psram_bss_init(void)
memset(&_ext_ram_bss_start, 0, size);
#endif
}
#if CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
static inline uint32_t s_get_ext_bss_size(void)
{
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
return ((intptr_t)&_ext_ram_bss_end - (intptr_t)&_ext_ram_bss_start);
#else
return 0;
#endif /* CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY */
}
static inline uint32_t s_get_ext_noinit_size(void)
{
#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
return ((intptr_t)&_ext_ram_noinit_end - (intptr_t)&_ext_ram_noinit_start);
#else
return 0;
#endif /* CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY */
}
/**
* @brief Calculates the effective PSRAM memory that would be / is mapped.
*
* @return The size of PSRAM memory that would be / is mapped in bytes, or 0 if PSRAM isn't successfully initialized
*/
static size_t esp_psram_get_effective_mapped_size(void)
{
size_t byte_aligned_size = 0;
size_t total_mapped_size = 0;
// return if the PSRAM is not enabled
if (!s_psram_ctx.is_chip_initialised) {
return 0;
}
if (s_psram_ctx.is_initialised) {
return s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].size + s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size;
} else {
uint32_t psram_available_size = 0;
esp_err_t ret = esp_psram_impl_get_available_size(&psram_available_size);
assert(ret == ESP_OK);
#if CONFIG_SPIRAM_RODATA
psram_available_size -= mmu_psram_get_rodata_segment_length();
#endif /* CONFIG_SPIRAM_RODATA */
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
psram_available_size -= mmu_psram_get_text_segment_length();
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS */
ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &byte_aligned_size);
assert(ret == ESP_OK);
total_mapped_size += MIN(byte_aligned_size, psram_available_size - total_mapped_size);
#if CONFIG_IDF_TARGET_ESP32S2
if (total_mapped_size < psram_available_size) {
size_t word_aligned_size = 0;
ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &word_aligned_size);
assert(ret == ESP_OK);
total_mapped_size += MIN(word_aligned_size, psram_available_size - total_mapped_size);
}
#endif
return total_mapped_size;
}
}
size_t esp_psram_get_heap_size_to_protect(void)
{
// return if the PSRAM is not enabled
if (!s_psram_ctx.is_chip_initialised) {
return 0;
}
if (s_psram_ctx.is_initialised) {
return s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size + s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size;
} else {
size_t effective_mapped_size = esp_psram_get_effective_mapped_size();
if (effective_mapped_size == 0) {
return 0;
}
effective_mapped_size -= s_get_ext_bss_size();
effective_mapped_size -= s_get_ext_noinit_size();
#if CONFIG_IDF_TARGET_ESP32
effective_mapped_size -= esp_himem_reserved_area_size() - 1;
#endif
return effective_mapped_size;
}
}
#endif /* CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */

View File

@@ -1,8 +1,11 @@
# Documentation: .gitlab/ci/README.md#manifest-file-to-control-the-buildtest-apps
components/esp_psram/test_apps/psram:
enable:
- if: CONFIG_NAME == "psram_no_boot_init" and SOC_SPIRAM_SUPPORTED == 1
disable:
- if: SOC_SPIRAM_SUPPORTED != 1
- if: CONFIG_NAME == "xip_psram_no_boot_init" and SOC_SPIRAM_XIP_SUPPORTED != 1
depends_components:
- esp_psram
- esp_mm

View File

@@ -1,10 +1,15 @@
idf_build_get_property(target IDF_TARGET)
set(srcs "test_app_main.c"
"test_psram.c")
set(srcs "test_app_main.c")
if(${target} STREQUAL "esp32")
list(APPEND srcs "test_himem.c" "test_4mpsram.c")
if(CONFIG_SPIRAM_BOOT_INIT)
list(APPEND srcs "test_psram.c")
if(${target} STREQUAL "esp32")
list(APPEND srcs "test_himem.c" "test_4mpsram.c")
endif()
else()
list(APPEND srcs "test_psram_no_boot_init.c")
endif()
# In order for the cases defined by `TEST_CASE` to be linked into the final elf,

View File

@@ -0,0 +1,25 @@
/*
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/
#include "sdkconfig.h"
#include "unity.h"
#include "esp_psram.h"
#include "esp_private/esp_psram_extram.h"
TEST_CASE("test psram no boot init", "[psram_no_boot_init]")
{
#if CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
// As only the PSRAM hardware is initialised during boot-up (the memory is not yet mapped), the API
// esp_psram_get_heap_size_to_protect() pre-calculates the size of the PSRAM heap
size_t manually_calculated_psram_heap = esp_psram_get_heap_size_to_protect();
#endif /* CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
TEST_ESP_OK(esp_psram_init());
#if CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
size_t final_psram_heap = esp_psram_get_heap_size_to_protect();
TEST_ASSERT_EQUAL(final_psram_heap, manually_calculated_psram_heap);
#endif /* CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
}

View File

@@ -112,3 +112,29 @@ def test_psram_esp32c5(dut: Dut) -> None:
@idf_parametrize('target', ['esp32c61'], indirect=['target'])
def test_psram_esp32c61(dut: Dut) -> None:
dut.run_all_single_board_cases()
@pytest.mark.generic
@pytest.mark.parametrize(
'config',
[
'xip_psram_no_boot_init',
],
indirect=True,
)
@idf_parametrize('target', ['esp32s2', 'esp32s3', 'esp32c5', 'esp32c61'], indirect=['target'])
def test_xip_psram_no_boot_init(dut: Dut) -> None:
dut.run_all_single_board_cases()
@pytest.mark.generic
@pytest.mark.parametrize(
'config',
[
'psram_no_boot_init',
],
indirect=True,
)
@idf_parametrize('target', ['supported_targets'], indirect=['target'])
def test_psram_no_boot_init(dut: Dut) -> None:
dut.run_all_single_board_cases()

View File

@@ -0,0 +1,4 @@
CONFIG_SPIRAM=y
CONFIG_SPIRAM_BOOT_HW_INIT=y
CONFIG_SPIRAM_BOOT_INIT=n
CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION=y

View File

@@ -0,0 +1,7 @@
CONFIG_SPIRAM=y
CONFIG_SPIRAM_XIP_FROM_PSRAM=y
CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY=y
CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY=y
CONFIG_SPIRAM_BOOT_HW_INIT=y
CONFIG_SPIRAM_BOOT_INIT=n
CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION=y

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -39,6 +39,31 @@ extern "C" {
Part 1 APIs (See @Backgrounds on top of this file)
-------------------------------------------------------------------------------*/
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
/**
* @brief Calculates the size of memory that would be used for copying flash texts into PSRAM (in bytes)
*
* @return size_t size of memory that would be used for copying flash texts into PSRAM (in bytes)
*/
size_t mmu_psram_get_text_segment_length(void);
/**
* @brief Get the start and end of the instruction segment alignment gap
*
* @param[out] gap_start Start of the gap
* @param[out] gap_end End of the gap
*/
void mmu_psram_get_instruction_alignment_gap_info(uint32_t *gap_start, uint32_t *gap_end);
/**
* @brief Check if the pointer is in the instruction alignment gap
*
* @param[in] p Pointer to check
*
* @return true if the pointer is in the instruction alignment gap, false otherwise
*/
bool mmu_psram_check_ptr_addr_in_instruction_alignment_gap(const void *p);
/**
* @brief Copy Flash texts to PSRAM
*
@@ -50,6 +75,22 @@ esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
#if CONFIG_SPIRAM_RODATA
/**
* @brief Get the start and end of the rodata segment alignment gap
*
* @param[out] gap_start Start of the gap
* @param[out] gap_end End of the gap
*/
void mmu_psram_get_rodata_alignment_gap_info(uint32_t *gap_start, uint32_t *gap_end);
/**
* @brief Calculates the size of memory that would be used for copying flash rodata into PSRAM (in bytes)
*
* @return size_t size of memory that would be used for copying flash rodata into PSRAM (in bytes)
*/
size_t mmu_psram_get_rodata_segment_length(void);
/**
* @brief Copy Flash rodata to PSRAM
*
@@ -58,6 +99,15 @@ esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size
* @param[out] out_page Used pages
*/
esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page);
/**
* @brief Check if the pointer is in the rodata alignment gap
*
* @param[in] p Pointer to check
*
* @return true if the pointer is in the rodata alignment gap, false otherwise
*/
bool mmu_psram_check_ptr_addr_in_rodata_alignment_gap(const void *p);
#endif //#if CONFIG_SPIRAM_RODATA
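For reference, a hedged consumer sketch of the rodata gap helpers above (simplified from the esp_psram.c change in this MR; the include paths and the use of the generic heap_caps_add_region() instead of the caps-specific variant are assumptions):

#include <stdint.h>
#include "esp_heap_caps_init.h"           // assumed location of heap_caps_add_region()
#include "esp_private/mmu_psram_flash.h"  // assumed include path for this header

static void add_rodata_alignment_gap_to_heap(void)
{
    uint32_t gap_start = 0;
    uint32_t gap_end = 0;
    mmu_psram_get_rodata_alignment_gap_info(&gap_start, &gap_end);

    // Only a non-empty gap (i.e. _rodata_reserved_end not already MMU-page aligned) is worth adding
    if (gap_end > gap_start) {
        heap_caps_add_region((intptr_t)gap_start, (intptr_t)gap_end);
    }
}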
/*----------------------------------------------------------------------------

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -16,6 +16,7 @@
* APIs in 2 will be refactored when MMU driver is ready
*/
#include <stdbool.h>
#include <sys/param.h>
#include "sdkconfig.h"
#include "esp_log.h"
@@ -31,6 +32,8 @@
#include "esp32s3/rom/cache.h"
#endif
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
/*----------------------------------------------------------------------------
Part 1 APIs (See @Backgrounds on top of this file)
-------------------------------------------------------------------------------*/
@@ -44,10 +47,12 @@ static uint32_t page0_page = INVALID_PHY_PAGE;
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
uint32_t page_id = start_page;
extern char _instruction_reserved_end;
#define INSTRUCTION_ALIGNMENT_GAP_START ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, 4)
#define INSTRUCTION_ALIGNMENT_GAP_END ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, CONFIG_MMU_PAGE_SIZE)
size_t mmu_psram_get_text_segment_length(void)
{
uint32_t flash_pages = 0;
#if CONFIG_IDF_TARGET_ESP32S2
flash_pages += Cache_Count_Flash_Pages(PRO_CACHE_IBUS0, &page0_mapped);
@@ -55,9 +60,33 @@ esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size
#elif CONFIG_IDF_TARGET_ESP32S3
flash_pages += Cache_Count_Flash_Pages(CACHE_IBUS, &page0_mapped);
#endif
if ((flash_pages + page_id) > BYTES_TO_MMU_PAGE(psram_size)) {
return MMU_PAGE_TO_BYTES(flash_pages);
}
void mmu_psram_get_instruction_alignment_gap_info(uint32_t *gap_start, uint32_t *gap_end)
{
// As we need the memory to start with word aligned address, max virtual space that could be wasted = 3 bytes
// Or create a new region from (uint32_t)&_instruction_reserved_end to ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, 4) as only byte-accessible
*gap_start = INSTRUCTION_ALIGNMENT_GAP_START;
*gap_end = INSTRUCTION_ALIGNMENT_GAP_END;
}
bool IRAM_ATTR mmu_psram_check_ptr_addr_in_instruction_alignment_gap(const void *p)
{
if ((intptr_t)p >= INSTRUCTION_ALIGNMENT_GAP_START && (intptr_t)p < INSTRUCTION_ALIGNMENT_GAP_END) {
return true;
}
return false;
}
esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
uint32_t page_id = start_page;
uint32_t flash_bytes = mmu_psram_get_text_segment_length();
if ((flash_bytes + MMU_PAGE_TO_BYTES(page_id)) > psram_size) {
ESP_EARLY_LOGE(TAG, "PSRAM space not enough for the Flash instructions, need %" PRIu32 " B, from %" PRIu32 " B to %" PRIu32 " B",
MMU_PAGE_TO_BYTES(flash_pages), MMU_PAGE_TO_BYTES(start_page), MMU_PAGE_TO_BYTES(flash_pages + page_id));
flash_bytes, MMU_PAGE_TO_BYTES(start_page), flash_bytes + MMU_PAGE_TO_BYTES(page_id));
return ESP_FAIL;
}
@@ -87,10 +116,12 @@ esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
#if CONFIG_SPIRAM_RODATA
esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
uint32_t page_id = start_page;
extern char _rodata_reserved_end;
#define RODATA_ALIGNMENT_GAP_START ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, 4)
#define RODATA_ALIGNMENT_GAP_END ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, CONFIG_MMU_PAGE_SIZE)
size_t mmu_psram_get_rodata_segment_length(void)
{
uint32_t flash_pages = 0;
#if CONFIG_IDF_TARGET_ESP32S2
flash_pages += Cache_Count_Flash_Pages(PRO_CACHE_IBUS2, &page0_mapped);
@@ -100,8 +131,33 @@ esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_si
#elif CONFIG_IDF_TARGET_ESP32S3
flash_pages += Cache_Count_Flash_Pages(CACHE_DBUS, &page0_mapped);
#endif
if ((flash_pages + page_id) > BYTES_TO_MMU_PAGE(psram_size)) {
ESP_EARLY_LOGE(TAG, "SPI RAM space not enough for the instructions, need to copy to %" PRIu32 " B.", MMU_PAGE_TO_BYTES(flash_pages + page_id));
return MMU_PAGE_TO_BYTES(flash_pages);
}
void mmu_psram_get_rodata_alignment_gap_info(uint32_t *gap_start, uint32_t *gap_end)
{
// As we need the memory to start with word aligned address, max virtual space that could be wasted = 3 bytes
// Or create a new region from (uint32_t)&_rodata_reserved_end to ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, 4) as only byte-accessible
*gap_start = RODATA_ALIGNMENT_GAP_START;
*gap_end = RODATA_ALIGNMENT_GAP_END;
}
bool IRAM_ATTR mmu_psram_check_ptr_addr_in_rodata_alignment_gap(const void *p)
{
if ((intptr_t)p >= RODATA_ALIGNMENT_GAP_START && (intptr_t)p < RODATA_ALIGNMENT_GAP_END) {
return true;
}
return false;
}
esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
uint32_t page_id = start_page;
uint32_t flash_bytes = mmu_psram_get_rodata_segment_length();
if ((flash_bytes + MMU_PAGE_TO_BYTES(page_id)) > psram_size) {
ESP_EARLY_LOGE(TAG, "SPI RAM space not enough for the instructions, need to copy to %" PRIu32 " B.", flash_bytes + MMU_PAGE_TO_BYTES(page_id));
return ESP_FAIL;
}

View File

@@ -10,6 +10,7 @@
* The XIP PSRAM is done by CPU copy, v1(see mmu_psram_flash.c) is done by Cache copy
*/
#include <stdbool.h>
#include <sys/param.h>
#include <string.h>
#include "sdkconfig.h"
@@ -53,6 +54,17 @@ static int s_drom_paddr_offset;
static uint32_t s_do_load_from_flash(uint32_t flash_paddr_start, uint32_t size, uint32_t target_vaddr_start, uint32_t target_paddr_start)
{
uint32_t flash_end_page_vaddr = SOC_DRAM_FLASH_ADDRESS_HIGH - CONFIG_MMU_PAGE_SIZE;
uint32_t psram_vaddr_start;
#if !CONFIG_SPIRAM_BOOT_INIT
// Once the PMP sets up the IROM/DROM split, target_vaddr_start no longer has write (W) access. If s_do_load_from_flash() runs after the PMP split
// has been configured, copying the flash contents into the PSRAM would therefore generate a store access fault.
// Thus, we need to choose a different PSRAM virtual address (one that still has PMP write (W) access) to map and copy the flash contents into the PSRAM.
// Choosing the second last PSRAM page instead of the last one, to avoid overlap with flash_end_page_vaddr for targets that share the same flash and psram virtual space.
uint32_t psram_second_last_page_vaddr = SOC_DRAM_PSRAM_ADDRESS_HIGH - 2 * CONFIG_MMU_PAGE_SIZE;
assert((psram_second_last_page_vaddr % CONFIG_MMU_PAGE_SIZE) == 0);
#endif
ESP_EARLY_LOGV(TAG, "flash_paddr_start: 0x%"PRIx32", flash_end_page_vaddr: 0x%"PRIx32", size: 0x%"PRIx32", target_vaddr_start: 0x%"PRIx32, flash_paddr_start, flash_end_page_vaddr, size, target_vaddr_start);
assert((flash_paddr_start % CONFIG_MMU_PAGE_SIZE) == 0);
assert((flash_end_page_vaddr % CONFIG_MMU_PAGE_SIZE) == 0);
@@ -61,15 +73,28 @@ static uint32_t s_do_load_from_flash(uint32_t flash_paddr_start, uint32_t size,
uint32_t mapped_size = 0;
while (mapped_size < size) {
uint32_t actual_mapped_len = 0;
mmu_hal_map_region(MMU_LL_PSRAM_MMU_ID, MMU_TARGET_PSRAM0, target_vaddr_start, target_paddr_start + mapped_size, CONFIG_MMU_PAGE_SIZE, &actual_mapped_len);
#if !CONFIG_SPIRAM_BOOT_INIT
psram_vaddr_start = psram_second_last_page_vaddr;
#else
psram_vaddr_start = target_vaddr_start;
#endif /* !CONFIG_SPIRAM_BOOT_INIT*/
mmu_hal_map_region(MMU_LL_PSRAM_MMU_ID, MMU_TARGET_PSRAM0, psram_vaddr_start, target_paddr_start + mapped_size, CONFIG_MMU_PAGE_SIZE, &actual_mapped_len);
assert(actual_mapped_len == CONFIG_MMU_PAGE_SIZE);
mmu_hal_map_region(MMU_LL_FLASH_MMU_ID, MMU_TARGET_FLASH0, flash_end_page_vaddr, flash_paddr_start + mapped_size, CONFIG_MMU_PAGE_SIZE, &actual_mapped_len);
assert(actual_mapped_len == CONFIG_MMU_PAGE_SIZE);
cache_hal_invalidate_addr(target_vaddr_start, CONFIG_MMU_PAGE_SIZE);
cache_hal_invalidate_addr(psram_vaddr_start, CONFIG_MMU_PAGE_SIZE);
cache_hal_invalidate_addr(flash_end_page_vaddr, CONFIG_MMU_PAGE_SIZE);
memcpy((void *)target_vaddr_start, (void *)flash_end_page_vaddr, CONFIG_MMU_PAGE_SIZE);
memcpy((void *)psram_vaddr_start, (void *)flash_end_page_vaddr, CONFIG_MMU_PAGE_SIZE);
#if !CONFIG_SPIRAM_BOOT_INIT
cache_hal_writeback_addr(psram_vaddr_start, CONFIG_MMU_PAGE_SIZE);
mmu_hal_map_region(MMU_LL_PSRAM_MMU_ID, MMU_TARGET_PSRAM0, target_vaddr_start, target_paddr_start + mapped_size, CONFIG_MMU_PAGE_SIZE, &actual_mapped_len);
assert(actual_mapped_len == CONFIG_MMU_PAGE_SIZE);
cache_hal_invalidate_addr(target_vaddr_start, CONFIG_MMU_PAGE_SIZE);
#endif /* !CONFIG_SPIRAM_BOOT_INIT */
ESP_EARLY_LOGV(TAG, "target_vaddr_start: 0x%"PRIx32, target_vaddr_start);
mapped_size += CONFIG_MMU_PAGE_SIZE;
@@ -84,10 +109,35 @@ static uint32_t s_do_load_from_flash(uint32_t flash_paddr_start, uint32_t size,
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
/* As heap memory is allocated in 4-byte aligned manner, we need to align the instruction to 4-byte boundary */
#define INSTRUCTION_ALIGNMENT_GAP_START ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, 4)
/* The end of the instruction is aligned to CONFIG_MMU_PAGE_SIZE boundary as the flash instruction is mapped to PSRAM */
#define INSTRUCTION_ALIGNMENT_GAP_END ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, CONFIG_MMU_PAGE_SIZE)
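/* Worked example (editorial; hypothetical numbers) with CONFIG_MMU_PAGE_SIZE = 0x10000 (64 KB):
 * if _instruction_reserved_end == 0x48012345, the gap runs from 0x48012348 (next 4-byte boundary)
 * up to 0x48020000 (next MMU page boundary), i.e. ~55 KB of otherwise wasted XIP PSRAM reclaimed for the heap. */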
size_t mmu_psram_get_text_segment_length(void)
{
return ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, CONFIG_MMU_PAGE_SIZE) - ALIGN_DOWN_BY((uint32_t)&_instruction_reserved_start, CONFIG_MMU_PAGE_SIZE);
}
void mmu_psram_get_instruction_alignment_gap_info(uint32_t *gap_start, uint32_t *gap_end)
{
// As we need the memory to start with word aligned address, max virtual space that could be wasted = 3 bytes
// Or create a new region from (uint32_t)&_instruction_reserved_end to ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, 4) as only byte-accessible
*gap_start = INSTRUCTION_ALIGNMENT_GAP_START;
*gap_end = INSTRUCTION_ALIGNMENT_GAP_END;
}
bool IRAM_ATTR mmu_psram_check_ptr_addr_in_instruction_alignment_gap(const void *p)
{
if ((intptr_t)p >= INSTRUCTION_ALIGNMENT_GAP_START && (intptr_t)p < INSTRUCTION_ALIGNMENT_GAP_END) {
return true;
}
return false;
}
esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
size_t irom_size = ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, CONFIG_MMU_PAGE_SIZE) - ALIGN_DOWN_BY((uint32_t)&_instruction_reserved_start, CONFIG_MMU_PAGE_SIZE);
s_irom_size = irom_size;
s_irom_size = mmu_psram_get_text_segment_length();
uint32_t flash_drom_paddr_start = 0;
uint32_t flash_irom_paddr_start = 0;
@@ -95,8 +145,8 @@ esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size
flash_irom_paddr_start = ALIGN_DOWN_BY(flash_irom_paddr_start, CONFIG_MMU_PAGE_SIZE);
ESP_EARLY_LOGV(TAG, "flash_irom_paddr_start: 0x%x", flash_irom_paddr_start);
if ((MMU_PAGE_TO_BYTES(start_page) + irom_size) > psram_size) {
ESP_EARLY_LOGE(TAG, "PSRAM space not enough for the Flash instructions, need %"PRId32" B, from %"PRId32" B to %"PRId32" B", irom_size, MMU_PAGE_TO_BYTES(start_page), MMU_PAGE_TO_BYTES(start_page) + irom_size);
if ((MMU_PAGE_TO_BYTES(start_page) + s_irom_size) > psram_size) {
ESP_EARLY_LOGE(TAG, "PSRAM space not enough for the Flash instructions, need %"PRId32" B, from %"PRId32" B to %"PRId32" B", s_irom_size, MMU_PAGE_TO_BYTES(start_page), MMU_PAGE_TO_BYTES(start_page) + s_irom_size);
return ESP_ERR_NO_MEM;
}
@@ -106,12 +156,12 @@ esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size
ESP_EARLY_LOGV(TAG, "flash_irom_paddr_start: 0x%"PRIx32", MMU_PAGE_TO_BYTES(start_page): 0x%"PRIx32", s_irom_paddr_offset: 0x%"PRIx32", s_irom_vaddr_start: 0x%"PRIx32, flash_irom_paddr_start, MMU_PAGE_TO_BYTES(start_page), s_irom_paddr_offset, s_irom_vaddr_start);
uint32_t mapped_size = 0;
mapped_size = s_do_load_from_flash(flash_irom_paddr_start, irom_size, irom_load_addr_aligned, MMU_PAGE_TO_BYTES(start_page));
cache_hal_writeback_addr(irom_load_addr_aligned, irom_size);
mapped_size = s_do_load_from_flash(flash_irom_paddr_start, s_irom_size, irom_load_addr_aligned, MMU_PAGE_TO_BYTES(start_page));
cache_hal_writeback_addr(irom_load_addr_aligned, s_irom_size);
ESP_EARLY_LOGV(TAG, "after mapping text, starting from paddr=0x%08"PRIx32" and vaddr=0x%08"PRIx32", 0x%"PRIx32" bytes are mapped", MMU_PAGE_TO_BYTES(start_page), irom_load_addr_aligned, mapped_size);
*out_page = BYTES_TO_MMU_PAGE(irom_size);
*out_page = BYTES_TO_MMU_PAGE(s_irom_size);
ESP_EARLY_LOGI(TAG, ".text xip on psram");
return ESP_OK;
@@ -119,10 +169,36 @@ esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
#if CONFIG_SPIRAM_RODATA
size_t mmu_psram_get_rodata_segment_length(void)
{
return ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, CONFIG_MMU_PAGE_SIZE) - ALIGN_DOWN_BY((uint32_t)&_rodata_reserved_start, CONFIG_MMU_PAGE_SIZE);
}
/* As heap memory is allocated in 4-byte aligned manner, we need to align the rodata to 4-byte boundary */
#define RODATA_ALIGNMENT_GAP_START ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, 4)
/* The end of the rodata is aligned to CONFIG_MMU_PAGE_SIZE boundary as the flash rodata is mapped to PSRAM */
#define RODATA_ALIGNMENT_GAP_END ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, CONFIG_MMU_PAGE_SIZE)
void mmu_psram_get_rodata_alignment_gap_info(uint32_t *gap_start, uint32_t *gap_end)
{
// As we need the memory to start with word aligned address, max virtual space that could be wasted = 3 bytes
// Or create a new region from (uint32_t)&_rodata_reserved_end to ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, 4) as only byte-accessible
*gap_start = RODATA_ALIGNMENT_GAP_START;
*gap_end = RODATA_ALIGNMENT_GAP_END;
}
bool IRAM_ATTR mmu_psram_check_ptr_addr_in_rodata_alignment_gap(const void *p)
{
if ((intptr_t)p >= RODATA_ALIGNMENT_GAP_START && (intptr_t)p < RODATA_ALIGNMENT_GAP_END) {
return true;
}
return false;
}
esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
size_t drom_size = ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, CONFIG_MMU_PAGE_SIZE) - ALIGN_DOWN_BY((uint32_t)&_rodata_reserved_start, CONFIG_MMU_PAGE_SIZE);
s_drom_size = drom_size;
s_drom_size = mmu_psram_get_rodata_segment_length();
uint32_t flash_drom_paddr_start = 0;
uint32_t flash_irom_paddr_start = 0;
@@ -130,8 +206,8 @@ esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_si
flash_drom_paddr_start = ALIGN_DOWN_BY(flash_drom_paddr_start, CONFIG_MMU_PAGE_SIZE);
ESP_EARLY_LOGV(TAG, "flash_drom_paddr_start: 0x%x", flash_drom_paddr_start);
if ((MMU_PAGE_TO_BYTES(start_page) + drom_size) > psram_size) {
ESP_EARLY_LOGE(TAG, "PSRAM space not enough for the Flash rodata, need %"PRId32" B, from %"PRId32" B to %"PRId32" B", drom_size, MMU_PAGE_TO_BYTES(start_page), MMU_PAGE_TO_BYTES(start_page) + drom_size);
if ((MMU_PAGE_TO_BYTES(start_page) + s_drom_size) > psram_size) {
ESP_EARLY_LOGE(TAG, "PSRAM space not enough for the Flash rodata, need %"PRId32" B, from %"PRId32" B to %"PRId32" B", s_drom_size, MMU_PAGE_TO_BYTES(start_page), MMU_PAGE_TO_BYTES(start_page) + s_drom_size);
return ESP_ERR_NO_MEM;
}
@@ -141,12 +217,12 @@ esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_si
ESP_EARLY_LOGV(TAG, "flash_drom_paddr_start: 0x%"PRIx32", MMU_PAGE_TO_BYTES(start_page): 0x%"PRIx32", s_drom_paddr_offset: 0x%"PRIx32", s_drom_vaddr_start: 0x%"PRIx32, flash_drom_paddr_start, MMU_PAGE_TO_BYTES(start_page), s_drom_paddr_offset, s_drom_vaddr_start);
uint32_t mapped_size = 0;
mapped_size = s_do_load_from_flash(flash_drom_paddr_start, drom_size, drom_load_addr_aligned, MMU_PAGE_TO_BYTES(start_page));
cache_hal_writeback_addr(drom_load_addr_aligned, drom_size);
mapped_size = s_do_load_from_flash(flash_drom_paddr_start, s_drom_size, drom_load_addr_aligned, MMU_PAGE_TO_BYTES(start_page));
cache_hal_writeback_addr(drom_load_addr_aligned, s_drom_size);
ESP_EARLY_LOGV(TAG, "after mapping rodata, starting from paddr=0x%08"PRIx32" and vaddr=0x%08"PRIx32", 0x%"PRIx32" bytes are mapped", MMU_PAGE_TO_BYTES(start_page), drom_load_addr_aligned, mapped_size);
*out_page = BYTES_TO_MMU_PAGE(drom_size);
*out_page = BYTES_TO_MMU_PAGE(s_drom_size);
ESP_EARLY_LOGI(TAG, ".rodata xip on psram");
return ESP_OK;

View File

@@ -278,6 +278,11 @@ SECTIONS
*/
. += _esp_flash_mmap_prefetch_pad_size;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
/* Align the end of the flash text region to the PMP granularity, as PSRAM memory protection is enabled */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
_text_end = ABSOLUTE(.);
/**
* Mark the flash.text end.
@@ -418,6 +423,12 @@ SECTIONS
*(.tdata .tdata.* .gnu.linkonce.td.*)
. = ALIGN(ALIGNOF(.flash.tbss));
#if CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
/* Align the end of the flash rodata region to the PMP granularity, as PSRAM memory protection is enabled */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
_thread_local_data_end = ABSOLUTE(.);
} > default_rodata_seg
ASSERT_SECTIONS_GAP(.flash.tdata, .flash.tbss)

View File

@@ -136,6 +136,11 @@ SECTIONS
*/
. += _esp_flash_mmap_prefetch_pad_size;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
/* Align the end of flash text region as per PMP granularity */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_FETCH_INSTRUCTIONS
_text_end = ABSOLUTE(.);
/**
* Mark the flash.text end.
@@ -276,6 +281,12 @@ SECTIONS
*(.tdata .tdata.* .gnu.linkonce.td.*)
. = ALIGN(ALIGNOF(.flash.tbss));
#if CONFIG_SPIRAM_RODATA
/* Align the end of flash rodata region as per PMP granularity */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_RODATA
_thread_local_data_end = ABSOLUTE(.);
} > default_rodata_seg
ASSERT_SECTIONS_GAP(.flash.tdata, .flash.tbss)

View File

@@ -293,6 +293,11 @@ SECTIONS
*/
. += _esp_flash_mmap_prefetch_pad_size;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
/* Align the end of flash text region as per PMP granularity */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_FETCH_INSTRUCTIONS
_text_end = ABSOLUTE(.);
/**
* Mark the flash.text end.
@@ -440,6 +445,12 @@ SECTIONS
*(.tdata .tdata.* .gnu.linkonce.td.*)
. = ALIGN(ALIGNOF(.flash.tbss));
#if CONFIG_SPIRAM_RODATA
/* Align the end of flash rodata region as per PMP granularity */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_RODATA
_thread_local_data_end = ABSOLUTE(.);
} > rodata_seg_low
ASSERT_SECTIONS_GAP(.flash.tdata, .flash.tbss)

View File

@@ -565,19 +565,23 @@ void IRAM_ATTR call_start_cpu0(void)
#endif
#endif
#if CONFIG_SPIRAM_BOOT_HW_INIT
if (esp_psram_chip_init() != ESP_OK) {
#if CONFIG_SPIRAM_IGNORE_NOTFOUND
ESP_DRAM_LOGE(TAG, "Failed to init external RAM; continuing without it.");
#else
ESP_DRAM_LOGE(TAG, "Failed to init external RAM!");
abort();
#endif
}
#endif
#if CONFIG_SPIRAM_BOOT_INIT
if (esp_psram_init() != ESP_OK) {
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
ESP_DRAM_LOGE(TAG, "Failed to init external RAM, needed for external .bss segment");
abort();
#endif
#if CONFIG_SPIRAM_IGNORE_NOTFOUND
ESP_EARLY_LOGI(TAG, "Failed to init external RAM; continuing without it.");
#else
ESP_DRAM_LOGE(TAG, "Failed to init external RAM!");
abort();
#endif
}
#endif
@@ -792,13 +796,13 @@ void IRAM_ATTR call_start_cpu0(void)
}
#if CONFIG_IDF_TARGET_ESP32
#if !CONFIG_SPIRAM_BOOT_INIT
#if !CONFIG_SPIRAM_BOOT_HW_INIT
// If psram is uninitialized, we need to improve some flash configuration.
bootloader_flash_clock_config(&fhdr);
bootloader_flash_gpio_config(&fhdr);
bootloader_flash_dummy_config(&fhdr);
bootloader_flash_cs_timing_config();
#endif //!CONFIG_SPIRAM_BOOT_INIT
#endif //!CONFIG_SPIRAM_BOOT_HW_INIT
#endif //CONFIG_IDF_TARGET_ESP32
#if CONFIG_SPI_FLASH_SIZE_OVERRIDE

View File

@@ -414,7 +414,7 @@ __attribute__((weak)) void esp_perip_clk_init(void)
REG_CLR_BIT(LP_CLKRST_HP_CLK_CTRL_REG, LP_CLKRST_HP_SDIO_PLL2_CLK_EN);
REG_CLR_BIT(LP_CLKRST_HP_CLK_CTRL_REG, LP_CLKRST_HP_SDIO_PLL1_CLK_EN);
REG_CLR_BIT(LP_CLKRST_HP_CLK_CTRL_REG, LP_CLKRST_HP_SDIO_PLL0_CLK_EN);
#if !CONFIG_SPIRAM_BOOT_INIT
#if !CONFIG_SPIRAM_BOOT_HW_INIT
REG_CLR_BIT(LP_CLKRST_HP_CLK_CTRL_REG, LP_CLKRST_HP_MPLL_500M_CLK_EN);
#endif
REG_CLR_BIT(LP_SYSTEM_REG_HP_ROOT_CLK_CTRL_REG, LP_SYSTEM_REG_CPU_CLK_EN);

View File

@@ -44,6 +44,10 @@ void test_rtc_slow_reg2_execute_violation(void);
void test_irom_reg_write_violation(void);
void test_spiram_xip_irom_alignment_reg_execute_violation(void);
void test_spiram_xip_drom_alignment_reg_execute_violation(void);
void test_drom_reg_write_violation(void);
void test_drom_reg_execute_violation(void);

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -18,6 +18,9 @@
#include "test_panic.h"
#include "test_memprot.h"
#include "sdkconfig.h"
#include "soc/soc_caps.h"
/* Test Utility Functions */
#define BOOT_CMD_MAX_LEN (128)
@@ -172,6 +175,13 @@ void app_main(void)
HANDLE_TEST(test_name, test_irom_reg_write_violation);
HANDLE_TEST(test_name, test_drom_reg_write_violation);
HANDLE_TEST(test_name, test_drom_reg_execute_violation);
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && SOC_MMU_DI_VADDR_SHARED
HANDLE_TEST(test_name, test_spiram_xip_irom_alignment_reg_execute_violation);
#endif
#endif
#if CONFIG_SPIRAM_RODATA && !CONFIG_IDF_TARGET_ESP32S2
HANDLE_TEST(test_name, test_spiram_xip_drom_alignment_reg_execute_violation);
#endif
#ifdef CONFIG_SOC_CPU_HAS_PMA

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -13,6 +13,7 @@
#include "esp_system.h"
#include "esp_log.h"
#include "soc/soc.h"
#include "soc/soc_caps.h"
#include "test_memprot.h"
#include "sdkconfig.h"
@@ -24,6 +25,8 @@ extern int _iram_start;
extern int _iram_text_start;
extern int _iram_text_end;
#define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
/* NOTE: Naming conventions for RTC_FAST_MEM are
* different for ESP32-C3 and other RISC-V targets
*/
@@ -245,8 +248,37 @@ void test_drom_reg_execute_violation(void)
func_ptr = (void(*)(void))foo_buf;
func_ptr();
}
// Check if the memory alignment gaps added to the heap are correctly configured
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && SOC_MMU_DI_VADDR_SHARED
void test_spiram_xip_irom_alignment_reg_execute_violation(void)
{
extern int _instruction_reserved_end;
if (ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end)) - (uint32_t)(&_instruction_reserved_end) >= 4) {
void (*test_addr)(void) = (void(*)(void))((uint32_t)(&_instruction_reserved_end + 0x4));
printf("SPIRAM (IROM): Execute operation | Address: %p\n", test_addr);
test_addr();
} else {
printf("SPIRAM (IROM): IROM alignment gap not added into heap\n");
}
}
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS && SOC_MMU_DI_VADDR_SHARED */
#endif
#if CONFIG_SPIRAM_RODATA && !CONFIG_IDF_TARGET_ESP32S2
void test_spiram_xip_drom_alignment_reg_execute_violation(void)
{
extern int _rodata_reserved_end;
if (ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end)) - (uint32_t)(&_rodata_reserved_end) >= 4) {
void (*test_addr)(void) = (void(*)(void))((uint32_t)(&_rodata_reserved_end + 0x4));
printf("SPIRAM (DROM): Execute operation | Address: %p\n", test_addr);
test_addr();
} else {
printf("SPIRAM (DROM): DROM alignment gap not added into heap\n");
}
}
#endif /* CONFIG_SPIRAM_RODATA && !CONFIG_IDF_TARGET_ESP32S2 */
#ifdef CONFIG_SOC_CPU_HAS_PMA
void test_invalid_memory_region_write_violation(void)
{

View File

@@ -163,7 +163,8 @@ def common_test(
dut.revert_log_level()
return # don't expect "Rebooting" output below
# We will only perform comparisons for ELF files, as we are not introducing any new fields to the binary file format.
# We will only perform comparisons for ELF files,
# as we are not introducing any new fields to the binary file format.
if 'bin' in config:
expected_coredump = None
@@ -719,6 +720,29 @@ CONFIGS_MEMPROT_FLASH_IDROM = list(
)
)
CONFIGS_MEMPROT_SPIRAM_XIP_IROM_ALIGNMENT_HEAP = list(
itertools.chain(
itertools.product(
['memprot_spiram_xip_esp32c5', 'memprot_spiram_xip_esp32c61', 'memprot_spiram_xip_esp32p4'],
['esp32c5', 'esp32c61', 'esp32p4'],
)
)
)
CONFIGS_MEMPROT_SPIRAM_XIP_DROM_ALIGNMENT_HEAP = list(
itertools.chain(
itertools.product(
[
'memprot_spiram_xip_esp32s3',
'memprot_spiram_xip_esp32c5',
'memprot_spiram_xip_esp32c61',
'memprot_spiram_xip_esp32p4',
],
['esp32s3', 'esp32c5', 'esp32c61', 'esp32p4'],
)
)
)
CONFIGS_MEMPROT_INVALID_REGION_PROTECTION_USING_PMA = list(
itertools.chain(
itertools.product(
@@ -1000,6 +1024,33 @@ def test_drom_reg_execute_violation(dut: PanicTestDut, test_func_name: str) -> N
dut.expect_cpu_reset()
@pytest.mark.generic
@idf_parametrize('config, target', CONFIGS_MEMPROT_SPIRAM_XIP_IROM_ALIGNMENT_HEAP, indirect=['config', 'target'])
def test_spiram_xip_irom_alignment_reg_execute_violation(dut: PanicTestDut, test_func_name: str) -> None:
dut.run_test_func(test_func_name)
try:
dut.expect_gme('Instruction access fault')
except Exception:
dut.expect_exact('SPIRAM (IROM): IROM alignment gap not added into heap')
dut.expect_reg_dump(0)
dut.expect_cpu_reset()
@pytest.mark.generic
@idf_parametrize('config, target', CONFIGS_MEMPROT_SPIRAM_XIP_DROM_ALIGNMENT_HEAP, indirect=['config', 'target'])
def test_spiram_xip_drom_alignment_reg_execute_violation(dut: PanicTestDut, test_func_name: str) -> None:
dut.run_test_func(test_func_name)
try:
if dut.target == 'esp32s3':
dut.expect_gme('InstructionFetchError')
else:
dut.expect_gme('Instruction access fault')
except Exception:
dut.expect_exact('SPIRAM (DROM): DROM alignment gap not added into heap')
dut.expect_reg_dump(0)
dut.expect_cpu_reset()
@pytest.mark.generic
@idf_parametrize('config, target', CONFIGS_MEMPROT_INVALID_REGION_PROTECTION_USING_PMA, indirect=['config', 'target'])
def test_invalid_memory_region_write_violation(dut: PanicTestDut, test_func_name: str) -> None:

View File

@@ -6,3 +6,7 @@ CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT=y
# Enable memprot test
CONFIG_TEST_MEMPROT=y
# Enable SPIRAM to check the alignment gap's memory protection
CONFIG_SPIRAM=y
CONFIG_SPIRAM_USE_CAPS_ALLOC=y

View File

@@ -6,3 +6,7 @@ CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT=y
# Enable memprot test
CONFIG_TEST_MEMPROT=y
# Enable SPIRAM to check the alignment gap's memory protection
CONFIG_SPIRAM=y
CONFIG_SPIRAM_USE_CAPS_ALLOC=y

View File

@@ -6,3 +6,7 @@ CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT=y
# Enable memprot test
CONFIG_TEST_MEMPROT=y
# Enable SPIRAM to check the alignment gap's memory protection
CONFIG_SPIRAM=y
CONFIG_SPIRAM_USE_CAPS_ALLOC=y

View File

@@ -0,0 +1,13 @@
# Restricting to ESP32C5
CONFIG_IDF_TARGET="esp32c5"
# Enabling memory protection
CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT=y
# Enable memprot test
CONFIG_TEST_MEMPROT=y
# Enable SPIRAM to check the alignment gap's memory protection
CONFIG_SPIRAM=y
CONFIG_SPIRAM_USE_CAPS_ALLOC=y
CONFIG_SPIRAM_XIP_FROM_PSRAM=y

View File

@@ -0,0 +1,13 @@
# Restricting to ESP32C61
CONFIG_IDF_TARGET="esp32c61"
# Enabling memory protection
CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT=y
# Enable memprot test
CONFIG_TEST_MEMPROT=y
# Enable SPIRAM to check the alignment gap's memory protection
CONFIG_SPIRAM=y
CONFIG_SPIRAM_USE_CAPS_ALLOC=y
CONFIG_SPIRAM_XIP_FROM_PSRAM=y

View File

@@ -0,0 +1,13 @@
# Restricting to ESP32P4
CONFIG_IDF_TARGET="esp32p4"
# Enabling memory protection
CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT=y
# Enable memprot test
CONFIG_TEST_MEMPROT=y
# Enable SPIRAM to check the alignment gap's memory protection
CONFIG_SPIRAM=y
CONFIG_SPIRAM_USE_CAPS_ALLOC=y
CONFIG_SPIRAM_XIP_FROM_PSRAM=y

View File

@@ -0,0 +1,17 @@
# Restricting to ESP32S3
CONFIG_IDF_TARGET="esp32s3"
# Enabling memory protection
CONFIG_ESP_SYSTEM_MEMPROT_FEATURE=y
CONFIG_ESP_SYSTEM_MEMPROT_FEATURE_LOCK=y
# Enabling DCACHE
CONFIG_ESP32S3_DATA_CACHE_16KB=y
# Enable memprot test
CONFIG_TEST_MEMPROT=y
# Enable SPIRAM to check the alignment gap's memory protection
CONFIG_SPIRAM=y
CONFIG_SPIRAM_USE_CAPS_ALLOC=y
CONFIG_SPIRAM_XIP_FROM_PSRAM=y