Merge branch 'feat/some_xip_psram_related_optimisations_and_fixes_v5.3' into 'release/v5.3'

feat(esp_psram): Add wasted XIP PSRAM memory to heap and XIP PSRAM protection (v5.3)

See merge request espressif/esp-idf!38873
Author: Mahavir Jain
Date: 2025-05-22 12:00:14 +05:30
17 changed files with 574 additions and 64 deletions

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -11,6 +11,9 @@
#include "esp_fault.h"
#include "hal/cache_ll.h"
#include "riscv/csr.h"
#if CONFIG_SPIRAM
#include "esp_private/esp_psram_extram.h"
#endif /* CONFIG_SPIRAM */
#ifdef BOOTLOADER_BUILD
// Without L bit set
@ -30,6 +33,7 @@
#define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_DOWN_TO_MMU_PAGE_SIZE(addr) ((addr) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_UP(addr, align) (((addr) + (align) - 1) & ~((align) - 1))
static void esp_cpu_configure_invalid_regions(void)
{
@ -191,16 +195,34 @@ void esp_cpu_configure_region_protection(void)
extern int _instruction_reserved_end;
extern int _rodata_reserved_end;
const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
const uint32_t drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
const uint32_t page_aligned_irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
__attribute__((unused)) const uint32_t page_aligned_drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
// 5. I_Cache / D_Cache (flash)
#if CONFIG_SPIRAM_XIP_FROM_PSRAM && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
// Ideally, CONFIG_SPIRAM_XIP_FROM_PSRAM would be handled as the separate CONFIG_SPIRAM_FETCH_INSTRUCTIONS
// and CONFIG_SPIRAM_RODATA cases, but there are not enough PMP entries for that finer-grained control over the memory regions
PMP_ENTRY_CFG_RESET(6);
PMP_ENTRY_CFG_RESET(7);
PMP_ENTRY_CFG_RESET(8);
PMP_ENTRY_CFG_RESET(9);
PMP_ENTRY_SET(6, SOC_EXTRAM_LOW, NONE);
PMP_ENTRY_SET(7, (uint32_t)(&_instruction_reserved_end), PMP_TOR | RX);
PMP_ENTRY_SET(8, page_aligned_irom_resv_end, PMP_TOR | RW);
PMP_ENTRY_SET(9, (uint32_t)(&_rodata_reserved_end), PMP_TOR | R);
size_t available_psram_heap = esp_psram_get_heap_size_to_protect();
PMP_ENTRY_CFG_RESET(10);
PMP_ENTRY_SET(10, ALIGN_UP(page_aligned_drom_resv_end + available_psram_heap, SOC_CPU_PMP_REGION_GRANULARITY), PMP_TOR | RW);
#else
PMP_ENTRY_CFG_RESET(6);
PMP_ENTRY_CFG_RESET(7);
PMP_ENTRY_CFG_RESET(8);
PMP_ENTRY_SET(6, SOC_IROM_LOW, NONE);
PMP_ENTRY_SET(7, irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(8, drom_resv_end, PMP_TOR | R);
PMP_ENTRY_SET(7, page_aligned_irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(8, page_aligned_drom_resv_end, PMP_TOR | R);
#endif /* CONFIG_SPIRAM_XIP_FROM_PSRAM && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#else
// 5. I_Cache / D_Cache (flash)
const uint32_t pmpaddr6 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
@ -215,24 +237,24 @@ void esp_cpu_configure_region_protection(void)
/* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits
* Bootloader might have given extra permissions and those won't be cleared
*/
PMP_ENTRY_CFG_RESET(9);
PMP_ENTRY_CFG_RESET(10);
PMP_ENTRY_CFG_RESET(11);
PMP_ENTRY_CFG_RESET(12);
PMP_ENTRY_SET(9, SOC_RTC_IRAM_LOW, NONE);
PMP_ENTRY_CFG_RESET(13);
PMP_ENTRY_CFG_RESET(14);
PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
// First part of LP mem is reserved for RTC reserved mem (shared between bootloader and app)
// as well as memory for ULP coprocessor
PMP_ENTRY_SET(10, (int)&_rtc_text_start, PMP_TOR | RW);
PMP_ENTRY_SET(11, (int)&_rtc_text_end, PMP_TOR | RX);
PMP_ENTRY_SET(12, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
PMP_ENTRY_SET(12, (int)&_rtc_text_start, PMP_TOR | RW);
PMP_ENTRY_SET(13, (int)&_rtc_text_end, PMP_TOR | RX);
PMP_ENTRY_SET(14, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
#else
const uint32_t pmpaddr9 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | CONDITIONAL_RWX);
const uint32_t pmpaddr11 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
PMP_ENTRY_SET(11, pmpaddr11, PMP_NAPOT | CONDITIONAL_RWX);
_Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
#endif
// 7. Peripheral addresses
const uint32_t pmpaddr13 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(13, pmpaddr13, PMP_NAPOT | RW);
const uint32_t pmpaddr15 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(15, pmpaddr15, PMP_NAPOT | RW);
_Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
}
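For readers unfamiliar with RISC-V PMP TOR (top-of-range) addressing, the entries above chain together: each entry N configured with PMP_TOR protects the range from the address in entry N-1 up to its own address, with entry N's permissions. A minimal sketch of the resulting PSRAM layout, using an assumed 4 KB SOC_CPU_PMP_REGION_GRANULARITY and hypothetical addresses (not values from this commit):

/* Illustrative sketch only; granularity and addresses are assumptions.
 * entry 6:  SOC_EXTRAM_LOW                              NONE  (marks the base of PSRAM)
 * entry 7:  _instruction_reserved_end                   RX    (XIP .text copied from flash)
 * entry 8:  page_aligned_irom_resv_end                  RW    (.text alignment gap, later added to heap)
 * entry 9:  _rodata_reserved_end                        R     (XIP .rodata copied from flash)
 * entry 10: ALIGN_UP(drom_end + heap_size, granularity) RW    (rodata gap plus PSRAM heap)
 *
 * ALIGN_UP rounds up to the granularity, e.g. ALIGN_UP(0x48012345, 0x1000) == 0x48013000.
 */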

View File

@ -10,6 +10,30 @@ config SPIRAM_BOOT_INIT
have specific requirements, you'll want to leave this enabled so memory allocated
during boot-up can also be placed in SPI RAM.
config SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
bool "Pre-configure memory protection for PSRAM"
default y if SPIRAM_BOOT_INIT
default n
depends on SPIRAM
help
If this is enabled, memory protection for the PSRAM is pre-configured during initial boot.
The configuration accounts for the PSRAM memory layout set up by ESP-IDF's default PSRAM
initialization function, esp_psram_init(). The option is therefore enabled by default when
SPIRAM_BOOT_INIT is enabled, because esp_psram_init() is called from the startup code.
If you disable SPIRAM_BOOT_INIT only to delay PSRAM initialization and still plan to call
ESP-IDF's default initialization function, esp_psram_init(), from application code, keep
this option enabled so that memory protection for the PSRAM remains in place.
Note that enabling this option also assumes that the PSRAM memory left over after
esp_psram_init() completes its configuration may be added to the heap with
esp_psram_extram_add_to_heap_allocator(), so that region is configured with heap memory
protection (RW).
As an advanced usage, if you plan to initialize the PSRAM memory regions manually, without
esp_psram_init(), disable this option to avoid memory protection and usage conflicts.
config SPIRAM_IGNORE_NOTFOUND
bool "Ignore PSRAM when not found"
default "n"

View File

@ -51,6 +51,14 @@
#define PSRAM_EARLY_LOGI ESP_EARLY_LOGI
#endif
#if CONFIG_SPIRAM_RODATA
extern uint8_t _rodata_reserved_end;
#endif /* CONFIG_SPIRAM_RODATA */
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
extern uint8_t _instruction_reserved_end;
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS */
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
extern uint8_t _ext_ram_bss_start;
extern uint8_t _ext_ram_bss_end;
@ -61,6 +69,8 @@ extern uint8_t _ext_ram_noinit_start;
extern uint8_t _ext_ram_noinit_end;
#endif //#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
typedef struct {
intptr_t vaddr_start;
intptr_t vaddr_end;
@ -401,6 +411,36 @@ esp_err_t esp_psram_extram_add_to_heap_allocator(void)
ESP_EARLY_LOGI(TAG, "Adding pool of %dK of PSRAM memory to heap allocator",
(s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size + s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size) / 1024);
// To allow using the page alignment gaps created while mapping the flash segments,
// the gaps must be given the correct memory protection configuration.
#if CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
// SOC_MMU_DI_VADDR_SHARED is required here because, on targets with separate data and instruction
// virtual address spaces, the PSRAM gap created by aligning the instruction segment lies in the
// instruction virtual address space and cannot be added to the heap, as that region cannot be given write permissions.
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && SOC_MMU_DI_VADDR_SHARED
if ((uint32_t)&_instruction_reserved_end & (CONFIG_MMU_PAGE_SIZE - 1)) {
uint32_t instruction_alignment_gap_heap_start, instruction_alignment_gap_heap_end;
mmu_psram_get_instruction_alignment_gap_info(&instruction_alignment_gap_heap_start, &instruction_alignment_gap_heap_end);
ret = heap_caps_add_region_with_caps(byte_aligned_caps, instruction_alignment_gap_heap_start, instruction_alignment_gap_heap_end);
if (ret == ESP_OK) {
ESP_EARLY_LOGI(TAG, "Adding pool of %dK of PSRAM memory gap generated due to end address alignment of irom to the heap allocator", (instruction_alignment_gap_heap_end - instruction_alignment_gap_heap_start) / 1024);
}
}
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS && SOC_MMU_DI_VADDR_SHARED */
// In the case of ESP32S2, the rodata is mapped to a read-only region (SOC_DROM0_ADDRESS_LOW - SOC_DROM0_ADDRESS_HIGH), thus we cannot add this region to the heap.
#if CONFIG_SPIRAM_RODATA && !CONFIG_IDF_TARGET_ESP32S2
if ((uint32_t)&_rodata_reserved_end & (CONFIG_MMU_PAGE_SIZE - 1)) {
uint32_t rodata_alignment_gap_heap_start, rodata_alignment_gap_heap_end;
mmu_psram_get_rodata_alignment_gap_info(&rodata_alignment_gap_heap_start, &rodata_alignment_gap_heap_end);
ret = heap_caps_add_region_with_caps(byte_aligned_caps, rodata_alignment_gap_heap_start, rodata_alignment_gap_heap_end);
if (ret == ESP_OK) {
ESP_EARLY_LOGI(TAG, "Adding pool of %dK of PSRAM memory gap generated due to end address alignment of drom to the heap allocator", (rodata_alignment_gap_heap_end - rodata_alignment_gap_heap_start) / 1024);
}
}
#endif /* CONFIG_SPIRAM_RODATA && !CONFIG_IDF_TARGET_ESP32S2 */
#endif /* CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
return ESP_OK;
}
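To make the gap arithmetic concrete, a worked example under assumed values (a 64 KB MMU page and a hypothetical _rodata_reserved_end; illustrative only):

// Assumed: CONFIG_MMU_PAGE_SIZE = 0x10000 (64 KB), _rodata_reserved_end = 0x48074B22 (hypothetical)
//   rodata_alignment_gap_heap_start = ALIGN_UP_BY(0x48074B22, 4)       = 0x48074B24
//   rodata_alignment_gap_heap_end   = ALIGN_UP_BY(0x48074B22, 0x10000) = 0x48080000
// heap_caps_add_region_with_caps() then recovers 0x48080000 - 0x48074B24 = 0xB4DC bytes (~45 KB)
// that would otherwise be wasted because rodata is mapped into PSRAM in whole MMU pages.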
@ -410,8 +450,24 @@ bool IRAM_ATTR esp_psram_check_ptr_addr(const void *p)
return false;
}
return ((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_end) ||
((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_end);
if (((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_end) ||
((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_end)) {
return true;
}
#if CONFIG_SPIRAM_RODATA && !CONFIG_IDF_TARGET_ESP32S2
if (mmu_psram_check_ptr_addr_in_rodata_alignment_gap(p)) {
return true;
}
#endif /* CONFIG_SPIRAM_RODATA && !CONFIG_IDF_TARGET_ESP32S2 */
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && SOC_MMU_DI_VADDR_SHARED
if (mmu_psram_check_ptr_addr_in_instruction_alignment_gap(p)) {
return true;
}
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS && SOC_MMU_DI_VADDR_SHARED */
return false;
}
esp_err_t esp_psram_extram_reserve_dma_pool(size_t size)
@ -541,3 +597,84 @@ void esp_psram_bss_init(void)
memset(&_ext_ram_bss_start, 0, size);
#endif
}
#if CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
static inline uint32_t s_get_ext_bss_size(void)
{
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
return ((intptr_t)&_ext_ram_bss_end - (intptr_t)&_ext_ram_bss_start);
#else
return 0;
#endif /* CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY */
}
static inline uint32_t s_get_ext_noinit_size(void)
{
#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
return ((intptr_t)&_ext_ram_noinit_end - (intptr_t)&_ext_ram_noinit_start);
#else
return 0;
#endif /* CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY */
}
/**
* @brief Calculates the effective PSRAM memory that would be / is mapped.
*
* @return The size of PSRAM memory that would be / is mapped in bytes, or 0 if PSRAM isn't successfully initialized
*/
static size_t esp_psram_get_effective_mapped_size(void)
{
size_t byte_aligned_size = 0;
size_t total_mapped_size = 0;
if (s_psram_ctx.is_initialised) {
return s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].size + s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size;
} else {
uint32_t psram_available_size = 0;
esp_err_t ret = esp_psram_impl_get_available_size(&psram_available_size);
assert(ret == ESP_OK);
#if CONFIG_SPIRAM_RODATA
psram_available_size -= mmu_psram_get_rodata_segment_length();
#endif /* CONFIG_SPIRAM_RODATA */
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
psram_available_size -= mmu_psram_get_text_segment_length();
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS */
ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &byte_aligned_size);
assert(ret == ESP_OK);
total_mapped_size += MIN(byte_aligned_size, psram_available_size - total_mapped_size);
#if CONFIG_IDF_TARGET_ESP32S2
if (total_mapped_size < psram_available_size) {
size_t word_aligned_size = 0;
ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &word_aligned_size);
assert(ret == ESP_OK);
total_mapped_size += MIN(word_aligned_size, psram_available_size - total_mapped_size);
}
#endif
return total_mapped_size;
}
}
size_t esp_psram_get_heap_size_to_protect(void)
{
if (s_psram_ctx.is_initialised) {
return s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size + s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size;
} else {
size_t effective_mapped_size = esp_psram_get_effective_mapped_size();
if (effective_mapped_size == 0) {
return 0;
}
effective_mapped_size -= s_get_ext_bss_size();
effective_mapped_size -= s_get_ext_noinit_size();
#if CONFIG_IDF_TARGET_ESP32
effective_mapped_size -= esp_himem_reserved_area_size() - 1;
#endif
return effective_mapped_size;
}
}
#endif /* CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
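A sketch of the size accounting performed by the two functions above; every number below is an assumption for illustration:

// Illustrative accounting only:
//   physical PSRAM reported by esp_psram_impl_get_available_size()      8192 KB
//   - XIP rodata segment (mmu_psram_get_rodata_segment_length)         -  512 KB
//   - XIP text segment   (mmu_psram_get_text_segment_length)           - 1024 KB
//   => effective mapped size (capped by the largest free MMU block)      6656 KB
//   - .ext_ram.bss        (s_get_ext_bss_size)                         -   64 KB
//   - .ext_ram_noinit     (s_get_ext_noinit_size)                      -   16 KB
//   => esp_psram_get_heap_size_to_protect()                              6576 KB
// This is the size used to extend the RW PMP region in esp_cpu_configure_region_protection().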

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -60,6 +60,18 @@ bool esp_psram_extram_test(void);
*/
void esp_psram_bss_init(void);
/**
* @brief Calculate the effective PSRAM memory that would be / is added to the heap.
*
* @return The size of PSRAM memory, in bytes, that would be / is added to the heap, or 0 if the PSRAM hardware isn't successfully initialized
* @note The function pre-calculates the effective size of PSRAM that would be added to the heap after accounting for
* the XIP, ext bss, and ext noinit regions. Thus, even if it is called before esp_psram_init(), it returns the
* final effective size that would be added to the heap once esp_psram_init() has run, rather than the raw PSRAM size.
* This function is only available if CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION is enabled.
*/
size_t esp_psram_get_heap_size_to_protect(void);
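A minimal usage sketch, assuming the caller is early startup code running before esp_psram_init() (as in the PMP configuration earlier in this merge; the wrapper function is hypothetical):

static void configure_psram_heap_protection(void)
{
    // The returned size already accounts for the XIP, ext bss, and ext noinit regions,
    // so the value is valid even before esp_psram_init() has run.
    size_t heap_size = esp_psram_get_heap_size_to_protect();
    if (heap_size == 0) {
        return;  // PSRAM hardware was not initialized successfully
    }
    // ... program an RW PMP/PMA entry covering heap_size bytes of PSRAM ...
}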
#if CONFIG_IDF_TARGET_ESP32
/**
* @brief Force a writeback of the data in the PSRAM cache. This is to be called whenever

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -39,6 +39,31 @@ extern "C" {
Part 1 APIs (See @Backgrounds on top of this file)
-------------------------------------------------------------------------------*/
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
/**
* @brief Calculate the size of memory (in bytes) that would be used for copying the flash .text segment into PSRAM
*
* @return Size, in bytes, of the memory that would be used for copying the flash .text segment into PSRAM
*/
size_t mmu_psram_get_text_segment_length(void);
/**
* @brief Get the start and end of the instruction segment alignment gap
*
* @param[out] gap_start Start of the gap
* @param[out] gap_end End of the gap
*/
void mmu_psram_get_instruction_alignment_gap_info(uint32_t *gap_start, uint32_t *gap_end);
/**
* @brief Check if the pointer is in the instruction alignment gap
*
* @param[in] p Pointer to check
*
* @return true if the pointer is in the instruction alignment gap, false otherwise
*/
bool mmu_psram_check_ptr_addr_in_instruction_alignment_gap(const void *p);
/**
* @brief Copy Flash texts to PSRAM
*
@ -50,6 +75,22 @@ esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
#if CONFIG_SPIRAM_RODATA
/**
* @brief Get the start and end of the rodata segment alignment gap
*
* @param[out] gap_start Start of the gap
* @param[out] gap_end End of the gap
*/
void mmu_psram_get_rodata_alignment_gap_info(uint32_t *gap_start, uint32_t *gap_end);
/**
* @brief Calculate the size of memory (in bytes) that would be used for copying the flash rodata segment into PSRAM
*
* @return Size, in bytes, of the memory that would be used for copying the flash rodata segment into PSRAM
*/
size_t mmu_psram_get_rodata_segment_length(void);
/**
* @brief Copy Flash rodata to PSRAM
*
@ -58,6 +99,15 @@ esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size
* @param[out] out_page Used pages
*/
esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page);
/**
* @brief Check if the pointer is in the rodata alignment gap
*
* @param[in] p Pointer to check
*
* @return true if the pointer is in the rodata alignment gap, false otherwise
*/
bool mmu_psram_check_ptr_addr_in_rodata_alignment_gap(const void *p);
#endif //#if CONFIG_SPIRAM_RODATA
/*----------------------------------------------------------------------------

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -16,6 +16,7 @@
* APIs in 2 will be refactored when MMU driver is ready
*/
#include <stdbool.h>
#include <sys/param.h>
#include "sdkconfig.h"
#include "esp_log.h"
@ -31,6 +32,8 @@
#include "esp32s3/rom/cache.h"
#endif
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
/*----------------------------------------------------------------------------
Part 1 APIs (See @Backgrounds on top of this file)
-------------------------------------------------------------------------------*/
@ -44,10 +47,12 @@ static uint32_t page0_page = INVALID_PHY_PAGE;
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
uint32_t page_id = start_page;
extern char _instruction_reserved_end;
#define INSTRUCTION_ALIGNMENT_GAP_START ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, 4)
#define INSTRUCTION_ALIGNMENT_GAP_END ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, CONFIG_MMU_PAGE_SIZE)
size_t mmu_psram_get_text_segment_length(void)
{
uint32_t flash_pages = 0;
#if CONFIG_IDF_TARGET_ESP32S2
flash_pages += Cache_Count_Flash_Pages(PRO_CACHE_IBUS0, &page0_mapped);
@ -55,9 +60,33 @@ esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size
#elif CONFIG_IDF_TARGET_ESP32S3
flash_pages += Cache_Count_Flash_Pages(CACHE_IBUS, &page0_mapped);
#endif
if ((flash_pages + page_id) > BYTES_TO_MMU_PAGE(psram_size)) {
return MMU_PAGE_TO_BYTES(flash_pages);
}
void mmu_psram_get_instruction_alignment_gap_info(uint32_t *gap_start, uint32_t *gap_end)
{
// The heap region must start at a word-aligned address, so at most 3 bytes of virtual space are wasted.
// Alternatively, the sub-word region from (uint32_t)&_instruction_reserved_end to ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, 4) could be added as a byte-accessible-only region.
*gap_start = INSTRUCTION_ALIGNMENT_GAP_START;
*gap_end = INSTRUCTION_ALIGNMENT_GAP_END;
}
bool IRAM_ATTR mmu_psram_check_ptr_addr_in_instruction_alignment_gap(const void *p)
{
if ((intptr_t)p >= INSTRUCTION_ALIGNMENT_GAP_START && (intptr_t)p < INSTRUCTION_ALIGNMENT_GAP_END) {
return true;
}
return false;
}
esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
uint32_t page_id = start_page;
uint32_t flash_bytes = mmu_psram_get_text_segment_length();
if ((flash_bytes + MMU_PAGE_TO_BYTES(page_id)) > psram_size) {
ESP_EARLY_LOGE(TAG, "PSRAM space not enough for the Flash instructions, need %" PRIu32 " B, from %" PRIu32 " B to %" PRIu32 " B",
MMU_PAGE_TO_BYTES(flash_pages), MMU_PAGE_TO_BYTES(start_page), MMU_PAGE_TO_BYTES(flash_pages + page_id));
flash_bytes, MMU_PAGE_TO_BYTES(start_page), flash_bytes + MMU_PAGE_TO_BYTES(page_id));
return ESP_FAIL;
}
@ -87,10 +116,12 @@ esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
#if CONFIG_SPIRAM_RODATA
esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
uint32_t page_id = start_page;
extern char _rodata_reserved_end;
#define RODATA_ALIGNMENT_GAP_START ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, 4)
#define RODATA_ALIGNMENT_GAP_END ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, CONFIG_MMU_PAGE_SIZE)
size_t mmu_psram_get_rodata_segment_length(void)
{
uint32_t flash_pages = 0;
#if CONFIG_IDF_TARGET_ESP32S2
flash_pages += Cache_Count_Flash_Pages(PRO_CACHE_IBUS2, &page0_mapped);
@ -100,8 +131,33 @@ esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_si
#elif CONFIG_IDF_TARGET_ESP32S3
flash_pages += Cache_Count_Flash_Pages(CACHE_DBUS, &page0_mapped);
#endif
if ((flash_pages + page_id) > BYTES_TO_MMU_PAGE(psram_size)) {
ESP_EARLY_LOGE(TAG, "SPI RAM space not enough for the instructions, need to copy to %" PRIu32 " B.", MMU_PAGE_TO_BYTES(flash_pages + page_id));
return MMU_PAGE_TO_BYTES(flash_pages);
}
void mmu_psram_get_rodata_alignment_gap_info(uint32_t *gap_start, uint32_t *gap_end)
{
// The heap region must start at a word-aligned address, so at most 3 bytes of virtual space are wasted.
// Alternatively, the sub-word region from (uint32_t)&_rodata_reserved_end to ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, 4) could be added as a byte-accessible-only region.
*gap_start = RODATA_ALIGNMENT_GAP_START;
*gap_end = RODATA_ALIGNMENT_GAP_END;
}
bool IRAM_ATTR mmu_psram_check_ptr_addr_in_rodata_alignment_gap(const void *p)
{
if ((intptr_t)p >= RODATA_ALIGNMENT_GAP_START && (intptr_t)p < RODATA_ALIGNMENT_GAP_END) {
return true;
}
return false;
}
esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
uint32_t page_id = start_page;
uint32_t flash_bytes = mmu_psram_get_rodata_segment_length();
if ((flash_bytes + MMU_PAGE_TO_BYTES(page_id)) > psram_size) {
ESP_EARLY_LOGE(TAG, "SPI RAM space not enough for the instructions, need to copy to %" PRIu32 " B.", flash_bytes + MMU_PAGE_TO_BYTES(page_id));
return ESP_FAIL;
}
@ -136,10 +192,18 @@ esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_si
/*----------------------------------------------------------------------------
Part 2 APIs (See @Backgrounds on top of this file)
-------------------------------------------------------------------------------*/
extern int _instruction_reserved_start;
extern int _instruction_reserved_end;
extern int _rodata_reserved_start;
extern int _rodata_reserved_end;
/**
* If these symbols are declared as `int`, then with optimizations enabled, Clang assumes that the address of an
* external symbol is 4-byte aligned and clears the two least significant bits of any constant that is
* bitwise-ANDed with that address inside an inlined function.
*
* Since linker symbols such as `_instruction_reserved_start` may not actually be 4-byte aligned, declaring them as `char` avoids this mis-optimization.
*
* As we only use the addresses of these symbols, we declare them as `char` here.
*/
extern char _instruction_reserved_start;
extern char _instruction_reserved_end;
extern char _rodata_reserved_start;
extern char _rodata_reserved_end;
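A compact sketch of the mis-optimization described above (the symbol names are hypothetical; exact behavior depends on the Clang version and optimization level):

#include <stdint.h>

extern int  sym_as_int;   // Clang may assume this address is 4-byte aligned
extern char sym_as_char;  // no alignment assumption is made for a char object

int lowest_two_bits(void)
{
    // With `int`, an optimizing compiler may fold ((uintptr_t)&sym_as_int & 3) to 0 at
    // compile time, because an `int` object must be suitably aligned; with `char`, the
    // masking is kept and the true (possibly unaligned) linker-script address is tested.
    return ((uintptr_t)&sym_as_int & 3) + ((uintptr_t)&sym_as_char & 3);
}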
//------------------------------------Copy Flash .text to PSRAM-------------------------------------//
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS

View File

@ -10,6 +10,7 @@
* The XIP PSRAM is done by CPU copy, v1(see mmu_psram_flash.c) is done by Cache copy
*/
#include <stdbool.h>
#include <sys/param.h>
#include <string.h>
#include "sdkconfig.h"
@ -28,10 +29,18 @@
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
#define ALIGN_DOWN_BY(num, align) ((num) & (~((align) - 1)))
extern int _instruction_reserved_start;
extern int _instruction_reserved_end;
extern int _rodata_reserved_start;
extern int _rodata_reserved_end;
/**
* If these symbols are declared as `int`, then with optimizations enabled, Clang assumes that the address of an
* external symbol is 4-byte aligned and clears the two least significant bits of any constant that is
* bitwise-ANDed with that address inside an inlined function.
*
* Since linker symbols such as `_instruction_reserved_start` may not actually be 4-byte aligned, declaring them as `char` avoids this mis-optimization.
*
* As we only use the addresses of these symbols, we declare them as `char` here.
*/
extern char _instruction_reserved_start;
extern char _instruction_reserved_end;
extern char _rodata_reserved_start;
extern char _rodata_reserved_end;
const static char *TAG = "mmu_psram";
static uint32_t s_irom_vaddr_start;
@ -76,10 +85,35 @@ static uint32_t s_do_load_from_flash(uint32_t flash_paddr_start, uint32_t size,
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
/* Heap memory is allocated with 4-byte alignment, so the start of the gap must be aligned up to a 4-byte boundary */
#define INSTRUCTION_ALIGNMENT_GAP_START ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, 4)
/* The end of the gap is aligned to a CONFIG_MMU_PAGE_SIZE boundary because the flash .text segment is mapped into PSRAM in whole MMU pages */
#define INSTRUCTION_ALIGNMENT_GAP_END ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, CONFIG_MMU_PAGE_SIZE)
size_t mmu_psram_get_text_segment_length(void)
{
return ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, CONFIG_MMU_PAGE_SIZE) - ALIGN_DOWN_BY((uint32_t)&_instruction_reserved_start, CONFIG_MMU_PAGE_SIZE);
}
void mmu_psram_get_instruction_alignment_gap_info(uint32_t *gap_start, uint32_t *gap_end)
{
// The heap region must start at a word-aligned address, so at most 3 bytes of virtual space are wasted.
// Alternatively, the sub-word region from (uint32_t)&_instruction_reserved_end to ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, 4) could be added as a byte-accessible-only region.
*gap_start = INSTRUCTION_ALIGNMENT_GAP_START;
*gap_end = INSTRUCTION_ALIGNMENT_GAP_END;
}
bool IRAM_ATTR mmu_psram_check_ptr_addr_in_instruction_alignment_gap(const void *p)
{
if ((intptr_t)p >= INSTRUCTION_ALIGNMENT_GAP_START && (intptr_t)p < INSTRUCTION_ALIGNMENT_GAP_END) {
return true;
}
return false;
}
esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
size_t irom_size = ALIGN_UP_BY((uint32_t)&_instruction_reserved_end, CONFIG_MMU_PAGE_SIZE) - ALIGN_DOWN_BY((uint32_t)&_instruction_reserved_start, CONFIG_MMU_PAGE_SIZE);
s_irom_size = irom_size;
s_irom_size = mmu_psram_get_text_segment_length();
uint32_t flash_drom_paddr_start = 0;
uint32_t flash_irom_paddr_start = 0;
@ -87,8 +121,8 @@ esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size
flash_irom_paddr_start = ALIGN_DOWN_BY(flash_irom_paddr_start, CONFIG_MMU_PAGE_SIZE);
ESP_EARLY_LOGI(TAG, "flash_irom_paddr_start: 0x%x", flash_irom_paddr_start);
if ((MMU_PAGE_TO_BYTES(start_page) + irom_size) > psram_size) {
ESP_EARLY_LOGE(TAG, "PSRAM space not enough for the Flash instructions, need %"PRId32" B, from %"PRId32" B to %"PRId32" B", irom_size, MMU_PAGE_TO_BYTES(start_page), MMU_PAGE_TO_BYTES(start_page) + irom_size);
if ((MMU_PAGE_TO_BYTES(start_page) + s_irom_size) > psram_size) {
ESP_EARLY_LOGE(TAG, "PSRAM space not enough for the Flash instructions, need %"PRId32" B, from %"PRId32" B to %"PRId32" B", s_irom_size, MMU_PAGE_TO_BYTES(start_page), MMU_PAGE_TO_BYTES(start_page) + s_irom_size);
return ESP_ERR_NO_MEM;
}
@ -98,22 +132,48 @@ esp_err_t mmu_config_psram_text_segment(uint32_t start_page, uint32_t psram_size
ESP_EARLY_LOGV(TAG, "flash_irom_paddr_start: 0x%"PRIx32", MMU_PAGE_TO_BYTES(start_page): 0x%"PRIx32", s_irom_paddr_offset: 0x%"PRIx32", s_irom_vaddr_start: 0x%"PRIx32, flash_irom_paddr_start, MMU_PAGE_TO_BYTES(start_page), s_irom_paddr_offset, s_irom_vaddr_start);
uint32_t mapped_size = 0;
mapped_size = s_do_load_from_flash(flash_irom_paddr_start, irom_size, irom_load_addr_aligned, MMU_PAGE_TO_BYTES(start_page));
cache_hal_writeback_addr(irom_load_addr_aligned, irom_size);
mapped_size = s_do_load_from_flash(flash_irom_paddr_start, s_irom_size, irom_load_addr_aligned, MMU_PAGE_TO_BYTES(start_page));
cache_hal_writeback_addr(irom_load_addr_aligned, s_irom_size);
ESP_EARLY_LOGV(TAG, "after mapping text, starting from paddr=0x%08"PRIx32" and vaddr=0x%08"PRIx32", 0x%"PRIx32" bytes are mapped", MMU_PAGE_TO_BYTES(start_page), irom_load_addr_aligned, mapped_size);
*out_page = BYTES_TO_MMU_PAGE(irom_size);
*out_page = BYTES_TO_MMU_PAGE(s_irom_size);
return ESP_OK;
}
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
#if CONFIG_SPIRAM_RODATA
size_t mmu_psram_get_rodata_segment_length(void)
{
return ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, CONFIG_MMU_PAGE_SIZE) - ALIGN_DOWN_BY((uint32_t)&_rodata_reserved_start, CONFIG_MMU_PAGE_SIZE);
}
/* Heap memory is allocated with 4-byte alignment, so the start of the gap must be aligned up to a 4-byte boundary */
#define RODATA_ALIGNMENT_GAP_START ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, 4)
/* The end of the gap is aligned to a CONFIG_MMU_PAGE_SIZE boundary because the flash rodata segment is mapped into PSRAM in whole MMU pages */
#define RODATA_ALIGNMENT_GAP_END ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, CONFIG_MMU_PAGE_SIZE)
void mmu_psram_get_rodata_alignment_gap_info(uint32_t *gap_start, uint32_t *gap_end)
{
// The heap region must start at a word-aligned address, so at most 3 bytes of virtual space are wasted.
// Alternatively, the sub-word region from (uint32_t)&_rodata_reserved_end to ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, 4) could be added as a byte-accessible-only region.
*gap_start = RODATA_ALIGNMENT_GAP_START;
*gap_end = RODATA_ALIGNMENT_GAP_END;
}
bool IRAM_ATTR mmu_psram_check_ptr_addr_in_rodata_alignment_gap(const void *p)
{
if ((intptr_t)p >= RODATA_ALIGNMENT_GAP_START && (intptr_t)p < RODATA_ALIGNMENT_GAP_END) {
return true;
}
return false;
}
esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_size, uint32_t *out_page)
{
size_t drom_size = ALIGN_UP_BY((uint32_t)&_rodata_reserved_end, CONFIG_MMU_PAGE_SIZE) - ALIGN_DOWN_BY((uint32_t)&_rodata_reserved_start, CONFIG_MMU_PAGE_SIZE);
s_drom_size = drom_size;
s_drom_size = mmu_psram_get_rodata_segment_length();
uint32_t flash_drom_paddr_start = 0;
uint32_t flash_irom_paddr_start = 0;
@ -121,8 +181,8 @@ esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_si
flash_drom_paddr_start = ALIGN_DOWN_BY(flash_drom_paddr_start, CONFIG_MMU_PAGE_SIZE);
ESP_EARLY_LOGI(TAG, "flash_drom_paddr_start: 0x%x", flash_drom_paddr_start);
if ((MMU_PAGE_TO_BYTES(start_page) + drom_size) > psram_size) {
ESP_EARLY_LOGE(TAG, "PSRAM space not enough for the Flash rodata, need %"PRId32" B, from %"PRId32" B to %"PRId32" B", drom_size, MMU_PAGE_TO_BYTES(start_page), MMU_PAGE_TO_BYTES(start_page) + drom_size);
if ((MMU_PAGE_TO_BYTES(start_page) + s_drom_size) > psram_size) {
ESP_EARLY_LOGE(TAG, "PSRAM space not enough for the Flash rodata, need %"PRId32" B, from %"PRId32" B to %"PRId32" B", s_drom_size, MMU_PAGE_TO_BYTES(start_page), MMU_PAGE_TO_BYTES(start_page) + s_drom_size);
return ESP_ERR_NO_MEM;
}
@ -132,12 +192,12 @@ esp_err_t mmu_config_psram_rodata_segment(uint32_t start_page, uint32_t psram_si
ESP_EARLY_LOGV(TAG, "flash_drom_paddr_start: 0x%"PRIx32", MMU_PAGE_TO_BYTES(start_page): 0x%"PRIx32", s_drom_paddr_offset: 0x%"PRIx32", s_drom_vaddr_start: 0x%"PRIx32, flash_drom_paddr_start, MMU_PAGE_TO_BYTES(start_page), s_drom_paddr_offset, s_drom_vaddr_start);
uint32_t mapped_size = 0;
mapped_size = s_do_load_from_flash(flash_drom_paddr_start, drom_size, drom_load_addr_aligned, MMU_PAGE_TO_BYTES(start_page));
cache_hal_writeback_addr(drom_load_addr_aligned, drom_size);
mapped_size = s_do_load_from_flash(flash_drom_paddr_start, s_drom_size, drom_load_addr_aligned, MMU_PAGE_TO_BYTES(start_page));
cache_hal_writeback_addr(drom_load_addr_aligned, s_drom_size);
ESP_EARLY_LOGV(TAG, "after mapping rodata, starting from paddr=0x%08"PRIx32" and vaddr=0x%08"PRIx32", 0x%"PRIx32" bytes are mapped", MMU_PAGE_TO_BYTES(start_page), drom_load_addr_aligned, mapped_size);
*out_page = BYTES_TO_MMU_PAGE(drom_size);
*out_page = BYTES_TO_MMU_PAGE(s_drom_size);
return ESP_OK;
}
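For clarity, a worked example of the segment-length calculation under assumed addresses (a 64 KB MMU page; the linker symbol values are hypothetical):

// Assumed: CONFIG_MMU_PAGE_SIZE = 0x10000 (64 KB)
//   _instruction_reserved_start = 0x48000020 -> ALIGN_DOWN_BY(..., 0x10000) = 0x48000000
//   _instruction_reserved_end   = 0x480A1234 -> ALIGN_UP_BY(..., 0x10000)   = 0x480B0000
//   mmu_psram_get_text_segment_length()      = 0x480B0000 - 0x48000000     = 0xB0000 (704 KB)
// The tail from 0x480A1234 up to 0x480B0000 is the alignment gap that
// mmu_psram_get_instruction_alignment_gap_info() later hands to the heap allocator.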

View File

@ -295,6 +295,13 @@ SECTIONS
*/
. += _esp_flash_mmap_prefetch_pad_size;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
/* Align the end of the flash text region to the PMP granularity so that the
 * page alignment gap created while mapping the flash region into PSRAM can be used.
 */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
_text_end = ABSOLUTE(.);
/**
* Mark the flash.text end.
@ -444,6 +451,14 @@ SECTIONS
*(.tdata .tdata.* .gnu.linkonce.td.*)
. = ALIGN(ALIGNOF(.flash.tbss));
#if CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
/* Align the end of the flash rodata region to the PMP granularity so that the
 * page alignment gap created while mapping the flash region into PSRAM can be used.
 */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
_thread_local_data_end = ABSOLUTE(.);
} > rodata_seg_low
ASSERT_SECTIONS_GAP(.flash.tdata, .flash.tbss)

View File

@ -138,10 +138,19 @@ extern int _rtc_bss_end;
extern int _bss_bt_start;
extern int _bss_bt_end;
#endif // CONFIG_BT_LE_RELEASE_IRAM_SUPPORTED
extern int _instruction_reserved_start;
extern int _instruction_reserved_end;
extern int _rodata_reserved_start;
extern int _rodata_reserved_end;
/**
* If these symbols are declared as `int`, then with optimizations enabled, Clang assumes that the address of an
* external symbol is 4-byte aligned and clears the two least significant bits of any constant that is
* bitwise-ANDed with that address inside an inlined function.
*
* Since linker symbols such as `_instruction_reserved_start` may not actually be 4-byte aligned, declaring them as `char` avoids this mis-optimization.
*
* As we only use the addresses of these symbols, we declare them as `char` here.
*/
extern char _instruction_reserved_start;
extern char _instruction_reserved_end;
extern char _rodata_reserved_start;
extern char _rodata_reserved_end;
extern int _vector_table;
#if SOC_INT_CLIC_SUPPORTED

View File

@ -36,13 +36,13 @@
#include "spi_flash_mmap.h"
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
extern int _instruction_reserved_start;
extern int _instruction_reserved_end;
extern char _instruction_reserved_start;
extern char _instruction_reserved_end;
#endif
#if CONFIG_SPIRAM_RODATA
extern int _rodata_reserved_start;
extern int _rodata_reserved_end;
extern char _rodata_reserved_start;
extern char _rodata_reserved_end;
#endif
#if !CONFIG_SPI_FLASH_ROM_IMPL

View File

@ -44,6 +44,10 @@ void test_rtc_slow_reg2_execute_violation(void);
void test_irom_reg_write_violation(void);
void test_spiram_xip_irom_alignment_reg_execute_violation(void);
void test_spiram_xip_drom_alignment_reg_execute_violation(void);
void test_drom_reg_write_violation(void);
void test_drom_reg_execute_violation(void);

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -18,6 +18,9 @@
#include "test_panic.h"
#include "test_memprot.h"
#include "sdkconfig.h"
#include "soc/soc_caps.h"
/* Test Utility Functions */
#define BOOT_CMD_MAX_LEN (128)
@ -170,6 +173,13 @@ void app_main(void)
HANDLE_TEST(test_name, test_irom_reg_write_violation);
HANDLE_TEST(test_name, test_drom_reg_write_violation);
HANDLE_TEST(test_name, test_drom_reg_execute_violation);
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && SOC_MMU_DI_VADDR_SHARED
HANDLE_TEST(test_name, test_spiram_xip_irom_alignment_reg_execute_violation);
#endif
#endif
#if CONFIG_SPIRAM_RODATA && !CONFIG_IDF_TARGET_ESP32S2
HANDLE_TEST(test_name, test_spiram_xip_drom_alignment_reg_execute_violation);
#endif
#ifdef CONFIG_SOC_CPU_HAS_PMA

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2021-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2021-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -13,6 +13,7 @@
#include "esp_system.h"
#include "esp_log.h"
#include "soc/soc.h"
#include "soc/soc_caps.h"
#include "test_memprot.h"
#include "sdkconfig.h"
@ -24,6 +25,8 @@ extern int _iram_start;
extern int _iram_text_start;
extern int _iram_text_end;
#define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
/* NOTE: Naming conventions for RTC_FAST_MEM are
* different for ESP32-C3 and other RISC-V targets
*/
@ -245,8 +248,37 @@ void test_drom_reg_execute_violation(void)
func_ptr = (void(*)(void))foo_buf;
func_ptr();
}
// Check if the memory alignment gaps added to the heap are correctly configured
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && SOC_MMU_DI_VADDR_SHARED
void test_spiram_xip_irom_alignment_reg_execute_violation(void)
{
extern int _instruction_reserved_end;
if (ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end)) - (uint32_t)(&_instruction_reserved_end) >= 4) {
void (*test_addr)(void) = (void(*)(void))((uint32_t)&_instruction_reserved_end + 0x4);
printf("SPIRAM (IROM): Execute operation | Address: %p\n", test_addr);
test_addr();
} else {
printf("SPIRAM (IROM): IROM alignment gap not added into heap\n");
}
}
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS && SOC_MMU_DI_VADDR_SHARED */
#endif
#if CONFIG_SPIRAM_RODATA && !CONFIG_IDF_TARGET_ESP32S2
void test_spiram_xip_drom_alignment_reg_execute_violation(void)
{
extern int _rodata_reserved_end;
if (ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end)) - (uint32_t)(&_rodata_reserved_end) >= 4) {
void (*test_addr)(void) = (void(*)(void))((uint32_t)&_rodata_reserved_end + 0x4);
printf("SPIRAM (DROM): Execute operation | Address: %p\n", test_addr);
test_addr();
} else {
printf("SPIRAM (DROM): DROM alignment gap not added into heap\n");
}
}
#endif /* CONFIG_SPIRAM_RODATA && !CONFIG_IDF_TARGET_ESP32S2 */
#ifdef CONFIG_SOC_CPU_HAS_PMA
void test_invalid_memory_region_write_violation(void)
{

View File

@ -157,7 +157,8 @@ def common_test(dut: PanicTestDut, config: str, expected_backtrace: Optional[Lis
dut.revert_log_level()
return # don't expect "Rebooting" output below
# We will only perform comparisons for ELF files, as we are not introducing any new fields to the binary file format.
# We will only perform comparisons for ELF files,
# as we are not introducing any new fields to the binary file format.
if 'bin' in config:
expected_coredump = None
@ -751,6 +752,15 @@ CONFIGS_MEMPROT_FLASH_IDROM = [
pytest.param('memprot_esp32p4', marks=[pytest.mark.esp32p4])
]
CONFIGS_MEMPROT_SPIRAM_XIP_IROM_ALIGNMENT_HEAP = [
pytest.param('memprot_spiram_xip_esp32p4', marks=[pytest.mark.esp32p4])
]
CONFIGS_MEMPROT_SPIRAM_XIP_DROM_ALIGNMENT_HEAP = [
pytest.param('memprot_spiram_xip_esp32s3', marks=[pytest.mark.esp32s3]),
pytest.param('memprot_spiram_xip_esp32p4', marks=[pytest.mark.esp32p4])
]
CONFIGS_MEMPROT_INVALID_REGION_PROTECTION_USING_PMA = [
pytest.param('memprot_esp32c6', marks=[pytest.mark.esp32c6]),
pytest.param('memprot_esp32h2', marks=[pytest.mark.esp32h2]),
@ -1020,8 +1030,35 @@ def test_drom_reg_execute_violation(dut: PanicTestDut, test_func_name: str) -> N
dut.expect_cpu_reset()
@pytest.mark.parametrize('config', CONFIGS_MEMPROT_INVALID_REGION_PROTECTION_USING_PMA, indirect=True)
@pytest.mark.parametrize('config', CONFIGS_MEMPROT_SPIRAM_XIP_IROM_ALIGNMENT_HEAP, indirect=True)
@pytest.mark.generic
def test_spiram_xip_irom_alignment_reg_execute_violation(dut: PanicTestDut, test_func_name: str) -> None:
dut.run_test_func(test_func_name)
try:
dut.expect_gme('Instruction access fault')
except Exception:
dut.expect_exact('SPIRAM (IROM): IROM alignment gap not added into heap')
dut.expect_reg_dump(0)
dut.expect_cpu_reset()
@pytest.mark.parametrize('config', CONFIGS_MEMPROT_SPIRAM_XIP_DROM_ALIGNMENT_HEAP, indirect=True)
@pytest.mark.generic
def test_spiram_xip_drom_alignment_reg_execute_violation(dut: PanicTestDut, test_func_name: str) -> None:
dut.run_test_func(test_func_name)
try:
if dut.target == 'esp32s3':
dut.expect_gme('InstructionFetchError')
else:
dut.expect_gme('Instruction access fault')
except Exception:
dut.expect_exact('SPIRAM (DROM): DROM alignment gap not added into heap')
dut.expect_reg_dump(0)
dut.expect_cpu_reset()
@pytest.mark.generic
@pytest.mark.parametrize('config', CONFIGS_MEMPROT_INVALID_REGION_PROTECTION_USING_PMA, indirect=True)
def test_invalid_memory_region_write_violation(dut: PanicTestDut, test_func_name: str) -> None:
dut.run_test_func(test_func_name)
dut.expect_gme('Store access fault')

View File

@ -6,3 +6,7 @@ CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT=y
# Enable memprot test
CONFIG_TEST_MEMPROT=y
# Enable SPIRAM to check the alignment gap's memory protection
CONFIG_SPIRAM=y
CONFIG_SPIRAM_USE_CAPS_ALLOC=y

View File

@ -0,0 +1,13 @@
# Restricting to ESP32P4
CONFIG_IDF_TARGET="esp32p4"
# Enabling memory protection
CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT=y
# Enable memprot test
CONFIG_TEST_MEMPROT=y
# Enable SPIRAM to check the alignment gap's memory protection
CONFIG_SPIRAM=y
CONFIG_SPIRAM_USE_CAPS_ALLOC=y
CONFIG_SPIRAM_XIP_FROM_PSRAM=y

View File

@ -0,0 +1,17 @@
# Restricting to ESP32S3
CONFIG_IDF_TARGET="esp32s3"
# Enabling memory protection
CONFIG_ESP_SYSTEM_MEMPROT_FEATURE=y
CONFIG_ESP_SYSTEM_MEMPROT_FEATURE_LOCK=y
# Enabling DCACHE
CONFIG_ESP32S3_DATA_CACHE_16KB=y
# Enable memprot test
CONFIG_TEST_MEMPROT=y
# Enable SPIRAM to check the alignment gap's memory protection
CONFIG_SPIRAM=y
CONFIG_SPIRAM_USE_CAPS_ALLOC=y
CONFIG_SPIRAM_XIP_FROM_PSRAM=y