feat(cpu_region_protect): Enable basic memory protection for SPIRAM

harshal.patil
2025-02-18 23:28:26 +05:30
parent 49f253361e
commit ab229a34b3
20 changed files with 479 additions and 133 deletions

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -9,6 +9,9 @@
#include "esp_cpu.h"
#include "esp_fault.h"
#include "esp32c5/rom/rom_layout.h"
#if CONFIG_SPIRAM
#include "esp_private/esp_psram_extram.h"
#endif /* CONFIG_SPIRAM */
#ifdef BOOTLOADER_BUILD
// Without L bit set
@@ -26,6 +29,7 @@
#define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_DOWN_TO_MMU_PAGE_SIZE(addr) ((addr) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_UP(addr, align) (((addr) + (align) - 1) & ~((align) - 1))
static void esp_cpu_configure_invalid_regions(void)
{
@@ -178,15 +182,60 @@ void esp_cpu_configure_region_protection(void)
extern int _instruction_reserved_end;
extern int _rodata_reserved_end;
const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
const uint32_t drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
const uint32_t page_aligned_irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
__attribute__((unused)) const uint32_t page_aligned_drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
PMP_ENTRY_CFG_RESET(6);
PMP_ENTRY_SET(6, SOC_IROM_LOW, NONE);
/**
Virtual space layout:
_________ <- SOC_IROM_LOW
| |
|_______| <- _instruction_reserved_end
|_______| <- page_aligned_irom_resv_end
| |
|_______| <- _rodata_reserved_end
|_______| <- page_aligned_drom_resv_end
| |
| |
| |
|_______| <- page_aligned_drom_resv_end + available_psram_heap
| |
| |
| |
| |
|_______| <- SOC_DROM_HIGH
if CONFIG_SPIRAM_FETCH_INSTRUCTIONS: [_instruction_reserved_end, page_aligned_irom_resv_end) in heap (RW)
if CONFIG_SPIRAM_RODATA: [_rodata_reserved_end, page_aligned_drom_resv_end) in heap (RW)
if CONFIG_SPIRAM: [_rodata_reserved_end, page_aligned_drom_resv_end + available_psram_heap] in heap / reserved for mapping (RW)
*/
PMP_ENTRY_CFG_RESET(7);
PMP_ENTRY_CFG_RESET(8);
PMP_ENTRY_SET(6, SOC_IROM_LOW, NONE);
PMP_ENTRY_SET(7, irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(8, drom_resv_end, PMP_TOR | R);
PMP_ENTRY_CFG_RESET(9);
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
PMP_ENTRY_SET(7, (uint32_t)(&_instruction_reserved_end), PMP_TOR | RX);
PMP_ENTRY_SET(8, page_aligned_irom_resv_end, PMP_TOR | RW);
#else
PMP_ENTRY_SET(7, page_aligned_irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(8, page_aligned_irom_resv_end, NONE);
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#if CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
PMP_ENTRY_SET(9, (uint32_t)(&_rodata_reserved_end), PMP_TOR | R);
#else
PMP_ENTRY_SET(9, page_aligned_drom_resv_end, PMP_TOR | R);
#endif /* CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#if CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
size_t available_psram_heap = esp_psram_get_heap_size_to_protect();
PMP_ENTRY_CFG_RESET(10);
PMP_ENTRY_SET(10, ALIGN_UP(page_aligned_drom_resv_end + available_psram_heap, SOC_CPU_PMP_REGION_GRANULARITY), PMP_TOR | RW);
#endif /* CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#else
const uint32_t pmpaddr6 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
// Add the W attribute in the case of PSRAM
@@ -201,29 +250,31 @@ void esp_cpu_configure_region_protection(void)
/* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits
* Bootloader might have given extra permissions and those won't be cleared
*/
PMP_ENTRY_CFG_RESET(9);
PMP_ENTRY_CFG_RESET(10);
PMP_ENTRY_CFG_RESET(11);
PMP_ENTRY_CFG_RESET(12);
PMP_ENTRY_SET(9, SOC_RTC_IRAM_LOW, NONE);
PMP_ENTRY_CFG_RESET(13);
PMP_ENTRY_CFG_RESET(14);
PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
// First part of LP mem is reserved for ULP coprocessor
#if CONFIG_ESP_SYSTEM_PMP_LP_CORE_RESERVE_MEM_EXECUTABLE
PMP_ENTRY_SET(10, (int)&_rtc_text_start, PMP_TOR | RWX);
PMP_ENTRY_SET(12, (int)&_rtc_text_start, PMP_TOR | RWX);
#else
PMP_ENTRY_SET(10, (int)&_rtc_text_start, PMP_TOR | RW);
PMP_ENTRY_SET(12, (int)&_rtc_text_start, PMP_TOR | RW);
#endif
PMP_ENTRY_SET(11, (int)&_rtc_text_end, PMP_TOR | RX);
PMP_ENTRY_SET(12, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
PMP_ENTRY_SET(13, (int)&_rtc_text_end, PMP_TOR | RX);
PMP_ENTRY_SET(14, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
#else
const uint32_t pmpaddr9 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | CONDITIONAL_RWX);
const uint32_t pmpaddr11 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
PMP_ENTRY_SET(11, pmpaddr11, PMP_NAPOT | CONDITIONAL_RWX);
_Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
#endif
// 6. Peripheral addresses
const uint32_t pmpaddr13 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(13, pmpaddr13, PMP_NAPOT | RW);
PMP_ENTRY_CFG_RESET(15);
const uint32_t pmpaddr15 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(15, pmpaddr15, PMP_NAPOT | RW);
_Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
}
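The new entries 6-10 above rely on PMP TOR (top-of-range) addressing: an entry configured with PMP_TOR applies its permissions to the half-open range between the previous entry's address and its own, which is why entry 6 first pins SOC_IROM_LOW with NONE and the later entries only stack boundaries on top of it. The stand-alone sketch below (illustrative only; the addresses, names and the tor_lookup() helper are hypothetical and not part of this commit) models that lowest-matching-entry range partitioning.

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of PMP TOR matching: entry i grants its permissions to
 * addresses in [boundary[i-1], boundary[i]). All values are placeholders. */
typedef struct {
    uint32_t top;        /* exclusive upper bound of the region (pmpaddr) */
    const char *perm;    /* permissions applied to that region            */
} tor_entry_t;

static const char *tor_lookup(const tor_entry_t *entries, int count, uint32_t addr)
{
    uint32_t base = 0;
    for (int i = 0; i < count; i++) {
        if (addr >= base && addr < entries[i].top) {
            return entries[i].perm;
        }
        base = entries[i].top;
    }
    return "no match";
}

int main(void)
{
    /* Mirrors the ordering used above: NONE below the start of IROM, RX up to the
     * end of instructions, R for rodata, RW for the PSRAM heap window
     * (hypothetical addresses). */
    const tor_entry_t entries[] = {
        { 0x42000000, "NONE" },   /* below SOC_IROM_LOW             */
        { 0x42080000, "RX"   },   /* instructions (flash/PSRAM XIP) */
        { 0x420C0000, "R"    },   /* read-only data                 */
        { 0x42800000, "RW"   },   /* PSRAM heap window              */
    };
    printf("%s\n", tor_lookup(entries, 4, 0x42090000));  /* prints "R" */
    return 0;
}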

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -10,6 +10,9 @@
#include "esp_cpu.h"
#include "esp_fault.h"
#include "esp32c61/rom/rom_layout.h"
#if CONFIG_SPIRAM
#include "esp_private/esp_psram_extram.h"
#endif /* CONFIG_SPIRAM */
#ifdef BOOTLOADER_BUILD
// Without L bit set
@@ -27,6 +30,7 @@
#define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_DOWN_TO_MMU_PAGE_SIZE(addr) ((addr) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_UP(addr, align) (((addr) + (align) - 1) & ~((align) - 1))
static void esp_cpu_configure_invalid_regions(void)
{
@@ -172,16 +176,60 @@ void esp_cpu_configure_region_protection(void)
extern int _instruction_reserved_end;
extern int _rodata_reserved_end;
const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
const uint32_t drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
const uint32_t page_aligned_irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
__attribute__((unused)) const uint32_t page_aligned_drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
PMP_ENTRY_CFG_RESET(7);
PMP_ENTRY_SET(7, SOC_IROM_LOW, NONE);
/**
Virtual space layout:
_________ <- SOC_IROM_LOW
| |
|_______| <- _instruction_reserved_end
|_______| <- page_aligned_irom_resv_end
| |
|_______| <- _rodata_reserved_end
|_______| <- page_aligned_drom_resv_end
| |
| |
| |
|_______| <- page_aligned_drom_resv_end + available_psram_heap
| |
| |
| |
| |
|_______| <- SOC_DROM_HIGH
if CONFIG_SPIRAM_FETCH_INSTRUCTIONS: [_instruction_reserved_end, page_aligned_irom_resv_end) in heap (RW)
if CONFIG_SPIRAM_RODATA: [_rodata_reserved_end, page_aligned_drom_resv_end) in heap (RW)
if CONFIG_SPIRAM: [_rodata_reserved_end, page_aligned_drom_resv_end + available_psram_heap] in heap / reserved for mapping (RW)
*/
PMP_ENTRY_CFG_RESET(8);
PMP_ENTRY_CFG_RESET(9);
PMP_ENTRY_CFG_RESET(10);
PMP_ENTRY_SET(7, SOC_IROM_LOW, NONE);
PMP_ENTRY_SET(8, irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(9, drom_resv_end, PMP_TOR | R);
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
PMP_ENTRY_SET(8, (uint32_t)(&_instruction_reserved_end), PMP_TOR | RX);
PMP_ENTRY_SET(9, page_aligned_irom_resv_end, PMP_TOR | RW);
#else
PMP_ENTRY_SET(8, page_aligned_irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(9, page_aligned_irom_resv_end, NONE);
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#if CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
PMP_ENTRY_SET(10, (uint32_t)(&_rodata_reserved_end), PMP_TOR | R);
#else
PMP_ENTRY_SET(10, page_aligned_drom_resv_end, PMP_TOR | R);
#endif /* CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#if CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
size_t available_psram_heap = esp_psram_get_heap_size_to_protect();
PMP_ENTRY_CFG_RESET(11);
PMP_ENTRY_SET(11, ALIGN_UP(page_aligned_drom_resv_end + available_psram_heap, SOC_CPU_PMP_REGION_GRANULARITY), PMP_TOR | RW);
#endif /* CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#else
const uint32_t pmpaddr7 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
// Add the W attribute in the case of PSRAM
@@ -190,7 +238,7 @@ void esp_cpu_configure_region_protection(void)
#endif
// 5. Peripheral addresses
const uint32_t pmpaddr10 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(10, pmpaddr10, PMP_NAPOT | RW);
const uint32_t pmpaddr12 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(12, pmpaddr12, PMP_NAPOT | RW);
_Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -11,6 +11,9 @@
#include "esp_fault.h"
#include "hal/cache_ll.h"
#include "riscv/csr.h"
#if CONFIG_SPIRAM
#include "esp_private/esp_psram_extram.h"
#endif /* CONFIG_SPIRAM */
#ifdef BOOTLOADER_BUILD
// Without L bit set
@@ -30,6 +33,7 @@
#define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_DOWN_TO_MMU_PAGE_SIZE(addr) ((addr) & ~((SOC_MMU_PAGE_SIZE) - 1))
#define ALIGN_UP(addr, align) (((addr) + (align) - 1) & ~((align) - 1))
static void esp_cpu_configure_invalid_regions(void)
{
@@ -191,16 +195,34 @@ void esp_cpu_configure_region_protection(void)
extern int _instruction_reserved_end;
extern int _rodata_reserved_end;
const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
const uint32_t drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
const uint32_t page_aligned_irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
__attribute__((unused)) const uint32_t page_aligned_drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
// 5. I_Cache / D_Cache (flash)
#if CONFIG_SPIRAM_XIP_FROM_PSRAM && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
// We could have split the CONFIG_SPIRAM_XIP_FROM_PSRAM handling into CONFIG_SPIRAM_FETCH_INSTRUCTIONS and CONFIG_SPIRAM_RODATA,
// but there are not enough PMP entries to do so, which prevents finer-grained control over these memory regions
PMP_ENTRY_CFG_RESET(6);
PMP_ENTRY_CFG_RESET(7);
PMP_ENTRY_CFG_RESET(8);
PMP_ENTRY_CFG_RESET(9);
PMP_ENTRY_SET(6, SOC_EXTRAM_LOW, NONE);
PMP_ENTRY_SET(7, (uint32_t)(&_instruction_reserved_end), PMP_TOR | RX);
PMP_ENTRY_SET(8, page_aligned_irom_resv_end, PMP_TOR | RW);
PMP_ENTRY_SET(9, (uint32_t)(&_rodata_reserved_end), PMP_TOR | R);
size_t available_psram_heap = esp_psram_get_heap_size_to_protect();
PMP_ENTRY_CFG_RESET(10);
PMP_ENTRY_SET(10, ALIGN_UP(page_aligned_drom_resv_end + available_psram_heap, SOC_CPU_PMP_REGION_GRANULARITY), PMP_TOR | RW);
#else
PMP_ENTRY_CFG_RESET(6);
PMP_ENTRY_CFG_RESET(7);
PMP_ENTRY_CFG_RESET(8);
PMP_ENTRY_SET(6, SOC_IROM_LOW, NONE);
PMP_ENTRY_SET(7, irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(8, drom_resv_end, PMP_TOR | R);
PMP_ENTRY_SET(7, page_aligned_irom_resv_end, PMP_TOR | RX);
PMP_ENTRY_SET(8, page_aligned_drom_resv_end, PMP_TOR | R);
#endif /* CONFIG_SPIRAM_XIP_FROM_PSRAM && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
#else
// 5. I_Cache / D_Cache (flash)
const uint32_t pmpaddr6 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
@@ -215,28 +237,28 @@ void esp_cpu_configure_region_protection(void)
/* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits
* Bootloader might have given extra permissions and those won't be cleared
*/
PMP_ENTRY_CFG_RESET(9);
PMP_ENTRY_CFG_RESET(10);
PMP_ENTRY_CFG_RESET(11);
PMP_ENTRY_CFG_RESET(12);
PMP_ENTRY_SET(9, SOC_RTC_IRAM_LOW, NONE);
PMP_ENTRY_CFG_RESET(13);
PMP_ENTRY_CFG_RESET(14);
PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
// First part of LP mem is reserved for RTC reserved mem (shared between bootloader and app)
// as well as memory for ULP coprocessor
#if CONFIG_ESP_SYSTEM_PMP_LP_CORE_RESERVE_MEM_EXECUTABLE
PMP_ENTRY_SET(10, (int)&_rtc_text_start, PMP_TOR | RWX);
PMP_ENTRY_SET(12, (int)&_rtc_text_start, PMP_TOR | RWX);
#else
PMP_ENTRY_SET(10, (int)&_rtc_text_start, PMP_TOR | RW);
PMP_ENTRY_SET(12, (int)&_rtc_text_start, PMP_TOR | RW);
#endif
PMP_ENTRY_SET(11, (int)&_rtc_text_end, PMP_TOR | RX);
PMP_ENTRY_SET(12, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
PMP_ENTRY_SET(13, (int)&_rtc_text_end, PMP_TOR | RX);
PMP_ENTRY_SET(14, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
#else
const uint32_t pmpaddr9 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | CONDITIONAL_RWX);
const uint32_t pmpaddr11 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
PMP_ENTRY_SET(11, pmpaddr11, PMP_NAPOT | CONDITIONAL_RWX);
_Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
#endif
// 7. Peripheral addresses
const uint32_t pmpaddr13 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(13, pmpaddr13, PMP_NAPOT | RW);
const uint32_t pmpaddr15 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
PMP_ENTRY_SET(15, pmpaddr15, PMP_NAPOT | RW);
_Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
}

View File

@@ -21,6 +21,30 @@ config SPIRAM_BOOT_INIT
have specific requirements, you'll want to leave this enabled so memory allocated
during boot-up can also be placed in PSRAM.
config SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
bool "Pre-configure memory protection for PSRAM"
default y if SPIRAM_BOOT_INIT
default n
depends on SPIRAM
help
If this option is enabled, memory protection for the PSRAM is pre-configured during initial boot.
The configuration accounts for the PSRAM memory layout that is set up by ESP-IDF's default PSRAM
initialization function, esp_psram_init().
The option therefore defaults to enabled when SPIRAM_BOOT_INIT is enabled, because esp_psram_init()
is then called from the startup code.
If you disable SPIRAM_BOOT_INIT only to delay PSRAM initialization and still plan to call the default
initialization function esp_psram_init() from application code, keep this option enabled so that
memory protection is still applied to the PSRAM.
Note that this option also assumes that the PSRAM memory left over after esp_psram_init() has performed
its memory configuration may be added to the heap with esp_psram_extram_add_to_heap_allocator(), and it
therefore configures that region with heap memory protection (RW).
As an advanced usage, if you plan to set up the PSRAM memory regions manually without using
esp_psram_init(), disable this option to avoid memory protection and usage conflicts.
config SPIRAM_IGNORE_NOTFOUND
bool "Ignore PSRAM when not found"
default "n"

View File

@@ -79,6 +79,7 @@ esp_err_t esp_psram_chip_init(void);
* ext bss and ext noinit considerations; thus, even if this function is called before esp_psram_init(), it returns the final
* effective size of the PSRAM memory that would be added to the heap once esp_psram_init() has run,
* rather than the raw size of the PSRAM memory.
* This function is only available if CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION is enabled.
*/
size_t esp_psram_get_heap_size_to_protect(void);
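A minimal usage sketch for the function declared above: since it already folds in the XIP, ext .bss and ext .noinit placement, it can be queried even before esp_psram_init() to learn how large the protected heap window will be. The include path is assumed from the cpu_region_protect.c changes earlier in this commit, and the log tag is illustrative.

#include <stddef.h>
#include "esp_log.h"
#include "esp_private/esp_psram_extram.h"   /* esp_psram_get_heap_size_to_protect() */

/* Sketch: query the heap size that the PMP RW window must cover. Only meaningful
 * when CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION is enabled. */
static void log_psram_heap_to_protect(void)
{
    size_t heap_size = esp_psram_get_heap_size_to_protect();
    ESP_LOGI("psram_prot", "PSRAM heap to protect: %u bytes", (unsigned)heap_size);
}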

View File

@@ -226,24 +226,6 @@ static void s_xip_psram_placement(uint32_t *psram_available_size, uint32_t *out_
}
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
static inline uint32_t s_get_ext_bss_size(void)
{
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
return ((intptr_t)&_ext_ram_bss_end - (intptr_t)&_ext_ram_bss_start);
#else
return 0;
#endif /* CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY */
}
static inline uint32_t s_get_ext_noinit_size(void)
{
#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
return ((intptr_t)&_ext_ram_noinit_end - (intptr_t)&_ext_ram_noinit_start);
#else
return 0;
#endif /* CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY */
}
static void s_psram_mapping(uint32_t psram_available_size, uint32_t start_page)
{
esp_err_t ret = ESP_FAIL;
@@ -376,77 +358,6 @@ esp_err_t esp_psram_chip_init(void)
return s_psram_chip_init();
}
/**
* @brief Calculates the effective PSRAM memory that would be / is mapped.
*
* @return The size of PSRAM memory that would be / is mapped in bytes, or 0 if PSRAM isn't successfully initialized
*/
static size_t esp_psram_get_effective_mapped_size(void)
{
size_t byte_aligned_size = 0;
size_t total_mapped_size = 0;
// return if the PSRAM is not enabled
if (!s_psram_ctx.is_chip_initialised) {
return 0;
}
if (s_psram_ctx.is_initialised) {
return s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].size + s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size;
} else {
uint32_t psram_available_size = 0;
esp_err_t ret = esp_psram_impl_get_available_size(&psram_available_size);
assert(ret == ESP_OK);
#if CONFIG_SPIRAM_RODATA
psram_available_size -= mmu_psram_get_rodata_segment_length();
#endif /* CONFIG_SPIRAM_RODATA */
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
psram_available_size -= mmu_psram_get_text_segment_length();
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS */
ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &byte_aligned_size);
assert(ret == ESP_OK);
total_mapped_size += MIN(byte_aligned_size, psram_available_size - total_mapped_size);
#if CONFIG_IDF_TARGET_ESP32S2
if (total_mapped_size < psram_available_size) {
size_t word_aligned_size = 0;
ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &word_aligned_size);
assert(ret == ESP_OK);
total_mapped_size += MIN(word_aligned_size, psram_available_size - total_mapped_size);
}
#endif
return total_mapped_size;
}
}
size_t esp_psram_get_heap_size_to_protect(void)
{
// return if the PSRAM is not enabled
if (!s_psram_ctx.is_chip_initialised) {
return 0;
}
if (s_psram_ctx.is_initialised) {
return s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size + s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size;
} else {
size_t effective_mapped_size = esp_psram_get_effective_mapped_size();
if (effective_mapped_size == 0) {
return 0;
}
effective_mapped_size -= s_get_ext_bss_size();
effective_mapped_size -= s_get_ext_noinit_size();
#if CONFIG_IDF_TARGET_ESP32
effective_mapped_size -= esp_himem_reserved_area_size() - 1;
#endif
return effective_mapped_size;
}
}
esp_err_t esp_psram_init(void)
{
esp_err_t ret = ESP_FAIL;
@@ -697,3 +608,94 @@ void esp_psram_bss_init(void)
memset(&_ext_ram_bss_start, 0, size);
#endif
}
#if CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
static inline uint32_t s_get_ext_bss_size(void)
{
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
return ((intptr_t)&_ext_ram_bss_end - (intptr_t)&_ext_ram_bss_start);
#else
return 0;
#endif /* CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY */
}
static inline uint32_t s_get_ext_noinit_size(void)
{
#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
return ((intptr_t)&_ext_ram_noinit_end - (intptr_t)&_ext_ram_noinit_start);
#else
return 0;
#endif /* CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY */
}
/**
* @brief Calculates the effective PSRAM memory that would be / is mapped.
*
* @return The size of PSRAM memory that would be / is mapped in bytes, or 0 if PSRAM isn't successfully initialized
*/
static size_t esp_psram_get_effective_mapped_size(void)
{
size_t byte_aligned_size = 0;
size_t total_mapped_size = 0;
// return if the PSRAM is not enabled
if (!s_psram_ctx.is_chip_initialised) {
return 0;
}
if (s_psram_ctx.is_initialised) {
return s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].size + s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size;
} else {
uint32_t psram_available_size = 0;
esp_err_t ret = esp_psram_impl_get_available_size(&psram_available_size);
assert(ret == ESP_OK);
#if CONFIG_SPIRAM_RODATA
psram_available_size -= mmu_psram_get_rodata_segment_length();
#endif /* CONFIG_SPIRAM_RODATA */
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
psram_available_size -= mmu_psram_get_text_segment_length();
#endif /* CONFIG_SPIRAM_FETCH_INSTRUCTIONS */
ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &byte_aligned_size);
assert(ret == ESP_OK);
total_mapped_size += MIN(byte_aligned_size, psram_available_size - total_mapped_size);
#if CONFIG_IDF_TARGET_ESP32S2
if (total_mapped_size < psram_available_size) {
size_t word_aligned_size = 0;
ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &word_aligned_size);
assert(ret == ESP_OK);
total_mapped_size += MIN(word_aligned_size, psram_available_size - total_mapped_size);
}
#endif
return total_mapped_size;
}
}
size_t esp_psram_get_heap_size_to_protect(void)
{
// return if the PSRAM is not enabled
if (!s_psram_ctx.is_chip_initialised) {
return 0;
}
if (s_psram_ctx.is_initialised) {
return s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size + s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size;
} else {
size_t effective_mapped_size = esp_psram_get_effective_mapped_size();
if (effective_mapped_size == 0) {
return 0;
}
effective_mapped_size -= s_get_ext_bss_size();
effective_mapped_size -= s_get_ext_noinit_size();
#if CONFIG_IDF_TARGET_ESP32
effective_mapped_size -= esp_himem_reserved_area_size() - 1;
#endif
return effective_mapped_size;
}
}
#endif /* CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION */
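Connecting the two halves of the change: the size computed by esp_psram_get_heap_size_to_protect() above is what the cpu_region_protect.c files feed into their last TOR entry. A stand-alone arithmetic sketch with placeholder addresses and granularity (not taken from this commit) shows the boundary that ends up in the PMP:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Round up to the PMP region granularity, matching the ALIGN_UP macro used above. */
#define ALIGN_UP(addr, align) (((addr) + (align) - 1) & ~((align) - 1))

int main(void)
{
    /* Placeholder values: end of the page-aligned rodata mapping and the heap size
     * that esp_psram_get_heap_size_to_protect() might report. */
    const uint32_t page_aligned_drom_resv_end = 0x420C0000;
    const uint32_t available_psram_heap = 0x001F4000;   /* ~2 MB                       */
    const uint32_t granularity = 0x1000;                /* hypothetical 4 KB PMP grain */

    uint32_t rw_window_top = ALIGN_UP(page_aligned_drom_resv_end + available_psram_heap, granularity);
    printf("Top of the RW PMP window: 0x%08" PRIx32 "\n", rw_window_top);  /* 0x422B4000 */
    return 0;
}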

View File

@@ -278,6 +278,11 @@ SECTIONS
*/
. += _esp_flash_mmap_prefetch_pad_size;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
/* Align the end of the flash text region to the PMP granularity, since PSRAM memory protection is enabled */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_FETCH_INSTRUCTIONS && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
_text_end = ABSOLUTE(.);
/**
* Mark the flash.text end.
@@ -418,6 +423,12 @@ SECTIONS
*(.tdata .tdata.* .gnu.linkonce.td.*)
. = ALIGN(ALIGNOF(.flash.tbss));
#if CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
/* Align the end of the flash rodata region to the PMP granularity, since PSRAM memory protection is enabled */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_RODATA && CONFIG_SPIRAM_PRE_CONFIGURE_MEMORY_PROTECTION
_thread_local_data_end = ABSOLUTE(.);
} > default_rodata_seg
ASSERT_SECTIONS_GAP(.flash.tdata, .flash.tbss)

View File

@@ -136,6 +136,11 @@ SECTIONS
*/
. += _esp_flash_mmap_prefetch_pad_size;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
/* Align the end of flash text region as per PMP granularity */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_FETCH_INSTRUCTIONS
_text_end = ABSOLUTE(.);
/**
* Mark the flash.text end.
@@ -276,6 +281,12 @@ SECTIONS
*(.tdata .tdata.* .gnu.linkonce.td.*)
. = ALIGN(ALIGNOF(.flash.tbss));
#if CONFIG_SPIRAM_RODATA
/* Align the end of flash rodata region as per PMP granularity */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_RODATA
_thread_local_data_end = ABSOLUTE(.);
} > default_rodata_seg
ASSERT_SECTIONS_GAP(.flash.tdata, .flash.tbss)

View File

@@ -293,6 +293,11 @@ SECTIONS
*/
. += _esp_flash_mmap_prefetch_pad_size;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
/* Align the end of flash text region as per PMP granularity */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_FETCH_INSTRUCTIONS
_text_end = ABSOLUTE(.);
/**
* Mark the flash.text end.
@@ -440,6 +445,12 @@ SECTIONS
*(.tdata .tdata.* .gnu.linkonce.td.*)
. = ALIGN(ALIGNOF(.flash.tbss));
#if CONFIG_SPIRAM_RODATA
/* Align the end of flash rodata region as per PMP granularity */
. = ALIGN(_esp_pmp_align_size);
#endif // CONFIG_SPIRAM_RODATA
_thread_local_data_end = ABSOLUTE(.);
} > rodata_seg_low
ASSERT_SECTIONS_GAP(.flash.tdata, .flash.tbss)