feat(esp_hw_support): Support memory protection using PMA and PMP for ESP32-C5

Author: harshal.patil
Date:   2024-05-31 22:31:57 +05:30
Parent: 980ac9bcf5
Commit: 84afc6a955
6 changed files with 86 additions and 73 deletions


@@ -248,6 +248,8 @@ menu "Bootloader config"
             Protects the unmapped memory regions of the entire address space from unintended accesses.
             This will ensure that an exception will be triggered whenever the CPU performs a memory
             operation on unmapped regions of the address space.
+            NOTE: Disabling this config on some targets (ESP32-C6, ESP32-H2, ESP32-C5) would not generate
+            an exception when reading from or writing to 0x0.

     config BOOTLOADER_WDT_ENABLE
         bool "Use RTC watchdog in start code"


@@ -47,6 +47,6 @@ void bootloader_init_mem(void)
 #ifdef CONFIG_BOOTLOADER_REGION_PROTECTION_ENABLE
     // protect memory region
-    esp_cpu_configure_region_protection(); // TODO: [ESP32C5] IDF-8833 PSRAM support write
+    esp_cpu_configure_region_protection();
 #endif
 }


@@ -23,6 +23,9 @@
 #define CONDITIONAL_RWX RWX
 #endif
+
+#define ALIGN_UP_TO_MMU_PAGE_SIZE(addr) (((addr) + (SOC_MMU_PAGE_SIZE) - 1) & ~((SOC_MMU_PAGE_SIZE) - 1))
+#define ALIGN_DOWN_TO_MMU_PAGE_SIZE(addr) ((addr) & ~((SOC_MMU_PAGE_SIZE) - 1))
+
 static void esp_cpu_configure_invalid_regions(void)
 {
     const unsigned PMA_NONE = PMA_L | PMA_EN;
@@ -30,50 +33,50 @@ static void esp_cpu_configure_invalid_regions(void)
     __attribute__((unused)) const unsigned PMA_RX = PMA_L | PMA_EN | PMA_R | PMA_X;
     __attribute__((unused)) const unsigned PMA_RWX = PMA_L | PMA_EN | PMA_R | PMA_W | PMA_X;

-    // 1. Gap at bottom of address space
-    PMA_ENTRY_SET_TOR(0, SOC_CPU_SUBSYSTEM_LOW, PMA_TOR | PMA_NONE);
+    // 0. Gap at bottom of address space
+    PMA_ENTRY_SET_NAPOT(0, 0, SOC_CPU_SUBSYSTEM_LOW, PMA_NAPOT | PMA_NONE);

-    // 2. Gap between debug region & IROM
+    // 1. Gap between debug region & IROM
     PMA_ENTRY_SET_TOR(1, SOC_CPU_SUBSYSTEM_HIGH, PMA_NONE);
     PMA_ENTRY_SET_TOR(2, SOC_IROM_MASK_LOW, PMA_TOR | PMA_NONE);

+    // 2. ROM has configured the ROM region to be cacheable, so we just need to lock the configuration
+    PMA_ENTRY_SET_TOR(3, SOC_IROM_MASK_LOW, PMA_NONE);
+    PMA_ENTRY_SET_TOR(4, SOC_DROM_MASK_HIGH, PMA_TOR | PMA_RX);
+
     // 3. Gap between ROM & RAM
-    PMA_ENTRY_SET_TOR(3, SOC_DROM_MASK_HIGH, PMA_NONE);
-    PMA_ENTRY_SET_TOR(4, SOC_IRAM_LOW, PMA_TOR | PMA_NONE);
+    PMA_ENTRY_SET_TOR(5, SOC_DROM_MASK_HIGH, PMA_NONE);
+    PMA_ENTRY_SET_TOR(6, SOC_IRAM_LOW, PMA_TOR | PMA_NONE);

     // 4. Gap between DRAM and I_Cache
-    PMA_ENTRY_SET_TOR(5, SOC_IRAM_HIGH, PMA_NONE);
-    PMA_ENTRY_SET_TOR(6, SOC_IROM_LOW, PMA_TOR | PMA_NONE);
+    PMA_ENTRY_SET_TOR(7, SOC_IRAM_HIGH, PMA_NONE);
+    PMA_ENTRY_SET_TOR(8, SOC_IROM_LOW, PMA_TOR | PMA_NONE);

-    // 5. Gap between D_Cache & LP_RAM
-    PMA_ENTRY_SET_TOR(7, SOC_DROM_HIGH, PMA_NONE);
-    PMA_ENTRY_SET_TOR(8, SOC_RTC_IRAM_LOW, PMA_TOR | PMA_NONE);
+    // 5. ROM has configured the MSPI region with RX permission, we should add W attribute for psram and lock the configuration
+    // This function sets invalid regions but this is a valid memory region configuration that could have
+    // been configured using PMP as well, but due to insufficient PMP entries we are configuring this using PMA.
+    PMA_ENTRY_SET_NAPOT(9, SOC_IROM_LOW, (SOC_IROM_HIGH - SOC_IROM_LOW), PMA_NAPOT | PMA_RWX);

-    // 6. Gap between LP memory & peripheral addresses
-    PMA_ENTRY_SET_TOR(9, SOC_RTC_IRAM_HIGH, PMA_NONE);
-    PMA_ENTRY_SET_TOR(10, SOC_PERIPHERAL_LOW, PMA_TOR | PMA_NONE);
+    // 6. Gap between D_Cache & LP_RAM
+    PMA_ENTRY_SET_TOR(10, SOC_DROM_HIGH, PMA_NONE);
+    PMA_ENTRY_SET_TOR(11, SOC_RTC_IRAM_LOW, PMA_TOR | PMA_NONE);

-    // 7. End of address space
-    PMA_ENTRY_SET_TOR(11, SOC_PERIPHERAL_HIGH, PMA_NONE);
-    PMA_ENTRY_SET_TOR(12, UINT32_MAX, PMA_TOR | PMA_NONE);
+    // 7. Gap between LP memory & peripheral addresses
+    PMA_ENTRY_SET_TOR(12, SOC_RTC_IRAM_HIGH, PMA_NONE);
+    PMA_ENTRY_SET_TOR(13, SOC_PERIPHERAL_LOW, PMA_TOR | PMA_NONE);
+
+    // 8. End of address space
+    PMA_ENTRY_SET_TOR(14, SOC_PERIPHERAL_HIGH, PMA_NONE);
+    PMA_ENTRY_SET_TOR(15, UINT32_MAX, PMA_TOR | PMA_NONE);
 }

 void esp_cpu_configure_region_protection(void)
 {
-    // ROM has configured the MSPI region with RX permission, we should add W attribute for psram
-    PMA_ENTRY_SET_NAPOT(0, SOC_IROM_LOW, (SOC_IROM_HIGH - SOC_IROM_LOW), PMA_NAPOT | PMA_EN | PMA_R | PMA_W | PMA_X);
-
-    // Configure just the area around 0x0 for now so that we at least get exceptions for
-    // writes/reads to NULL pointers, as well as code that relies on writes to 0x0
-    // to abort/assert
-    PMA_ENTRY_SET_NAPOT(1, 0, SOC_CPU_SUBSYSTEM_LOW, PMA_NAPOT | PMA_EN);
-    return;
-
     /* Notes on implementation:
      *
-     * 1) Note: ESP32-C6 CPU doesn't support overlapping PMP regions
+     * 1) Note: ESP32-C5 CPU support overlapping PMP regions // TODO: verify this statement?
      *
-     * 2) ESP32-C6 supports 16 PMA regions so we use this feature to block all the invalid address ranges
+     * 2) ESP32-C5 supports 16 PMA regions so we use this feature to block all the invalid address ranges
      *
      * 3) We use combination of NAPOT (Naturally Aligned Power Of Two) and TOR (top of range)
      * entries to map all the valid address space, bottom to top. This leaves us with some extra PMP entries
@@ -105,10 +108,10 @@ void esp_cpu_configure_region_protection(void)
      * We also lock these entries so the R/W/X permissions are enforced even for machine mode
      */
     const unsigned NONE = PMP_L;
-    const unsigned R = PMP_L | PMP_R;
-    const unsigned RW = PMP_L | PMP_R | PMP_W;
-    const unsigned RX = PMP_L | PMP_R | PMP_X;
-    const unsigned RWX = PMP_L | PMP_R | PMP_W | PMP_X;
+    __attribute__((unused)) const unsigned R = PMP_L | PMP_R;
+    __attribute__((unused)) const unsigned RW = PMP_L | PMP_R | PMP_W;
+    __attribute__((unused)) const unsigned RX = PMP_L | PMP_R | PMP_X;
+    __attribute__((unused)) const unsigned RWX = PMP_L | PMP_R | PMP_W | PMP_X;

     //
     // Configure all the invalid address regions using PMA
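
The PMA entries above use the two standard RISC-V address-matching modes: a TOR (top of range) entry is bounded below by the previous entry's address register, while a NAPOT entry describes a naturally aligned power-of-two window. As a hedged sketch of the NAPOT encoding only (the actual PMA_ENTRY_SET_NAPOT macro in esp_hw_support may encode this differently; napot_addr_field() is a hypothetical helper), the address register holds bits [33:2] of the base, with a trailing run of ones encoding the size:

```c
#include <assert.h>
#include <stdint.h>

// Hypothetical helper: compute the pmaaddr/pmpaddr field for a NAPOT region.
static inline uint32_t napot_addr_field(uint32_t base, uint32_t size)
{
    // size must be a power of two >= 8 bytes, and base must be aligned to size
    assert(size >= 8 && (size & (size - 1)) == 0 && (base & (size - 1)) == 0);
    // base bits [33:2] OR'd with (size/8 - 1), i.e. a trailing string of ones
    return (base >> 2) | ((size >> 3) - 1);
}
```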
@@ -119,36 +122,27 @@ void esp_cpu_configure_region_protection(void)
     // Configure all the valid address regions using PMP
     //

-    // 1. Debug region
+    // 1. CPU Subsystem region - contains interrupt config registers
     const uint32_t pmpaddr0 = PMPADDR_NAPOT(SOC_CPU_SUBSYSTEM_LOW, SOC_CPU_SUBSYSTEM_HIGH);
     PMP_ENTRY_SET(0, pmpaddr0, PMP_NAPOT | RWX);
-    _Static_assert(SOC_CPU_SUBSYSTEM_LOW < SOC_CPU_SUBSYSTEM_HIGH, "Invalid CPU debug region");
+    _Static_assert(SOC_CPU_SUBSYSTEM_LOW < SOC_CPU_SUBSYSTEM_HIGH, "Invalid CPU subsystem region");

-    // 2.1 I-ROM
+    // 2. I/D-ROM
     PMP_ENTRY_SET(1, SOC_IROM_MASK_LOW, NONE);
     PMP_ENTRY_SET(2, SOC_IROM_MASK_HIGH, PMP_TOR | RX);
-    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid I-ROM region");
+    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid I/D-ROM region");

-    // 2.2 D-ROM
-    PMP_ENTRY_SET(3, SOC_DROM_MASK_LOW, NONE);
-    PMP_ENTRY_SET(4, SOC_DROM_MASK_HIGH, PMP_TOR | R);
-    _Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid D-ROM region");
-
+    // 3. IRAM and DRAM
     if (esp_cpu_dbgr_is_attached()) {
         // Anti-FI check that cpu is really in ocd mode
         ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());

-        // 5. IRAM and DRAM
-        // const uint32_t pmpaddr5 = PMPADDR_NAPOT(SOC_IRAM_LOW, SOC_IRAM_HIGH);
-        // PMP_ENTRY_SET(5, pmpaddr5, PMP_NAPOT | RWX);
-        // _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
         PMP_ENTRY_SET(5, SOC_IRAM_LOW, NONE);
         PMP_ENTRY_SET(6, SOC_IRAM_HIGH, PMP_TOR | RWX);
         _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
     } else {
 #if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
         extern int _iram_end;
-        // 5. IRAM and DRAM
         /* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits
          * Bootloader might have given extra permissions and those won't be cleared
          */
@@ -159,55 +153,61 @@ void esp_cpu_configure_region_protection(void)
         PMP_ENTRY_SET(6, (int)&_iram_end, PMP_TOR | RX);
         PMP_ENTRY_SET(7, SOC_DRAM_HIGH, PMP_TOR | RW);
 #else
-        // 5. IRAM and DRAM
-        // const uint32_t pmpaddr5 = PMPADDR_NAPOT(SOC_IRAM_LOW, SOC_IRAM_HIGH);
-        // PMP_ENTRY_SET(5, pmpaddr5, PMP_NAPOT | CONDITIONAL_RWX);
-        // _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
-        PMP_ENTRY_SET(5, SOC_IRAM_LOW, NONE);
-        PMP_ENTRY_SET(6, SOC_IRAM_HIGH, PMP_TOR | RWX);
+        PMP_ENTRY_SET(5, SOC_IRAM_LOW, CONDITIONAL_NONE);
+        PMP_ENTRY_SET(6, SOC_IRAM_HIGH, PMP_TOR | CONDITIONAL_RWX);
         _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
 #endif
     }

-    // 4. I_Cache (flash)
+    // 4. I_Cache / D_Cache (flash)
+#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
+    extern int _instruction_reserved_end;
+    extern int _rodata_reserved_end;
+    const uint32_t irom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_instruction_reserved_end));
+    const uint32_t drom_resv_end = ALIGN_UP_TO_MMU_PAGE_SIZE((uint32_t)(&_rodata_reserved_end));
+    PMP_ENTRY_CFG_RESET(8);
+    PMP_ENTRY_CFG_RESET(9);
+    PMP_ENTRY_CFG_RESET(10);
+    PMP_ENTRY_SET(8, SOC_IROM_LOW, NONE);
+    PMP_ENTRY_SET(9, irom_resv_end, PMP_TOR | RX);
+    PMP_ENTRY_SET(10, drom_resv_end, PMP_TOR | R);
+#else
     const uint32_t pmpaddr8 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
-    PMP_ENTRY_SET(8, pmpaddr8, PMP_NAPOT | RX);
-    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid I_Cache region");
+    // Add the W attribute in the case of PSRAM
+    PMP_ENTRY_SET(8, pmpaddr8, PMP_NAPOT | CONDITIONAL_RWX);
+    _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid I/D_Cache region");
+#endif

-    // 5. D_Cache (flash)
-    const uint32_t pmpaddr9 = PMPADDR_NAPOT(SOC_DROM_LOW, SOC_DROM_HIGH);
-    PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | R);
-    _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid D_Cache region");
-
-    // 6. LP memory
+    // 5. LP memory
 #if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
     extern int _rtc_text_end;
     /* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits
      * Bootloader might have given extra permissions and those won't be cleared
      */
-    PMP_ENTRY_CFG_RESET(10);
     PMP_ENTRY_CFG_RESET(11);
     PMP_ENTRY_CFG_RESET(12);
     PMP_ENTRY_CFG_RESET(13);
-    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
+    PMP_ENTRY_CFG_RESET(14);
+    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
 #if CONFIG_ULP_COPROC_RESERVE_MEM
     // First part of LP mem is reserved for coprocessor
-    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW + CONFIG_ULP_COPROC_RESERVE_MEM, PMP_TOR | RW);
+    PMP_ENTRY_SET(12, SOC_RTC_IRAM_LOW + CONFIG_ULP_COPROC_RESERVE_MEM, PMP_TOR | RW);
 #else // CONFIG_ULP_COPROC_RESERVE_MEM
     // Repeat same previous entry, to ensure next entry has correct base address (TOR)
-    PMP_ENTRY_SET(11, SOC_RTC_IRAM_LOW, NONE);
+    PMP_ENTRY_SET(12, SOC_RTC_IRAM_LOW, NONE);
 #endif // !CONFIG_ULP_COPROC_RESERVE_MEM
-    PMP_ENTRY_SET(12, (int)&_rtc_text_end, PMP_TOR | RX);
-    PMP_ENTRY_SET(13, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
+    PMP_ENTRY_SET(13, (int)&_rtc_text_end, PMP_TOR | RX);
+    PMP_ENTRY_SET(14, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
 #else
-    const uint32_t pmpaddr10 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
-    PMP_ENTRY_SET(10, pmpaddr10, PMP_NAPOT | CONDITIONAL_RWX);
+    const uint32_t pmpaddr11 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
+    PMP_ENTRY_SET(11, pmpaddr11, PMP_NAPOT | CONDITIONAL_RWX);
     _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
 #endif

-    // 7. Peripheral addresses
-    const uint32_t pmpaddr14 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
-    PMP_ENTRY_SET(14, pmpaddr14, PMP_NAPOT | RW);
+    // 6. Peripheral addresses
+    const uint32_t pmpaddr15 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
+    PMP_ENTRY_SET(15, pmpaddr15, PMP_NAPOT | RW);
     _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
 }
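
The new CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT path above rounds the flash-mapped code/rodata boundaries up to an MMU page before using them as TOR limits. A small sketch of that arithmetic, assuming a 64 KiB MMU page (SOC_MMU_PAGE_SIZE is target- and config-dependent, so 0x10000 and the sample address are illustrative values only):

```c
#include <stdint.h>
#include <stdio.h>

#define MMU_PAGE_SIZE              0x10000U   // assumption for illustration
#define ALIGN_UP_TO_MMU_PAGE(addr) (((addr) + MMU_PAGE_SIZE - 1) & ~(MMU_PAGE_SIZE - 1))

int main(void)
{
    uint32_t irom_end = 0x42031234;   // e.g. the address of _instruction_reserved_end
    // Prints "0x42031234 -> 0x42040000": the RX region is extended to the next page boundary
    printf("0x%08x -> 0x%08x\n", irom_end, ALIGN_UP_TO_MMU_PAGE(irom_end));
    return 0;
}
```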


@@ -27,6 +27,9 @@ SECTIONS
         *rtc_wake_stub*.*(.text .text.*)
         *(.rtc_text_end_test)
+
+        /* Align the end of RTC code region as per PMP granularity */
+        . = ALIGN(_esp_pmp_align_size);
         _rtc_text_end = ABSOLUTE(.);
     } > lp_ram_seg
@@ -166,6 +169,9 @@ SECTIONS
     /* Marks the end of IRAM code segment */
     .iram0.text_end (NOLOAD) :
     {
+        /* Align the end of code region as per PMP region granularity */
+        . = ALIGN(_esp_pmp_align_size);
+
         ALIGNED_SYMBOL(4, _iram_text_end)
     } > sram_seg
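
These ALIGN(_esp_pmp_align_size) statements push the RTC and IRAM code end symbols onto a PMP-granularity boundary, so the TOR split between the RX and RW regions programmed in esp_cpu_configure_region_protection() never cuts through a granule. A hedged sketch of a sanity check one could add (not part of this commit; check_pmp_boundary() is hypothetical and the 128-byte constant mirrors SOC_CPU_PMP_REGION_GRANULARITY introduced below):

```c
#include <assert.h>
#include <stdint.h>

#define PMP_GRANULARITY 128U   // assumption: matches SOC_CPU_PMP_REGION_GRANULARITY for ESP32-C5

extern int _iram_end;          // boundary symbol consumed by the PMP I/D split above

static inline void check_pmp_boundary(void)
{
    // A TOR boundary that is not granule-aligned would be silently rounded by the PMP unit
    assert(((uintptr_t)&_iram_end % PMP_GRANULARITY) == 0);
}
```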


@@ -235,6 +235,10 @@ config SOC_CPU_IDRAM_SPLIT_USING_PMP
         bool
         default y

+    config SOC_CPU_PMP_REGION_GRANULARITY
+        int
+        default 128
+
     config SOC_DS_SIGNATURE_MAX_BIT_LEN
         int
         default 3072


@@ -154,6 +154,7 @@
 #define SOC_CPU_HAS_PMA                 1
 #define SOC_CPU_IDRAM_SPLIT_USING_PMP   1
+#define SOC_CPU_PMP_REGION_GRANULARITY  128

 /*-------------------------- DIGITAL SIGNATURE CAPS ----------------------------------------*/
 /** The maximum length of a Digital Signature in bits. */
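
SOC_CPU_PMP_REGION_GRANULARITY documents the minimum PMP region size and alignment (128 bytes) on ESP32-C5. For reference, the RISC-V privileged spec describes a generic way to probe this value at run time; a hedged sketch (only meaningful before any locked PMP entries are programmed, and assuming machine-mode access to the PMP CSRs):

```c
#include <stdint.h>

// Probe PMP granularity per the RISC-V privileged spec: disable entry 0, write all-ones
// to pmpaddr0, read it back; if G is the index of the lowest set bit, granularity = 2^(G+2).
static inline uint32_t pmp_granularity_bytes(void)
{
    uint32_t addr;
    __asm__ volatile ("csrw pmpcfg0, zero");                     // entry 0 must be unlocked/disabled
    __asm__ volatile ("csrw pmpaddr0, %0" :: "r"(0xFFFFFFFFu));  // write all-ones
    __asm__ volatile ("csrr %0, pmpaddr0" : "=r"(addr));         // hard-wired low bits read back as 0
    return 1u << (__builtin_ctz(addr) + 2);                      // expected to report 128 on ESP32-C5
}
```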