From fb1ef7c6d6baa6a794cf56f8ee18971f96237039 Mon Sep 17 00:00:00 2001
From: Mahavir Jain
Date: Fri, 10 Mar 2023 09:36:32 +0530
Subject: [PATCH] esp32h2: enable memory protection scheme using PMA and PMP

Closes IDF-6452
---
 .../port/esp32h2/cpu_region_protect.c         | 206 +++++++++++++-----
 .../esp32h2/include/soc/Kconfig.soc_caps.in   |   8 +
 components/soc/esp32h2/include/soc/soc_caps.h |   3 +
 3 files changed, 163 insertions(+), 54 deletions(-)

diff --git a/components/esp_hw_support/port/esp32h2/cpu_region_protect.c b/components/esp_hw_support/port/esp32h2/cpu_region_protect.c
index e006547c73..cfe2d66837 100644
--- a/components/esp_hw_support/port/esp32h2/cpu_region_protect.c
+++ b/components/esp_hw_support/port/esp32h2/cpu_region_protect.c
@@ -10,83 +10,181 @@
 #include "esp_cpu.h"
 #include "esp_fault.h"
 
-// ESP32H2-TODO: IDF-6452
+#ifdef BOOTLOADER_BUILD
+// Without L bit set
+#define CONDITIONAL_NONE 0x0
+#define CONDITIONAL_RX PMP_R | PMP_X
+#define CONDITIONAL_RW PMP_R | PMP_W
+#define CONDITIONAL_RWX PMP_R | PMP_W | PMP_X
+#else
+// With L bit set
+#define CONDITIONAL_NONE NONE
+#define CONDITIONAL_RX RX
+#define CONDITIONAL_RW RW
+#define CONDITIONAL_RWX RWX
+#endif
+
+static void esp_cpu_configure_invalid_regions(void)
+{
+    const unsigned PMA_NONE = PMA_L | PMA_EN;
+    __attribute__((unused)) const unsigned PMA_RW = PMA_L | PMA_EN | PMA_R | PMA_W;
+    __attribute__((unused)) const unsigned PMA_RX = PMA_L | PMA_EN | PMA_R | PMA_X;
+    __attribute__((unused)) const unsigned PMA_RWX = PMA_L | PMA_EN | PMA_R | PMA_W | PMA_X;
+
+    // 1. Gap at bottom of address space
+    PMA_ENTRY_SET_TOR(0, SOC_DEBUG_LOW, PMA_TOR | PMA_NONE);
+
+    // 2. Gap between debug region & IROM
+    PMA_ENTRY_SET_TOR(1, SOC_DEBUG_HIGH, PMA_NONE);
+    PMA_ENTRY_SET_TOR(2, SOC_IROM_MASK_LOW, PMA_TOR | PMA_NONE);
+
+    // 3. Gap between ROM & RAM
+    PMA_ENTRY_SET_TOR(3, SOC_DROM_MASK_HIGH, PMA_NONE);
+    PMA_ENTRY_SET_TOR(4, SOC_IRAM_LOW, PMA_TOR | PMA_NONE);
+
+    // 4. Gap between DRAM and I_Cache
+    PMA_ENTRY_SET_TOR(5, SOC_IRAM_HIGH, PMA_NONE);
+    PMA_ENTRY_SET_TOR(6, SOC_IROM_LOW, PMA_TOR | PMA_NONE);
+
+    // 5. Gap between D_Cache & LP_RAM
+    PMA_ENTRY_SET_TOR(7, SOC_DROM_HIGH, PMA_NONE);
+    PMA_ENTRY_SET_TOR(8, SOC_RTC_IRAM_LOW, PMA_TOR | PMA_NONE);
+
+    // 6. Gap between LP memory & peripheral addresses
+    PMA_ENTRY_SET_TOR(9, SOC_RTC_IRAM_HIGH, PMA_NONE);
+    PMA_ENTRY_SET_TOR(10, SOC_PERIPHERAL_LOW, PMA_TOR | PMA_NONE);
+
+    // 7. End of address space
+    PMA_ENTRY_SET_TOR(11, SOC_PERIPHERAL_HIGH, PMA_NONE);
+    PMA_ENTRY_SET_TOR(12, UINT32_MAX, PMA_TOR | PMA_NONE);
+}
+
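+/* Worked example of the TOR chaining used above (and by the PMP entries
+ * further below), following the standard RISC-V semantics that the PMA
+ * checker mirrors here: an entry configured with TOR matches any address a
+ * where  addr[i-1] <= a < addr[i], i.e. the previous entry's address
+ * register supplies the floor of the range. So the pair
+ *
+ *     PMA_ENTRY_SET_TOR(1, SOC_DEBUG_HIGH, PMA_NONE);
+ *     PMA_ENTRY_SET_TOR(2, SOC_IROM_MASK_LOW, PMA_TOR | PMA_NONE);
+ *
+ * covers exactly the gap [SOC_DEBUG_HIGH, SOC_IROM_MASK_LOW). The entry is
+ * enabled (PMA_EN) but grants no R/W/X permission, so any access to the gap
+ * faults.
+ */
+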
 void esp_cpu_configure_region_protection(void)
 {
     /* Notes on implementation:
      *
      * 1) Note: ESP32-H2 CPU doesn't support overlapping PMP regions
      *
-     * 2) Therefore, we use TOR (top of range) entries to map the whole address
-     *    space, bottom to top.
+     * 2) ESP32-H2 supports 16 PMA regions, so we use this feature to block all the invalid address ranges
      *
-     * 3) There are not enough entries to describe all the memory regions 100% accurately.
+     * 3) We use a combination of NAPOT (Naturally Aligned Power Of Two) and TOR (top of range)
+     *    entries to map all the valid address space, bottom to top. This leaves us with some extra
+     *    PMP entries which can be used to provide more granular access control.
      *
-     * 4) This means some gaps (invalid memory) are accessible. Priority for extending regions
-     *    to cover gaps is to extend read-only or read-execute regions or read-only regions only
-     *    (executing unmapped addresses should always fault with invalid instruction, read-only means
-     *    stores will correctly fault even if reads may return some invalid value.)
-     *
-     * 5) Entries are grouped in order with some static asserts to try and verify everything is
+     * 4) Entries are grouped in order with some static asserts to try and verify everything is
      *    correct.
      */
-    const unsigned NONE = PMP_L | PMP_TOR;
-    const unsigned RW = PMP_L | PMP_TOR | PMP_R | PMP_W;
-    const unsigned RX = PMP_L | PMP_TOR | PMP_R | PMP_X;
-    const unsigned RWX = PMP_L | PMP_TOR | PMP_R | PMP_W | PMP_X;
 
-    // 1. Gap at bottom of address space
-    PMP_ENTRY_SET(0, SOC_DEBUG_LOW, NONE);
+    /* There are 4 configuration scenarios for SRAM:
+     *
+     * 1. Bootloader build:
+     *    - We cannot set the lock bit as we need to reconfigure it again for the application.
+     *      We configure PMP to cover the entire valid IRAM and DRAM range.
+     *
+     * 2. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT enabled:
+     *    - We split the SRAM into IRAM and DRAM such that the IRAM region cannot be written to
+     *      and the DRAM region cannot be executed. We use the _iram_end and _data_start markers
+     *      to set the boundaries. We also lock these entries so that the R/W/X permissions are
+     *      enforced even in machine mode.
+     *
+     * 3. Application build with CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT disabled:
+     *    - The IRAM-DRAM split is not enabled, so we only need to ensure that accesses succeed
+     *      for valid address ranges; for that we set PMP to cover the entire valid IRAM and
+     *      DRAM region. We also lock these entries so that the R/W/X permissions are enforced
+     *      even in machine mode.
+     *
+     * 4. CPU is in OCD debug mode:
+     *    - The IRAM-DRAM split is not enabled so that OpenOCD can write to and execute from
+     *      IRAM. We set PMP to cover the entire valid IRAM and DRAM region. We also lock these
+     *      entries so that the R/W/X permissions are enforced even in machine mode.
+     */
+    const unsigned NONE = PMP_L;
+    const unsigned R = PMP_L | PMP_R;
+    const unsigned RW = PMP_L | PMP_R | PMP_W;
+    const unsigned RX = PMP_L | PMP_R | PMP_X;
+    const unsigned RWX = PMP_L | PMP_R | PMP_W | PMP_X;
 
-    // 2. Debug region
-    PMP_ENTRY_SET(1, SOC_DEBUG_HIGH, RWX);
+    //
+    // Configure all the invalid address regions using PMA
+    //
+    esp_cpu_configure_invalid_regions();
+
+    //
+    // Configure all the valid address regions using PMP
+    //
+
+    // 1. Debug region
+    const uint32_t pmpaddr0 = PMPADDR_NAPOT(SOC_DEBUG_LOW, SOC_DEBUG_HIGH);
+    PMP_ENTRY_SET(0, pmpaddr0, PMP_NAPOT | RWX);
     _Static_assert(SOC_DEBUG_LOW < SOC_DEBUG_HIGH, "Invalid CPU debug region");
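+    /* NAPOT entries encode a naturally aligned power-of-two region's base and
+     * size in a single pmpaddr register. Per the RISC-V privileged spec the
+     * packing is  pmpaddr = (base >> 2) | ((size >> 3) - 1);  e.g. a 64 KiB
+     * region at 0x40000000 encodes as (0x40000000 >> 2) | (0x10000 / 8 - 1)
+     * = 0x10001FFF. PMPADDR_NAPOT(LOW, HIGH) is assumed to perform this
+     * packing for the [LOW, HIGH) range.
+     */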
 
-    // 3. Gap between debug region & IROM
-    PMP_ENTRY_SET(2, SOC_IROM_MASK_LOW, NONE);
-    _Static_assert(SOC_DEBUG_HIGH < SOC_IROM_MASK_LOW, "Invalid PMP entry order");
+    // 2.1 I-ROM
+    PMP_ENTRY_SET(1, SOC_IROM_MASK_LOW, NONE);
+    PMP_ENTRY_SET(2, SOC_IROM_MASK_HIGH, PMP_TOR | RX);
+    _Static_assert(SOC_IROM_MASK_LOW < SOC_IROM_MASK_HIGH, "Invalid I-ROM region");
 
-    // 4. ROM
-    PMP_ENTRY_SET(3, SOC_DROM_MASK_HIGH, RX);
-    _Static_assert(SOC_IROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid ROM region");
+    // 2.2 D-ROM
+    PMP_ENTRY_SET(3, SOC_DROM_MASK_LOW, NONE);
+    PMP_ENTRY_SET(4, SOC_DROM_MASK_HIGH, PMP_TOR | R);
+    _Static_assert(SOC_DROM_MASK_LOW < SOC_DROM_MASK_HIGH, "Invalid D-ROM region");
 
-    // 5. Gap between ROM & RAM
-    PMP_ENTRY_SET(4, SOC_IRAM_LOW, NONE);
-    _Static_assert(SOC_DROM_MASK_HIGH < SOC_IRAM_LOW, "Invalid PMP entry order");
+    if (esp_cpu_dbgr_is_attached()) {
+        // Anti-FI check that the CPU is really in OCD mode
+        ESP_FAULT_ASSERT(esp_cpu_dbgr_is_attached());
 
-    // 6. RAM
-    PMP_ENTRY_SET(5, SOC_IRAM_HIGH, RWX);
-    _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
+        // 3. IRAM and DRAM
+        PMP_ENTRY_SET(5, SOC_IRAM_LOW, NONE);
+        PMP_ENTRY_SET(6, SOC_IRAM_HIGH, PMP_TOR | RWX);
+        _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
+    } else {
+#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
+        extern int _iram_end;
+        // 3. IRAM and DRAM
+        /* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits.
+         * The bootloader might have granted extra permissions, and those wouldn't be cleared
+         * otherwise.
+         */
+        PMP_ENTRY_CFG_RESET(5);
+        PMP_ENTRY_CFG_RESET(6);
+        PMP_ENTRY_CFG_RESET(7);
+        PMP_ENTRY_SET(5, SOC_IRAM_LOW, NONE);
+        PMP_ENTRY_SET(6, (int)&_iram_end, PMP_TOR | RX);
+        PMP_ENTRY_SET(7, SOC_DRAM_HIGH, PMP_TOR | RW);
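+        /* With the split enabled, TOR chaining yields:
+         *     [SOC_IRAM_LOW, _iram_end)   instructions -> RX (stores fault)
+         *     [_iram_end, SOC_DRAM_HIGH)  data         -> RW (execution faults)
+         */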
+#else
+        // 3. IRAM and DRAM
+        PMP_ENTRY_SET(5, SOC_IRAM_LOW, CONDITIONAL_NONE);
+        PMP_ENTRY_SET(6, SOC_IRAM_HIGH, PMP_TOR | CONDITIONAL_RWX);
+        _Static_assert(SOC_IRAM_LOW < SOC_IRAM_HIGH, "Invalid RAM region");
+#endif
+    }
 
-    // 7. Gap between DRAM and I_Cache
-    PMP_ENTRY_SET(6, SOC_IROM_LOW, NONE);
-    _Static_assert(SOC_IRAM_HIGH < SOC_IROM_LOW, "Invalid PMP entry order");
-
-    // 8. I_Cache (flash)
-    PMP_ENTRY_SET(7, SOC_IROM_HIGH, RWX);
+    // 4. I_Cache (flash)
+    const uint32_t pmpaddr8 = PMPADDR_NAPOT(SOC_IROM_LOW, SOC_IROM_HIGH);
+    PMP_ENTRY_SET(8, pmpaddr8, PMP_NAPOT | RX);
     _Static_assert(SOC_IROM_LOW < SOC_IROM_HIGH, "Invalid I_Cache region");
 
-    // 9. D_Cache (flash)
-    PMP_ENTRY_SET(8, SOC_DROM_HIGH, RW);
+    // 5. D_Cache (flash)
+    const uint32_t pmpaddr9 = PMPADDR_NAPOT(SOC_DROM_LOW, SOC_DROM_HIGH);
+    PMP_ENTRY_SET(9, pmpaddr9, PMP_NAPOT | R);
     _Static_assert(SOC_DROM_LOW < SOC_DROM_HIGH, "Invalid D_Cache region");
 
-    // 10. Gap between D_Cache & LP_RAM
-    PMP_ENTRY_SET(9, SOC_RTC_IRAM_LOW, NONE);
-    _Static_assert(SOC_DROM_HIGH < SOC_RTC_IRAM_LOW, "Invalid PMP entry order");
-
-    // 16. LP memory
-    PMP_ENTRY_SET(10, SOC_RTC_IRAM_HIGH, RWX);
+    // 6. LP memory
+#if CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT && !BOOTLOADER_BUILD
+    extern int _rtc_text_end;
+    /* Reset the corresponding PMP config because PMP_ENTRY_SET only sets the given bits.
+     * The bootloader might have granted extra permissions, and those wouldn't be cleared
+     * otherwise.
+     */
+    PMP_ENTRY_CFG_RESET(10);
+    PMP_ENTRY_CFG_RESET(11);
+    PMP_ENTRY_CFG_RESET(12);
+    PMP_ENTRY_SET(10, SOC_RTC_IRAM_LOW, NONE);
+    PMP_ENTRY_SET(11, (int)&_rtc_text_end, PMP_TOR | RX);
+    PMP_ENTRY_SET(12, SOC_RTC_IRAM_HIGH, PMP_TOR | RW);
+#else
+    const uint32_t pmpaddr10 = PMPADDR_NAPOT(SOC_RTC_IRAM_LOW, SOC_RTC_IRAM_HIGH);
+    PMP_ENTRY_SET(10, pmpaddr10, PMP_NAPOT | CONDITIONAL_RWX);
     _Static_assert(SOC_RTC_IRAM_LOW < SOC_RTC_IRAM_HIGH, "Invalid RTC IRAM region");
+#endif
 
-    // 17. Gap between LP memory & peripheral addresses
-    PMP_ENTRY_SET(11, SOC_PERIPHERAL_LOW, NONE);
-    _Static_assert(SOC_RTC_IRAM_HIGH < SOC_PERIPHERAL_LOW, "Invalid PMP entry order");
-
-    // 18. Peripheral addresses
-    PMP_ENTRY_SET(12, SOC_PERIPHERAL_HIGH, RW);
+    // 7. Peripheral addresses
+    const uint32_t pmpaddr13 = PMPADDR_NAPOT(SOC_PERIPHERAL_LOW, SOC_PERIPHERAL_HIGH);
+    PMP_ENTRY_SET(13, pmpaddr13, PMP_NAPOT | RW);
     _Static_assert(SOC_PERIPHERAL_LOW < SOC_PERIPHERAL_HIGH, "Invalid peripheral region");
-
-    // 19. End of address space
-    PMP_ENTRY_SET(13, UINT32_MAX, NONE); // all but last 4 bytes
-    PMP_ENTRY_SET(14, UINT32_MAX, PMP_L | PMP_NA4); // last 4 bytes
 }
diff --git a/components/soc/esp32h2/include/soc/Kconfig.soc_caps.in b/components/soc/esp32h2/include/soc/Kconfig.soc_caps.in
index d342077dd8..b565f90969 100644
--- a/components/soc/esp32h2/include/soc/Kconfig.soc_caps.in
+++ b/components/soc/esp32h2/include/soc/Kconfig.soc_caps.in
@@ -307,6 +307,14 @@ config SOC_CPU_WATCHPOINT_SIZE
     hex
     default 0x80000000
 
+config SOC_CPU_HAS_PMA
+    bool
+    default y
+
+config SOC_CPU_IDRAM_SPLIT_USING_PMP
+    bool
+    default y
+
 config SOC_MMU_PAGE_SIZE_CONFIGURABLE
     bool
     default y
diff --git a/components/soc/esp32h2/include/soc/soc_caps.h b/components/soc/esp32h2/include/soc/soc_caps.h
index abb72b6a14..deefa9d2a6 100644
--- a/components/soc/esp32h2/include/soc/soc_caps.h
+++ b/components/soc/esp32h2/include/soc/soc_caps.h
@@ -134,6 +134,9 @@
 #define SOC_CPU_WATCHPOINTS_NUM         4
 #define SOC_CPU_WATCHPOINT_SIZE         0x80000000 // bytes
 
+#define SOC_CPU_HAS_PMA                 1
+#define SOC_CPU_IDRAM_SPLIT_USING_PMP   1
+
 /*-------------------------- MMU CAPS ----------------------------------------*/
 #define SOC_MMU_PAGE_SIZE_CONFIGURABLE  (1)
 #define SOC_MMU_PERIPH_NUM              (1U)
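
A quick way to exercise the IDRAM split on target, as a sketch only: it assumes
CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT=y and a standard app_main; dummy_func is a
hypothetical stand-in for any code placed in IRAM via IRAM_ATTR. With the split
active, IRAM is mapped RX, so the store below should trap with a store access
fault instead of silently corrupting code.

    #include <stdint.h>
    #include "esp_attr.h"

    // Placed in IRAM via IRAM_ATTR so it falls inside the RX split region.
    static void IRAM_ATTR dummy_func(void) { }

    void app_main(void)
    {
        volatile uint32_t *p = (volatile uint32_t *)(void *)&dummy_func;
        *p = 0; // with the split enabled, this store should panic with a
                // store access fault instead of succeeding
    }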