From 4144746fc3bc5c5d91df611e08008185ba3cce1f Mon Sep 17 00:00:00 2001
From: Armando
Date: Fri, 10 Feb 2023 20:40:51 +0800
Subject: [PATCH] esp_mm: shared and non-shared mapping

---
 components/esp_mm/esp_mmu_map.c               | 111 ++++++++++++++++--
 components/esp_mm/include/esp_mmu_map.h       |  24 +++-
 .../esp_mm/test_apps/mmap/main/test_mmap.c    |   6 +-
 .../esp_mm/test_apps/mmap/main/test_mmap_hw.c |   6 +-
 components/spi_flash/flash_mmap.c             |   4 +-
 5 files changed, 131 insertions(+), 20 deletions(-)

diff --git a/components/esp_mm/esp_mmu_map.c b/components/esp_mm/esp_mmu_map.c
index 8c445bd2d7..29d158603e 100644
--- a/components/esp_mm/esp_mmu_map.c
+++ b/components/esp_mm/esp_mmu_map.c
@@ -114,8 +114,14 @@ typedef struct {
     mem_region_t mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM];
 } mmu_ctx_t;
 
+
 static mmu_ctx_t s_mmu_ctx;
 
+#if ENABLE_PADDR_CHECK
+static bool s_is_enclosed(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size);
+static bool s_is_overlapped(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size);
+#endif //#if ENABLE_PADDR_CHECK
+
 #if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
 static void s_reserve_irom_region(mem_region_t *hw_mem_regions, int region_nums)
@@ -387,7 +393,7 @@ static void IRAM_ATTR NOINLINE_ATTR s_do_mapping(mmu_target_t target, uint32_t v
     ESP_EARLY_LOGV(TAG, "actual_mapped_len is 0x%"PRIx32, actual_mapped_len);
 }
 
-esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_mem_caps_t caps, mmu_target_t target, void **out_ptr)
+esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target, mmu_mem_caps_t caps, int flags, void **out_ptr)
 {
     esp_err_t ret = ESP_FAIL;
     ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
@@ -436,22 +442,35 @@ esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_mem_caps_t caps,
     mem_block_t *mem_block = NULL;
 
 #if ENABLE_PADDR_CHECK
-    bool is_mapped = false;
+    bool is_enclosed = false;
+    bool is_overlapped = false;
+    bool allow_overlap = flags & ESP_MMU_MMAP_FLAG_PADDR_SHARED;
+
     TAILQ_FOREACH(mem_block, &found_region->mem_block_head, entries) {
         if (target == mem_block->target) {
-            if ((paddr_start >= mem_block->paddr_start) && ((paddr_start + aligned_size) <= mem_block->paddr_end)) {
-                //the to-be-mapped paddr region is mapped already
-                is_mapped = true;
+            if ((s_is_enclosed(mem_block->paddr_start, mem_block->paddr_end, paddr_start, aligned_size))) {
+                //the to-be-mapped paddr block is mapped already
+                is_enclosed = true;
+                break;
+            }
+
+            if (!allow_overlap && (s_is_overlapped(mem_block->paddr_start, mem_block->paddr_end, paddr_start, aligned_size))) {
+                is_overlapped = true;
                 break;
             }
         }
     }
 
-    if (is_mapped) {
-        ESP_LOGW(TAG, "paddr region is mapped already, vaddr_start: %p, size: 0x%x", (void *)mem_block->vaddr_start, mem_block->size);
+    if (is_enclosed) {
+        ESP_LOGW(TAG, "paddr block is mapped already, vaddr_start: %p, size: 0x%x", (void *)mem_block->vaddr_start, mem_block->size);
         *out_ptr = (void *)mem_block->vaddr_start;
         return ESP_ERR_INVALID_STATE;
     }
+
+    if (!allow_overlap && is_overlapped) {
+        ESP_LOGE(TAG, "paddr block is overlapped with an already mapped paddr block");
+        return ESP_ERR_INVALID_ARG;
+    }
 #endif //#if ENABLE_PADDR_CHECK
 
     new_block = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
@@ -511,9 +530,6 @@ esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_mem_caps_t caps,
     return ESP_OK;
 
 err:
-    if (new_block) {
-        free(new_block);
-    }
     if (dummy_tail) {
         free(dummy_tail);
     }
@@ -682,7 +698,7 @@ static bool NOINLINE_ATTR IRAM_ATTR s_vaddr_to_paddr(uint32_t vaddr, esp_paddr_t
 esp_err_t esp_mmu_vaddr_to_paddr(void *vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target)
 {
     ESP_RETURN_ON_FALSE(vaddr && out_paddr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
-    ESP_RETURN_ON_FALSE(mmu_ll_check_valid_ext_vaddr_region(0, (uint32_t)vaddr, 1), ESP_ERR_INVALID_ARG, TAG, "not a valid external virtual address");
+    ESP_RETURN_ON_FALSE(mmu_hal_check_valid_ext_vaddr_region(0, (uint32_t)vaddr, 1, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION), ESP_ERR_INVALID_ARG, TAG, "not a valid external virtual address");
 
     esp_paddr_t paddr = 0;
     mmu_target_t target = 0;
@@ -722,3 +738,76 @@ esp_err_t esp_mmu_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vad
 
     return ESP_OK;
 }
+
+
+#if ENABLE_PADDR_CHECK
+/*---------------------------------------------------------------
+                  Helper functions to check block
+---------------------------------------------------------------*/
+/**
+ * Check if a new block is enclosed by another, e.g.
+ *
+ * This is enclosed:
+ *
+ *        new_block_start            new_block_end
+ *               |-------- New Block --------|
+ *        |--------------- Block ---------------|
+ *   block_start                             block_end
+ *
+ * @note Note the difference between `s_is_overlapped()` below
+ *
+ * @param block_start     An original block start
+ * @param block_end       An original block end
+ * @param new_block_start New block start
+ * @param new_block_size  New block size
+ *
+ * @return True: new block is enclosed; False: new block is not enclosed
+ */
+static bool s_is_enclosed(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size)
+{
+    bool is_enclosed = false;
+    uint32_t new_block_end = new_block_start + new_block_size;
+
+    if ((new_block_start >= block_start) && (new_block_end <= block_end)) {
+        is_enclosed = true;
+    } else {
+        is_enclosed = false;
+    }
+
+    return is_enclosed;
+}
+
+/**
+ * Check if a new block is overlapped by another, e.g.
+ *
+ * This is overlapped:
+ *
+ *        new_block_start                  new_block_end
+ *               |---------- New Block ----------|
+ *        |--------------- Block ---------------|
+ *   block_start                            block_end
+ *
+ * @note Note the difference between `s_is_enclosed()` above
+ *
+ * @param block_start     An original block start
+ * @param block_end       An original block end
+ * @param new_block_start New block start
+ * @param new_block_size  New block size
+ *
+ * @return True: new block is overlapped; False: new block is not overlapped
+ */
+static bool s_is_overlapped(uint32_t block_start, uint32_t block_end, uint32_t new_block_start, uint32_t new_block_size)
+{
+    bool is_overlapped = false;
+    uint32_t new_block_end = new_block_start + new_block_size;
+
+    if (((new_block_start < block_start) && (new_block_end > block_start)) ||
+        ((new_block_start < block_end) && (new_block_end > block_end))) {
+        is_overlapped = true;
+    } else {
+        is_overlapped = false;
+    }
+
+    return is_overlapped;
+}
+#endif //#if ENABLE_PADDR_CHECK
diff --git a/components/esp_mm/include/esp_mmu_map.h b/components/esp_mm/include/esp_mmu_map.h
index 87a27f09ce..33d3396441 100644
--- a/components/esp_mm/include/esp_mmu_map.h
+++ b/components/esp_mm/include/esp_mmu_map.h
@@ -8,6 +8,7 @@
 #include <stdlib.h>
 #include <stdint.h>
 #include "esp_err.h"
+#include "esp_bit_defs.h"
 #include "hal/mmu_types.h"
 
 #ifdef __cplusplus
@@ -34,6 +35,24 @@ extern "C" {
  * - A Slot is the vaddr range between 2 blocks.
  */
+
+/**
+ * MMAP flags
+ */
+/**
+ * @brief Share this mapping
+ *
+ * - If this flag is set, a paddr block can be mapped to multiple vaddr blocks.
+ *   1. This happens when:
+ *      - the to-be-mapped paddr block overlaps with an already mapped paddr block.
+ *      - the to-be-mapped paddr block encloses an already mapped paddr block.
+ *   2. If the to-be-mapped paddr block is enclosed by an already mapped paddr block, no new mapping will happen and ESP_ERR_INVALID_STATE is returned. The out pointer will be the vaddr corresponding to the already mapped paddr.
+ *   3. If the to-be-mapped paddr block is totally the same as an already mapped paddr block, no new mapping will happen and ESP_ERR_INVALID_STATE is returned. The out pointer will be the corresponding vaddr.
+ *
+ * - If this flag isn't set, an overlapping to-be-mapped paddr block will lead to ESP_ERR_INVALID_ARG, while an enclosed or identical one still behaves as described in 2 and 3 above.
+ */
+#define ESP_MMU_MMAP_FLAG_PADDR_SHARED    BIT(0)
+
 /**
  * @brief Physical memory type
  */
@@ -46,8 +65,9 @@ typedef uint32_t esp_paddr_t;
  *
  * @param[in]  paddr_start  Start address of the physical memory block
  * @param[in]  size         Size to be mapped. Size will be rounded up to the nearest multiple of MMU page size
- * @param[in]  caps         Memory capabilities, see `mmu_mem_caps_t`
  * @param[in]  target       Physical memory target you're going to map to, see `mmu_target_t`
+ * @param[in]  caps         Memory capabilities, see `mmu_mem_caps_t`
+ * @param[in]  flags        MMAP flags
  * @param[out] out_ptr      Start address of the mapped virtual memory
  *
  * @return
  *   block_start                              block_end
  *
  */
-esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_mem_caps_t caps, mmu_target_t target, void **out_ptr);
+esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target, mmu_mem_caps_t caps, int flags, void **out_ptr);
 
 /**
  * @brief Unmap a previously mapped virtual memory block
diff --git a/components/esp_mm/test_apps/mmap/main/test_mmap.c b/components/esp_mm/test_apps/mmap/main/test_mmap.c
index e12903aaf8..3e52966a7e 100644
--- a/components/esp_mm/test_apps/mmap/main/test_mmap.c
+++ b/components/esp_mm/test_apps/mmap/main/test_mmap.c
@@ -38,11 +38,11 @@ TEST_CASE("Can dump mapped block stats", "[mmu]")
     ESP_LOGI(TAG, "found partition '%s' at offset 0x%"PRIx32" with size 0x%"PRIx32, part->label, part->address, part->size);
 
     void *ptr0 = NULL;
-    TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &ptr0));
+    TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_TARGET_FLASH0, MMU_MEM_CAP_READ, 0, &ptr0));
     void *ptr1 = NULL;
-    TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_EXEC, MMU_TARGET_FLASH0, &ptr1));
+    TEST_ESP_OK(esp_mmu_map(part->address + TEST_BLOCK_SIZE, TEST_BLOCK_SIZE, MMU_TARGET_FLASH0, MMU_MEM_CAP_EXEC, 0, &ptr1));
     void *ptr2 = NULL;
-    TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &ptr2));
+    TEST_ESP_OK(esp_mmu_map(part->address + 2 * TEST_BLOCK_SIZE, TEST_BLOCK_SIZE, MMU_TARGET_FLASH0, MMU_MEM_CAP_READ, 0, &ptr2));
 
     esp_mmu_map_dump_mapped_blocks(stdout);
diff --git a/components/esp_mm/test_apps/mmap/main/test_mmap_hw.c b/components/esp_mm/test_apps/mmap/main/test_mmap_hw.c
index d48cf53992..6af3f9c9a7 100644
--- a/components/esp_mm/test_apps/mmap/main/test_mmap_hw.c
+++ b/components/esp_mm/test_apps/mmap/main/test_mmap_hw.c
@@ -109,7 +109,8 @@ TEST_CASE("test all readable vaddr can map to flash", "[mmu]")
     TEST_ASSERT(block_info && "no mem");
 
     void *ptr = NULL;
-    ret = esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &ptr);
+    //No need to use the flag, as ESP_MMAP_TEST_ALLOW_MAP_TO_MAPPED_PADDR is enabled in this test_app
+    ret = esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_TARGET_FLASH0, MMU_MEM_CAP_READ, 0, &ptr);
     if (ret == ESP_OK) {
         ESP_LOGI(TAG, "ptr is %p", ptr);
         bool success = s_test_mmap_data_by_random((uint8_t *)ptr, sizeof(sector_buf), test_seed);
@@ -156,7 +157,8 @@ TEST_CASE("test all executable vaddr can map to flash", "[mmu]")
     TEST_ASSERT(block_info && "no mem");
 
     void *ptr = NULL;
-    ret = esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_EXEC, MMU_TARGET_FLASH0, &ptr);
+    //No need to use the flag, as ESP_MMAP_TEST_ALLOW_MAP_TO_MAPPED_PADDR is enabled in this test_app
+    ret = esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_TARGET_FLASH0, MMU_MEM_CAP_EXEC, 0, &ptr);
     if (ret == ESP_OK) {
         ESP_LOGI(TAG, "ptr is %p", ptr);
         for (int i = 0; i < TEST_BLOCK_SIZE; i += 0x100) {
diff --git a/components/spi_flash/flash_mmap.c b/components/spi_flash/flash_mmap.c
index b8860f9e04..3616729725 100644
--- a/components/spi_flash/flash_mmap.c
+++ b/components/spi_flash/flash_mmap.c
@@ -92,7 +92,7 @@ esp_err_t spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_memory_t m
     } else {
         caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT;
     }
-    ret = esp_mmu_map(src_addr, size, caps, MMU_TARGET_FLASH0, &ptr);
+    ret = esp_mmu_map(src_addr, size, MMU_TARGET_FLASH0, caps, ESP_MMU_MMAP_FLAG_PADDR_SHARED, &ptr);
     if (ret == ESP_OK) {
         vaddr_list[0] = (uint32_t)ptr;
         block->list_num = 1;
@@ -202,7 +202,7 @@ esp_err_t spi_flash_mmap_pages(const int *pages, size_t page_count, spi_flash_mm
     }
     for (int i = 0; i < block_num; i++) {
         void *ptr = NULL;
-        ret = esp_mmu_map(paddr_blocks[i][0], paddr_blocks[i][1], caps, MMU_TARGET_FLASH0, &ptr);
+        ret = esp_mmu_map(paddr_blocks[i][0], paddr_blocks[i][1], MMU_TARGET_FLASH0, caps, ESP_MMU_MMAP_FLAG_PADDR_SHARED, &ptr);
         if (ret == ESP_OK) {
             vaddr_list[i] = (uint32_t)ptr;
             successful_cnt++;
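
Usage sketch (not part of the patch): the snippet below only illustrates the reworked argument order (paddr, size, target, caps, flags, out_ptr) and the effect of ESP_MMU_MMAP_FLAG_PADDR_SHARED described above. It assumes the esp_mmu_unmap(void *ptr) prototype from the same header, a 64 KB (or smaller) MMU page size, and placeholder flash addresses/sizes rather than real partition offsets.

    #include <assert.h>
    #include "esp_err.h"
    #include "esp_mmu_map.h"

    void example_shared_mapping(void)
    {
        const esp_paddr_t paddr = 0x10000;   // placeholder, page-aligned flash physical address
        const size_t size = 0x20000;         // placeholder size, rounded up to MMU pages internally
        void *ptr0 = NULL;
        void *ptr1 = NULL;

        // New argument order: paddr, size, target, caps, flags, out_ptr
        ESP_ERROR_CHECK(esp_mmu_map(paddr, size, MMU_TARGET_FLASH0, MMU_MEM_CAP_READ, 0, &ptr0));

        // Without ESP_MMU_MMAP_FLAG_PADDR_SHARED, an overlapping paddr block is rejected
        esp_err_t err = esp_mmu_map(paddr + 0x10000, size, MMU_TARGET_FLASH0, MMU_MEM_CAP_READ, 0, &ptr1);
        assert(err == ESP_ERR_INVALID_ARG);

        // With the flag set, the overlapping paddr block gets its own vaddr block
        ESP_ERROR_CHECK(esp_mmu_map(paddr + 0x10000, size, MMU_TARGET_FLASH0, MMU_MEM_CAP_READ,
                                    ESP_MMU_MMAP_FLAG_PADDR_SHARED, &ptr1));

        // An enclosed or identical paddr block still returns ESP_ERR_INVALID_STATE
        // with out_ptr pointing at the existing vaddr, regardless of the flag.

        ESP_ERROR_CHECK(esp_mmu_unmap(ptr0));
        ESP_ERROR_CHECK(esp_mmu_unmap(ptr1));
    }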