esp_mm: correct mmap reserved irom and drom size

Prior to this commit, we did not take the offset of the irom vaddr start
within its MMU page into account.

If the offset + size is bigger than the MMU page size, for example:
MMU page size: 0x10000
irom vaddr: 0x4200_0800, so offset = 0x800
irom size: 0xF900
offset + size = 0x10100

Under this condition, the region actually spans two MMU pages:
0x4200_0000 ~ 0x4202_0000.

With this commit, we take this offset into consideration as well when
reserving the irom and drom regions.
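
To make the numbers above concrete, here is a minimal standalone sketch; the
values are the hypothetical ones from this message, and the ALIGN_* macros
mirror the ones used in the diff below:

#include <assert.h>
#include <stdint.h>

#define MMU_PAGE_SIZE  0x10000
#define ALIGN_UP_BY(num, align)    (((num) + ((align) - 1)) & ~((align) - 1))
#define ALIGN_DOWN_BY(num, align)  ((num) & (~((align) - 1)))

int main(void)
{
    uint32_t vaddr_start = 0x42000800;        // 0x800 into its MMU page
    uint32_t len         = 0xF900;
    uint32_t vaddr_end   = vaddr_start + len; // 0x42010100

    // Rounding up only the length, as the old code did, reserves one page...
    assert(ALIGN_UP_BY(len, MMU_PAGE_SIZE) == 0x10000);

    // ...but the region actually touches two pages: 0x4200_0000 ~ 0x4202_0000.
    uint32_t first_page_start = ALIGN_DOWN_BY(vaddr_start, MMU_PAGE_SIZE);
    uint32_t last_page_end    = ALIGN_UP_BY(vaddr_end, MMU_PAGE_SIZE);
    assert(last_page_end - first_page_start == 2 * MMU_PAGE_SIZE);

    return 0;
}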
Author: Armando
Date:   2023-02-06 20:06:19 +08:00
Parent: 06e7c02da7
Commit: 98892a3288


@@ -36,8 +36,10 @@
#include "esp_mmu_map.h" #include "esp_mmu_map.h"
//This is for size align
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1)) #define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
//This is for vaddr align
#define ALIGN_DOWN_BY(num, align) ((num) & (~((align) - 1)))
//This flag indicates the memory region is merged, we don't care about it anymore //This flag indicates the memory region is merged, we don't care about it anymore
#define MEM_REGION_MERGED -1 #define MEM_REGION_MERGED -1
@@ -129,6 +131,7 @@ static void s_reserve_irom_region(mem_region_t *hw_mem_regions, int region_nums)
     size_t irom_len_to_reserve = (uint32_t)&_instruction_reserved_end - (uint32_t)&_instruction_reserved_start;
     assert((mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_start)) == irom_len_to_reserve);
+    irom_len_to_reserve += (uint32_t)&_instruction_reserved_start - ALIGN_DOWN_BY((uint32_t)&_instruction_reserved_start, CONFIG_MMU_PAGE_SIZE);
     irom_len_to_reserve = ALIGN_UP_BY(irom_len_to_reserve, CONFIG_MMU_PAGE_SIZE);
     cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_instruction_reserved_start, irom_len_to_reserve);
@@ -156,6 +159,7 @@ static void s_reserve_drom_region(mem_region_t *hw_mem_regions, int region_nums)
     size_t drom_len_to_reserve = (uint32_t)&_rodata_reserved_end - (uint32_t)&_rodata_reserved_start;
     assert((mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_start)) == drom_len_to_reserve);
+    drom_len_to_reserve += (uint32_t)&_rodata_reserved_start - ALIGN_DOWN_BY((uint32_t)&_rodata_reserved_start, CONFIG_MMU_PAGE_SIZE);
     drom_len_to_reserve = ALIGN_UP_BY(drom_len_to_reserve, CONFIG_MMU_PAGE_SIZE);
     cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_rodata_reserved_start, drom_len_to_reserve);