Merge branch 'feature/mmu_driver' into 'master'

esp_mm: new virtual memory mapping driver via mmu

Closes IDF-5847, IDF-6076, IDF-5023, IDF-5339, and IDFGH-8961

See merge request espressif/esp-idf!20540
Armando (Dou Yiwen)
2023-02-08 00:59:58 +08:00
108 changed files with 3866 additions and 1208 deletions

View File

@@ -91,6 +91,7 @@
 /components/esp_hw_support/ @esp-idf-codeowners/system @esp-idf-codeowners/peripherals
 /components/esp_lcd/ @esp-idf-codeowners/peripherals
 /components/esp_local_ctrl/ @esp-idf-codeowners/app-utilities
+/components/esp_mm/ @esp-idf-codeowners/peripherals
 /components/esp_netif/ @esp-idf-codeowners/network
 /components/esp_netif_stack/ @esp-idf-codeowners/network
 /components/esp_partition/ @esp-idf-codeowners/storage

View File

@@ -139,12 +139,8 @@ static const char *TAG = "bootloader_flash";
    63th block for bootloader_flash_read
 */
 #define MMU_BLOCK0_VADDR SOC_DROM_LOW
-#ifdef SOC_MMU_PAGE_SIZE_CONFIGURABLE
-#define MMAP_MMU_SIZE (DRAM0_CACHE_ADDRESS_HIGH(SPI_FLASH_MMU_PAGE_SIZE) - DRAM0_CACHE_ADDRESS_LOW - SPI_FLASH_MMU_PAGE_SIZE) // This mmu size means that the mmu size to be mapped
-#else
-#define MMAP_MMU_SIZE (DRAM0_CACHE_ADDRESS_HIGH - DRAM0_CACHE_ADDRESS_LOW - SPI_FLASH_MMU_PAGE_SIZE) // This mmu size means that the mmu size to be mapped
-#endif
-#define MMU_BLOCK63_VADDR (MMU_BLOCK0_VADDR + MMAP_MMU_SIZE)
+#define MMAP_MMU_SIZE (DRAM0_CACHE_ADDRESS_HIGH - DRAM0_CACHE_ADDRESS_LOW) // This mmu size means that the mmu size to be mapped
+#define MMU_BLOCK63_VADDR (MMU_BLOCK0_VADDR + MMAP_MMU_SIZE - SPI_FLASH_MMU_PAGE_SIZE)
 #define FLASH_READ_VADDR MMU_BLOCK63_VADDR
 #endif

View File

@@ -466,11 +466,11 @@ static void test_rmt_multi_channels_trans(size_t channel0_mem_block_symbols, siz
 #define TEST_RMT_CHANS 2
 #define TEST_LED_NUM 24
 #define TEST_STOP_TIME_NO_SYNCHRO_DELTA 150
-#if CONFIG_IDF_TARGET_ESP32C6
+#if CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32C6
 #define TEST_STOP_TIME_SYNCHRO_DELTA 400
 #else
 #define TEST_STOP_TIME_SYNCHRO_DELTA 10
-#endif // CONFIG_IDF_TARGET_ESP32C6
+#endif // #if CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32C6
 rmt_tx_channel_config_t tx_channel_cfg = {
     .clk_src = RMT_CLK_SRC_DEFAULT,
     .resolution_hz = 10000000, // 10MHz, 1 tick = 0.1us (led strip needs a high resolution)

View File

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -9,7 +9,7 @@
 #include "esp_heap_caps.h"
 // iterator to load partition tables in `test spi bus lock, with flash` will lead memory not free
-#define TEST_MEMORY_LEAK_THRESHOLD (250)
+#define TEST_MEMORY_LEAK_THRESHOLD (350)
 static size_t before_free_8bit;
 static size_t before_free_32bit;

View File

@@ -0,0 +1,24 @@
idf_build_get_property(target IDF_TARGET)

set(includes "include")
# Note: requires spi_flash for cache_utils, will be refactored
set(priv_requires heap spi_flash)
set(srcs)

if(NOT CONFIG_APP_BUILD_TYPE_PURE_RAM_APP)
    set(srcs "esp_mmu_map.c"
             "port/${target}/ext_mem_layout.c")
endif()

idf_component_register(SRCS ${srcs}
                       INCLUDE_DIRS ${includes}
                       PRIV_REQUIRES ${priv_requires})

if(NOT BOOTLOADER_BUILD)
    if(CONFIG_SPIRAM)
        # Use esp_psram for `esp_psram_extram_writeback_cache()` on ESP32
        idf_component_optional_requires(PRIVATE esp_psram)
    endif()
endif()

View File

@@ -0,0 +1,8 @@
menu "ESP Memory Management"
# Add MMU setting menu here
# Add Cache setting menu here
orsource "./Kconfig.mmap"
endmenu # ESP Memory Management

View File

@@ -0,0 +1,3 @@
menu "MMAP Configuration"
endmenu

View File

@@ -0,0 +1,724 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <inttypes.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_heap_caps.h"
#include "soc/soc_caps.h"
#include "hal/cache_types.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#include "hal/mmu_types.h"
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/rom/cache.h"
#endif
#include "esp_private/cache_utils.h"
#if CONFIG_SPIRAM
#include "esp_private/esp_psram_extram.h"
#endif
#include "esp_private/esp_mmu_map_private.h"
#include "ext_mem_layout.h"
#include "esp_mmu_map.h"
//This is for size align
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
//This is for vaddr align
#define ALIGN_DOWN_BY(num, align) ((num) & (~((align) - 1)))
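//Worked example (illustrative values, assuming a 64 KB MMU page, i.e. align == 0x10000):
//ALIGN_UP_BY(0x12345, 0x10000) == 0x20000, ALIGN_DOWN_BY(0x12345, 0x10000) == 0x10000.
//Both macros require `align` to be a power of two.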
//This flag indicates the memory region is merged, we don't care about it anymore
#define MEM_REGION_MERGED -1
/**
* We have some hw related tests for vaddr region capabilities
* Use this macro to disable paddr check as we need to reuse certain paddr blocks
*/
#define ENABLE_PADDR_CHECK !ESP_MMAP_TEST_ALLOW_MAP_TO_MAPPED_PADDR
static DRAM_ATTR const char *TAG = "mmap";
/**
* @brief MMU Memory Mapping Driver
*
* Driver Backgrounds:
*
* --------------------------------------------------------------------------------------------------------
* Memory Pool |
* --------------------------------------------------------------------------------------------------------
* | Memory Region 0 | Memory Region 1 | ... |
* --------------------------------------------------------------------------------------------------------
* | Block 0 | Slot 0 | Block 1 | Block 2 | ... | Slot 1 (final slot) | ... |
* --------------------------------------------------------------------------------------------------------
*
* - A block is a piece of vaddr range that is dynamically mapped. Blocks are doubly linked:
* Block 0 <-> Block 1 <-> Block 2
* - A Slot is the vaddr range between 2 blocks.
*/
/**
* Struct for a block
*/
typedef struct mem_block_ {
uint32_t laddr_start; //linear address start of this block
uint32_t laddr_end; //linear address end of this block
intptr_t vaddr_start; //virtual address start of this block
intptr_t vaddr_end; //virtual address end of this block
size_t size; //size of this block, should be aligned to MMU page size
int caps; //caps of this block, `mmu_mem_caps_t`
uint32_t paddr_start; //physical address start of this block
uint32_t paddr_end; //physical address end of this block
mmu_target_t target; //physical target that this block is mapped to
TAILQ_ENTRY(mem_block_) entries; //link entry
} mem_block_t;
/**
* Struct for a memory region
*/
typedef struct mem_region_ {
cache_bus_mask_t bus_id; //cache bus mask of this region
uint32_t start; //linear address start of this region
uint32_t end; //linear address end of this region
size_t region_size; //region size, in bytes
uint32_t free_head; //linear address free head of this region
size_t max_slot_size; //max slot size within this region
int caps; //caps of this region, `mmu_mem_caps_t`
mmu_target_t targets; //physical targets supported by this region
TAILQ_HEAD(mem_block_head_, mem_block_) mem_block_head; //link head of allocated blocks within this region
} mem_region_t;
typedef struct {
/**
* number of memory regions that are available, after coalescing, this number should be smaller than or equal to `SOC_MMU_LINEAR_ADDRESS_REGION_NUM`
*/
uint32_t num_regions;
/**
* This saves the available MMU linear address regions,
* after reserving flash .rodata and .text, and after coalescing.
* Only the first `num_regions` items are valid
*/
mem_region_t mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM];
} mmu_ctx_t;
static mmu_ctx_t s_mmu_ctx;
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
static void s_reserve_irom_region(mem_region_t *hw_mem_regions, int region_nums)
{
/**
* We follow the way the 1st bootloader loads flash .text:
*
* - Now IBUS addresses (between `_instruction_reserved_start` and `_instruction_reserved_end`) are consecutive on all chips,
* we strongly rely on this to calculate the .text length
*/
extern int _instruction_reserved_start;
extern int _instruction_reserved_end;
size_t irom_len_to_reserve = (uint32_t)&_instruction_reserved_end - (uint32_t)&_instruction_reserved_start;
assert((mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_start)) == irom_len_to_reserve);
irom_len_to_reserve += (uint32_t)&_instruction_reserved_start - ALIGN_DOWN_BY((uint32_t)&_instruction_reserved_start, CONFIG_MMU_PAGE_SIZE);
irom_len_to_reserve = ALIGN_UP_BY(irom_len_to_reserve, CONFIG_MMU_PAGE_SIZE);
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_instruction_reserved_start, irom_len_to_reserve);
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (bus_mask & hw_mem_regions[i].bus_id) {
if (hw_mem_regions[i].region_size <= irom_len_to_reserve) {
hw_mem_regions[i].free_head = hw_mem_regions[i].end;
hw_mem_regions[i].max_slot_size = 0;
irom_len_to_reserve -= hw_mem_regions[i].region_size;
} else {
hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + irom_len_to_reserve;
hw_mem_regions[i].max_slot_size -= irom_len_to_reserve;
}
}
}
}
static void s_reserve_drom_region(mem_region_t *hw_mem_regions, int region_nums)
{
/**
* Similarly, we follow the way the 1st bootloader loads flash .rodata:
*/
extern int _rodata_reserved_start;
extern int _rodata_reserved_end;
size_t drom_len_to_reserve = (uint32_t)&_rodata_reserved_end - (uint32_t)&_rodata_reserved_start;
assert((mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_start)) == drom_len_to_reserve);
drom_len_to_reserve += (uint32_t)&_rodata_reserved_start - ALIGN_DOWN_BY((uint32_t)&_rodata_reserved_start, CONFIG_MMU_PAGE_SIZE);
drom_len_to_reserve = ALIGN_UP_BY(drom_len_to_reserve, CONFIG_MMU_PAGE_SIZE);
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_rodata_reserved_start, drom_len_to_reserve);
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (bus_mask & hw_mem_regions[i].bus_id) {
if (hw_mem_regions[i].region_size <= drom_len_to_reserve) {
hw_mem_regions[i].free_head = hw_mem_regions[i].end;
hw_mem_regions[i].max_slot_size = 0;
drom_len_to_reserve -= hw_mem_regions[i].region_size;
} else {
hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + drom_len_to_reserve;
hw_mem_regions[i].max_slot_size -= drom_len_to_reserve;
}
}
}
}
#endif //#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
void esp_mmu_map_init(void)
{
mem_region_t hw_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {};
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
hw_mem_regions[i].start = g_mmu_mem_regions[i].start;
hw_mem_regions[i].end = g_mmu_mem_regions[i].end;
hw_mem_regions[i].region_size = g_mmu_mem_regions[i].size;
hw_mem_regions[i].max_slot_size = g_mmu_mem_regions[i].size;
hw_mem_regions[i].free_head = g_mmu_mem_regions[i].start;
hw_mem_regions[i].bus_id = g_mmu_mem_regions[i].bus_id;
hw_mem_regions[i].caps = g_mmu_mem_regions[i].caps;
hw_mem_regions[i].targets = g_mmu_mem_regions[i].targets;
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
assert(__builtin_popcount(hw_mem_regions[i].bus_id) == 1);
#endif
assert(hw_mem_regions[i].region_size % CONFIG_MMU_PAGE_SIZE == 0);
}
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
//First reserve memory regions used for irom and drom, as we must follow the way the 1st bootloader loads them
s_reserve_irom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
s_reserve_drom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
#endif //#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
if (SOC_MMU_LINEAR_ADDRESS_REGION_NUM > 1) {
//Now we can coalesce adjacent regions
for (int i = 1; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
mem_region_t *a = &hw_mem_regions[i - 1];
mem_region_t *b = &hw_mem_regions[i];
if ((b->free_head == a->end) && (b->caps == a->caps) && (b->targets == a->targets)) {
a->caps = MEM_REGION_MERGED;
b->bus_id |= a->bus_id;
b->start = a->start;
b->region_size += a->region_size;
b->free_head = a->free_head;
b->max_slot_size += a->max_slot_size;
}
}
}
//Count the mem regions left after coalescing
uint32_t region_num = 0;
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if(hw_mem_regions[i].caps != MEM_REGION_MERGED) {
region_num++;
}
}
ESP_EARLY_LOGV(TAG, "after coalescing, %d regions are left", region_num);
//Initialise `s_mmu_ctx.mem_regions[]`, as we've done all static allocation, to prepare available virtual memory regions
uint32_t available_region_idx = 0;
s_mmu_ctx.num_regions = region_num;
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (hw_mem_regions[i].caps == MEM_REGION_MERGED) {
continue;
}
memcpy(&s_mmu_ctx.mem_regions[available_region_idx], &hw_mem_regions[i], sizeof(mem_region_t));
available_region_idx++;
}
for (int i = 0; i < available_region_idx; i++) {
TAILQ_INIT(&s_mmu_ctx.mem_regions[i].mem_block_head);
}
assert(available_region_idx == region_num);
}
static esp_err_t s_mem_caps_check(mmu_mem_caps_t caps)
{
if (caps & MMU_MEM_CAP_EXEC) {
if ((caps & MMU_MEM_CAP_8BIT) || (caps & MMU_MEM_CAP_WRITE)) {
//None of the executable memory are expected to be 8-bit accessible or writable.
return ESP_ERR_INVALID_ARG;
}
caps |= MMU_MEM_CAP_32BIT;
}
return ESP_OK;
}
esp_err_t esp_mmu_map_get_max_consecutive_free_block_size(mmu_mem_caps_t caps, mmu_target_t target, size_t *out_len)
{
ESP_RETURN_ON_FALSE(out_len, ESP_ERR_INVALID_ARG, TAG, "null pointer");
ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");
*out_len = 0;
size_t max = 0;
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
if (((s_mmu_ctx.mem_regions[i].caps & caps) == caps) && ((s_mmu_ctx.mem_regions[i].targets & target) == target)) {
if (s_mmu_ctx.mem_regions[i].max_slot_size > max) {
max = s_mmu_ctx.mem_regions[i].max_slot_size;
}
}
}
*out_len = max;
return ESP_OK;
}
static int32_t s_find_available_region(mem_region_t *mem_regions, uint32_t region_nums, size_t size, mmu_mem_caps_t caps, mmu_target_t target)
{
int32_t found_region_id = -1;
for (int i = 0; i < region_nums; i++) {
if (((mem_regions[i].caps & caps) == caps) && ((mem_regions[i].targets & target) == target)) {
if (mem_regions[i].max_slot_size >= size) {
found_region_id = i;
break;
}
}
}
return found_region_id;
}
esp_err_t esp_mmu_map_reserve_block_with_caps(size_t size, mmu_mem_caps_t caps, mmu_target_t target, const void **out_ptr)
{
ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");
size_t aligned_size = ALIGN_UP_BY(size, CONFIG_MMU_PAGE_SIZE);
uint32_t laddr = 0;
int32_t found_region_id = s_find_available_region(s_mmu_ctx.mem_regions, s_mmu_ctx.num_regions, aligned_size, caps, target);
if (found_region_id == -1) {
ESP_EARLY_LOGE(TAG, "no such vaddr range");
return ESP_ERR_NOT_FOUND;
}
laddr = (uint32_t)s_mmu_ctx.mem_regions[found_region_id].free_head;
s_mmu_ctx.mem_regions[found_region_id].free_head += aligned_size;
s_mmu_ctx.mem_regions[found_region_id].max_slot_size -= aligned_size;
ESP_EARLY_LOGV(TAG, "found laddr is 0x%x", laddr);
uint32_t vaddr = 0;
if (caps & MMU_MEM_CAP_EXEC) {
vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION);
} else {
vaddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_DATA);
}
*out_ptr = (void *)vaddr;
return ESP_OK;
}
#if CONFIG_IDF_TARGET_ESP32
/**
* On ESP32, due to hardware limitation, we don't have an
* easy way to sync between cache and external memory wrt
* certain range. So we do a full sync here
*/
static void IRAM_ATTR NOINLINE_ATTR s_cache_sync(void)
{
#if CONFIG_SPIRAM
esp_psram_extram_writeback_cache();
#endif //#if CONFIG_SPIRAM
Cache_Flush(0);
#if !CONFIG_FREERTOS_UNICORE
Cache_Flush(1);
#endif // !CONFIG_FREERTOS_UNICORE
}
#endif //#if CONFIG_IDF_TARGET_ESP32
static void IRAM_ATTR NOINLINE_ATTR s_do_cache_invalidate(uint32_t vaddr_start, uint32_t size)
{
#if CONFIG_IDF_TARGET_ESP32
s_cache_sync();
#else //Other chips
cache_hal_invalidate_addr(vaddr_start, size);
#endif // CONFIG_IDF_TARGET_ESP32
}
static void IRAM_ATTR NOINLINE_ATTR s_do_mapping(mmu_target_t target, uint32_t vaddr_start, esp_paddr_t paddr_start, uint32_t size)
{
/**
* Disable Cache, after this function, involved code and data should be placed in internal RAM.
*
* @note we call this for now, but this will be refactored to move out of `spi_flash`
*/
spi_flash_disable_interrupts_caches_and_other_cpu();
uint32_t actual_mapped_len = 0;
mmu_hal_map_region(0, target, vaddr_start, paddr_start, size, &actual_mapped_len);
#if (SOC_MMU_PERIPH_NUM == 2)
#if !CONFIG_FREERTOS_UNICORE
mmu_hal_map_region(1, target, vaddr_start, paddr_start, size, &actual_mapped_len);
#endif // #if !CONFIG_FREERTOS_UNICORE
#endif // #if (SOC_MMU_PERIPH_NUM == 2)
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, vaddr_start, size);
cache_ll_l1_enable_bus(0, bus_mask);
#if !CONFIG_FREERTOS_UNICORE
bus_mask = cache_ll_l1_get_bus(0, vaddr_start, size);
cache_ll_l1_enable_bus(1, bus_mask);
#endif
s_do_cache_invalidate(vaddr_start, size);
//enable Cache, after this function, internal RAM access is no longer mandatory
spi_flash_enable_interrupts_caches_and_other_cpu();
ESP_EARLY_LOGV(TAG, "actual_mapped_len is 0x%"PRIx32, actual_mapped_len);
}
esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_mem_caps_t caps, mmu_target_t target, void **out_ptr)
{
esp_err_t ret = ESP_FAIL;
ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
#if !SOC_SPIRAM_SUPPORTED || CONFIG_IDF_TARGET_ESP32
ESP_RETURN_ON_FALSE(!(target & MMU_TARGET_PSRAM0), ESP_ERR_NOT_SUPPORTED, TAG, "PSRAM is not supported");
#endif
ESP_RETURN_ON_FALSE((paddr_start % CONFIG_MMU_PAGE_SIZE == 0), ESP_ERR_INVALID_ARG, TAG, "paddr must be rounded up to the nearest multiple of CONFIG_MMU_PAGE_SIZE");
ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");
size_t aligned_size = ALIGN_UP_BY(size, CONFIG_MMU_PAGE_SIZE);
int32_t found_region_id = s_find_available_region(s_mmu_ctx.mem_regions, s_mmu_ctx.num_regions, aligned_size, caps, target);
if (found_region_id == -1) {
ESP_EARLY_LOGE(TAG, "no such vaddr range");
return ESP_ERR_NOT_FOUND;
}
//Now we're sure we can find an available block inside a certain region
mem_region_t *found_region = &s_mmu_ctx.mem_regions[found_region_id];
mem_block_t *dummy_head = NULL;
mem_block_t *dummy_tail = NULL;
mem_block_t *new_block = NULL;
if (TAILQ_EMPTY(&found_region->mem_block_head)) {
dummy_head = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
ESP_GOTO_ON_FALSE(dummy_head, ESP_ERR_NO_MEM, err, TAG, "no mem");
dummy_head->laddr_start = found_region->free_head;
dummy_head->laddr_end = found_region->free_head;
//We don't care about the vaddr or paddr of the dummy head
dummy_head->size = 0;
dummy_head->caps = caps;
TAILQ_INSERT_HEAD(&found_region->mem_block_head, dummy_head, entries);
dummy_tail = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
ESP_GOTO_ON_FALSE(dummy_tail, ESP_ERR_NO_MEM, err, TAG, "no mem");
dummy_tail->laddr_start = found_region->end;
dummy_tail->laddr_end = found_region->end;
//We don't care about the vaddr or paddr of the dummy tail
dummy_tail->size = 0;
dummy_tail->caps = caps;
TAILQ_INSERT_TAIL(&found_region->mem_block_head, dummy_tail, entries);
}
//Check if paddr is overlapped
mem_block_t *mem_block = NULL;
#if ENABLE_PADDR_CHECK
bool is_mapped = false;
TAILQ_FOREACH(mem_block, &found_region->mem_block_head, entries) {
if (target == mem_block->target) {
if ((paddr_start >= mem_block->paddr_start) && ((paddr_start + aligned_size) <= mem_block->paddr_end)) {
//the to-be-mapped paddr region is mapped already
is_mapped = true;
break;
}
}
}
if (is_mapped) {
ESP_LOGW(TAG, "paddr region is mapped already, vaddr_start: %p, size: 0x%x", (void *)mem_block->vaddr_start, mem_block->size);
*out_ptr = (void *)mem_block->vaddr_start;
return ESP_ERR_INVALID_STATE;
}
#endif //#if ENABLE_PADDR_CHECK
new_block = (mem_block_t *)heap_caps_calloc(1, sizeof(mem_block_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
ESP_GOTO_ON_FALSE(new_block, ESP_ERR_NO_MEM, err, TAG, "no mem");
//Reserve this block as it'll be mapped
bool found = false;
// Get the end address of the dummy_head block, which is always first block on the list
uint32_t last_end = TAILQ_FIRST(&found_region->mem_block_head)->laddr_end;
size_t slot_len = 0;
size_t max_slot_len = 0;
mem_block_t *found_block = NULL; //The block we found; the slot between it and its prior block is where we will insert the new block
TAILQ_FOREACH(mem_block, &found_region->mem_block_head, entries) {
slot_len = mem_block->laddr_start - last_end;
if (!found) {
if (slot_len >= aligned_size) {
//Found it
found = true;
found_block = mem_block;
slot_len -= aligned_size;
new_block->laddr_start = last_end;
}
}
max_slot_len = (slot_len > max_slot_len) ? slot_len : max_slot_len;
last_end = mem_block->laddr_end;
}
assert(found);
//insert the to-be-mapped new block to the list
TAILQ_INSERT_BEFORE(found_block, new_block, entries);
//Finally, we update the max_slot_size
found_region->max_slot_size = max_slot_len;
//Now we fill others according to the found `new_block->laddr_start`
new_block->laddr_end = new_block->laddr_start + aligned_size;
new_block->size = aligned_size;
new_block->caps = caps;
if (caps & MMU_MEM_CAP_EXEC) {
new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_INSTRUCTION);
new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_INSTRUCTION);
} else {
new_block->vaddr_start = mmu_ll_laddr_to_vaddr(new_block->laddr_start, MMU_VADDR_DATA);
new_block->vaddr_end = mmu_ll_laddr_to_vaddr(new_block->laddr_end, MMU_VADDR_DATA);
}
new_block->paddr_start = paddr_start;
new_block->paddr_end = paddr_start + aligned_size;
new_block->target = target;
//do mapping
s_do_mapping(target, new_block->vaddr_start, paddr_start, aligned_size);
*out_ptr = (void *)new_block->vaddr_start;
return ESP_OK;
err:
if (new_block) {
free(new_block);
}
if (dummy_tail) {
free(dummy_tail);
}
if (dummy_head) {
free(dummy_head);
}
return ret;
}
static void IRAM_ATTR NOINLINE_ATTR s_do_unmapping(uint32_t vaddr_start, uint32_t size)
{
/**
* Disable Cache, after this function, involved code and data should be placed in internal RAM.
*
* @note we call this for now, but this will be refactored to move out of `spi_flash`
*/
spi_flash_disable_interrupts_caches_and_other_cpu();
mmu_hal_unmap_region(0, vaddr_start, size);
#if (SOC_MMU_PERIPH_NUM == 2)
#if !CONFIG_FREERTOS_UNICORE
mmu_hal_unmap_region(1, vaddr_start, size);
#endif // #if !CONFIG_FREERTOS_UNICORE
#endif // #if (SOC_MMU_PERIPH_NUM == 2)
//enable Cache, after this function, internal RAM access is no longer mandatory
spi_flash_enable_interrupts_caches_and_other_cpu();
}
esp_err_t esp_mmu_unmap(void *ptr)
{
ESP_RETURN_ON_FALSE(ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
mem_region_t *region = NULL;
mem_block_t *mem_block = NULL;
uint32_t ptr_laddr = mmu_ll_vaddr_to_laddr((uint32_t)ptr);
size_t slot_len = 0;
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
if (ptr_laddr >= s_mmu_ctx.mem_regions[i].free_head && ptr_laddr < s_mmu_ctx.mem_regions[i].end) {
region = &s_mmu_ctx.mem_regions[i];
}
}
ESP_RETURN_ON_FALSE(region, ESP_ERR_NOT_FOUND, TAG, "munmap target pointer is outside external memory regions");
bool found = false;
mem_block_t *found_block = NULL;
TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
if (mem_block == TAILQ_FIRST(&region->mem_block_head) || mem_block == TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
//we don't care about the dummy_head and the dummy_tail
continue;
}
//now we are only traversing the actual dynamically allocated blocks, dummy_head and dummy_tail are excluded already
if (mem_block->laddr_start == ptr_laddr) {
slot_len = TAILQ_NEXT(mem_block, entries)->laddr_start - TAILQ_PREV(mem_block, mem_block_head_, entries)->laddr_end;
region->max_slot_size = (slot_len > region->max_slot_size) ? slot_len : region->max_slot_size;
found = true;
found_block = mem_block;
break;
}
}
ESP_RETURN_ON_FALSE(found, ESP_ERR_NOT_FOUND, TAG, "munmap target pointer isn't mapped yet");
//do unmap
s_do_unmapping(mem_block->vaddr_start, mem_block->size);
//remove the already unmapped block from the list
TAILQ_REMOVE(&region->mem_block_head, found_block, entries);
free(found_block);
return ESP_OK;
}
esp_err_t esp_mmu_map_dump_mapped_blocks(FILE* stream)
{
char line[100];
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
fprintf(stream, "region %d:\n", i);
fprintf(stream, "%-15s %-14s %-14s %-12s %-12s %-12s\n", "Bus ID", "Start", "Free Head", "End", "Caps", "Max Slot Size");
char *buf = line;
size_t len = sizeof(line);
memset(line, 0x0, len);
snprintf(buf, len, "0x%-13x 0x%-12"PRIx32" 0x%-11"PRIx32" 0x%-10"PRIx32" 0x%-10x 0x%-8x\n",
s_mmu_ctx.mem_regions[i].bus_id,
s_mmu_ctx.mem_regions[i].start,
s_mmu_ctx.mem_regions[i].free_head,
s_mmu_ctx.mem_regions[i].end,
s_mmu_ctx.mem_regions[i].caps,
s_mmu_ctx.mem_regions[i].max_slot_size);
fputs(line, stream);
fprintf(stream, "mapped blocks:\n");
fprintf(stream, "%-4s %-13s %-12s %-12s %-6s %-13s %-11s\n", "ID", "Vaddr Start", "Vaddr End", "Block Size", "Caps", "Paddr Start", "Paddr End");
mem_region_t *region = &s_mmu_ctx.mem_regions[i];
mem_block_t *mem_block = NULL;
int id = 0;
TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
if (mem_block != TAILQ_FIRST(&region->mem_block_head) && mem_block != TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
snprintf(buf, len, "%-4d 0x%-11x 0x%-10x 0x%-10x 0x%-4x 0x%-11"PRIx32" 0x%-8"PRIx32"\n",
id,
mem_block->vaddr_start,
mem_block->vaddr_end,
mem_block->size,
mem_block->caps,
mem_block->paddr_start,
mem_block->paddr_end);
fputs(line, stream);
id++;
}
}
fprintf(stream, "\n");
}
return ESP_OK;
}
/*---------------------------------------------------------------
Private dump functions, IRAM Safe
---------------------------------------------------------------*/
esp_err_t IRAM_ATTR esp_mmu_map_dump_mapped_blocks_private(void)
{
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
mem_region_t *region = &s_mmu_ctx.mem_regions[i];
mem_block_t *mem_block = NULL;
TAILQ_FOREACH(mem_block, &region->mem_block_head, entries) {
if (mem_block != TAILQ_FIRST(&region->mem_block_head) && mem_block != TAILQ_LAST(&region->mem_block_head, mem_block_head_)) {
ESP_DRAM_LOGI(TAG, "block vaddr_start: 0x%x", mem_block->vaddr_start);
ESP_DRAM_LOGI(TAG, "block vaddr_end: 0x%x", mem_block->vaddr_end);
ESP_DRAM_LOGI(TAG, "block size: 0x%x", mem_block->size);
ESP_DRAM_LOGI(TAG, "block caps: 0x%x\n", mem_block->caps);
ESP_DRAM_LOGI(TAG, "block paddr_start: 0x%x\n", mem_block->paddr_start);
ESP_DRAM_LOGI(TAG, "block paddr_end: 0x%x\n", mem_block->paddr_end);
}
}
ESP_DRAM_LOGI(TAG, "region bus_id: 0x%x", s_mmu_ctx.mem_regions[i].bus_id);
ESP_DRAM_LOGI(TAG, "region start: 0x%x", s_mmu_ctx.mem_regions[i].start);
ESP_DRAM_LOGI(TAG, "region end: 0x%x", s_mmu_ctx.mem_regions[i].end);
ESP_DRAM_LOGI(TAG, "region caps: 0x%x\n", s_mmu_ctx.mem_regions[i].caps);
}
return ESP_OK;
}
/*---------------------------------------------------------------
Helper APIs for conversion between vaddr and paddr
---------------------------------------------------------------*/
static bool NOINLINE_ATTR IRAM_ATTR s_vaddr_to_paddr(uint32_t vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target)
{
//we call this for now, but this will be refactored to move out of `spi_flash`
spi_flash_disable_interrupts_caches_and_other_cpu();
//On ESP32, core 1 settings should be the same as the core 0
bool is_mapped = mmu_hal_vaddr_to_paddr(0, vaddr, out_paddr, out_target);
spi_flash_enable_interrupts_caches_and_other_cpu();
return is_mapped;
}
esp_err_t esp_mmu_vaddr_to_paddr(void *vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target)
{
ESP_RETURN_ON_FALSE(vaddr && out_paddr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
ESP_RETURN_ON_FALSE(mmu_ll_check_valid_ext_vaddr_region(0, (uint32_t)vaddr, 1), ESP_ERR_INVALID_ARG, TAG, "not a valid external virtual address");
esp_paddr_t paddr = 0;
mmu_target_t target = 0;
bool is_mapped = s_vaddr_to_paddr((uint32_t)vaddr, &paddr, &target);
ESP_RETURN_ON_FALSE(is_mapped, ESP_ERR_NOT_FOUND, TAG, "vaddr isn't mapped");
*out_paddr = paddr;
*out_target = target;
return ESP_OK;
}
static bool NOINLINE_ATTR IRAM_ATTR s_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vaddr_t type, uint32_t *out_vaddr)
{
//we call this for now, but this will be refactored to move out of `spi_flash`
spi_flash_disable_interrupts_caches_and_other_cpu();
//On ESP32, core 1 settings should be the same as the core 0
bool found = mmu_hal_paddr_to_vaddr(0, paddr, target, type, out_vaddr);
spi_flash_enable_interrupts_caches_and_other_cpu();
return found;
}
esp_err_t esp_mmu_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vaddr_t type, void **out_vaddr)
{
ESP_RETURN_ON_FALSE(out_vaddr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
uint32_t vaddr = 0;
bool found = false;
found = s_paddr_to_vaddr(paddr, target, type, &vaddr);
ESP_RETURN_ON_FALSE(found, ESP_ERR_NOT_FOUND, TAG, "paddr isn't mapped");
*out_vaddr = (void *)vaddr;
return ESP_OK;
}

View File

@@ -10,6 +10,7 @@
#include "sdkconfig.h" #include "sdkconfig.h"
#include "soc/soc_caps.h" #include "soc/soc_caps.h"
#include "hal/cache_types.h" #include "hal/cache_types.h"
#include "hal/mmu_types.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
@@ -17,11 +18,12 @@ extern "C" {
typedef struct { typedef struct {
intptr_t start; uint32_t start; //laddr start
intptr_t end; uint32_t end; //laddr end
size_t size; size_t size; //region size
cache_bus_mask_t bus_id; cache_bus_mask_t bus_id; //bus_id mask, for accessible cache buses
uint32_t caps; mmu_target_t targets; //region supported physical targets
uint32_t caps; //vaddr capabilities
} mmu_mem_region_t; } mmu_mem_region_t;
//These regions is referring to linear address //These regions is referring to linear address

View File

@@ -0,0 +1,142 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdlib.h>
#include <stdint.h>
#include "esp_err.h"
#include "hal/mmu_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* MMU Memory Mapping Driver APIs for MMU supported memory
*
*
* Driver Backgrounds:
*
* --------------------------------------------------------------------------------------------------------
* Memory Pool |
* --------------------------------------------------------------------------------------------------------
* | Memory Region 0 | Memory Region 1 | ... |
* --------------------------------------------------------------------------------------------------------
* | Block 0 | Slot 0 | Block 1 | Block 2 | ... | Slot 1 (final slot) | ... |
* --------------------------------------------------------------------------------------------------------
*
* - A memory pool stands for the whole virtual address range that can be mapped to physical memory
* - A memory region is a range of virtual addresses with the same attributes
* - A block is a piece of vaddr range that is dynamically mapped.
* - A Slot is the vaddr range between 2 blocks.
*/
/**
* @brief Physical memory type
*/
typedef uint32_t esp_paddr_t;
/**
* @brief Map a physical memory block to external virtual address block, with given capabilities.
*
* @note This API does not guarantee thread safety
*
* @param[in] paddr_start Start address of the physical memory block
* @param[in] size Size to be mapped. Size will be rounded up to the nearest multiple of MMU page size
* @param[in] caps Memory capabilities, see `mmu_mem_caps_t`
* @param[in] target Physical memory target you're going to map to, see `mmu_target_t`
* @param[out] out_ptr Start address of the mapped virtual memory
*
* @return
* - ESP_OK
* - ESP_ERR_INVALID_ARG: Invalid argument, see printed logs
* - ESP_ERR_NOT_SUPPORTED: Only on ESP32, PSRAM is not a supported physical memory target
* - ESP_ERR_NOT_FOUND: Not enough free block of the requested size to use
* - ESP_ERR_NO_MEM: Out of memory, this API will allocate some heap memory for internal usage
* - ESP_ERR_INVALID_STATE: Paddr is mapped already, this API will return the corresponding vaddr_start of the previously mapped block.
*                          Only a to-be-mapped paddr block that is totally enclosed by a previously mapped block will lead to this error:
* new_block_start new_block_end
* |-------- New Block --------|
* |--------------- Block ---------------|
* block_start block_end
*
*/
esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_mem_caps_t caps, mmu_target_t target, void **out_ptr);
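/**
 * A minimal usage sketch (illustrative, not part of this header; `part_paddr` is a
 * hypothetical MMU-page-aligned flash physical address with at least one page of data behind it):
 *
 *     void *ptr = NULL;
 *     ESP_ERROR_CHECK(esp_mmu_map(part_paddr, CONFIG_MMU_PAGE_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &ptr));
 *     //... read the flash contents through `ptr` ...
 *     ESP_ERROR_CHECK(esp_mmu_unmap(ptr));
 */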
/**
* @brief Unmap a previously mapped virtual memory block
*
* @note This API does not guarantee thread safety
*
* @param[in] ptr Start address of the virtual memory
*
* @return
* - ESP_OK
* - ESP_ERR_INVALID_ARG: Null pointer
* - ESP_ERR_NOT_FOUND: Vaddr is not in external memory, or it's not mapped yet
*/
esp_err_t esp_mmu_unmap(void *ptr);
/**
* @brief Get largest consecutive free external virtual memory block size, with given capabilities and given physical target
*
* @param[in] caps Bitwise OR of MMU_MEM_CAP_* flags indicating the memory block
* @param[in] target Physical memory target you're going to map to, see `mmu_target_t`.
* @param[out] out_len Largest free block length, in bytes.
*
* @return
* - ESP_OK
* - ESP_ERR_INVALID_ARG: Invalid arguments, could be null pointer
*/
esp_err_t esp_mmu_map_get_max_consecutive_free_block_size(mmu_mem_caps_t caps, mmu_target_t target, size_t *out_len);
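/**
 * Sketch: query the largest mappable size before calling `esp_mmu_map()` (illustrative only):
 *
 *     size_t largest = 0;
 *     ESP_ERROR_CHECK(esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &largest));
 *     printf("largest free vaddr block: 0x%zx bytes\n", largest);
 */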
/**
* Dump all the previously mapped blocks
*
* @note This API shall not be called from an ISR.
* @note This API does not guarantee thread safety
*
* @param stream stream to print information to; use stdout or stderr to print
* to the console; use fmemopen/open_memstream to print to a
* string buffer.
* @return
* - ESP_OK
*/
esp_err_t esp_mmu_map_dump_mapped_blocks(FILE* stream);
/**
* @brief Convert virtual address to physical address
*
* @param[in] vaddr Virtual address
* @param[out] out_paddr Physical address
* @param[out] out_target Physical memory target, see `mmu_target_t`
*
* @return
* - ESP_OK
* - ESP_ERR_INVALID_ARG: Null pointer, or vaddr is not within external memory
* - ESP_ERR_NOT_FOUND: Vaddr is not mapped yet
*/
esp_err_t esp_mmu_vaddr_to_paddr(void *vaddr, esp_paddr_t *out_paddr, mmu_target_t *out_target);
/**
* @brief Convert physical address to virtual address
*
* @param[in] paddr Physical address
* @param[in] target Physical memory target, see `mmu_target_t`
* @param[in] type Virtual address type, could be either instruction or data
* @param[out] out_vaddr Virtual address
*
* @return
* - ESP_OK
* - ESP_ERR_INVALID_ARG: Null pointer
* - ESP_ERR_NOT_FOUND: Paddr is not mapped yet
*/
esp_err_t esp_mmu_paddr_to_vaddr(esp_paddr_t paddr, mmu_target_t target, mmu_vaddr_t type, void **out_vaddr);
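/**
 * Round-trip sketch for the two conversion helpers (illustrative; assumes `ptr` was
 * returned by a successful `esp_mmu_map()` call with data capabilities):
 *
 *     esp_paddr_t paddr = 0;
 *     mmu_target_t target = 0;
 *     ESP_ERROR_CHECK(esp_mmu_vaddr_to_paddr(ptr, &paddr, &target));
 *     void *vaddr = NULL;
 *     ESP_ERROR_CHECK(esp_mmu_paddr_to_vaddr(paddr, target, MMU_VADDR_DATA, &vaddr));
 *     //`vaddr` should point back into the same mapped page as `ptr`
 */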
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,58 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdlib.h>
#include <stdint.h>
#include "esp_err.h"
#include "hal/mmu_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Memory Mapping Private APIs for MMU supported memory
*/
/**
* @brief Initialise the MMU MMAP driver
*
* This is called once in the IDF startup code. Don't call it in applications
*/
void esp_mmu_map_init(void);
/**
* @brief Reserve a consecutive external virtual memory block, with given capabilities and size
*
* @note This private API shall be only called internally during startup stage. DO NOT call
* this API in your applications
*
* @param[in] size Size, in bytes, the amount of memory to find
* @param[in] caps Bitwise OR of `mmu_mem_caps_t` flags indicating the memory block capability
* @param[in] target Target memory type. See `mmu_target_t`
* @param[out] out_ptr Pointer to start address of the memory block that is reserved
*
* @return
* - ESP_OK: On success
* - ESP_ERR_INVALID_ARG: Invalid arguments, could be wrong caps makeup, or null pointer
* - ESP_ERR_NOT_FOUND: Didn't find enough memory with the given caps
*/
esp_err_t esp_mmu_map_reserve_block_with_caps(size_t size, mmu_mem_caps_t caps, mmu_target_t target, const void **out_ptr);
/*
* @brief Dump all mapped blocks
*
* @return
* - ESP_OK
*/
esp_err_t esp_mmu_map_dump_mapped_blocks_private(void);
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
/**
* These regions refer to linear addresses
* The start addresses in this list should always be sorted from low to high, as MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0,
.targets = MMU_TARGET_FLASH0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_32BIT,
},
[1] = {
.start = SOC_MMU_DROM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_DROM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_DROM0_LINEAR),
.bus_id = CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0,
.caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
[2] = {
.start = SOC_MMU_DRAM1_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_DRAM1_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_DRAM1_LINEAR),
.bus_id = CACHE_BUS_DBUS1,
.targets = MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View File

@@ -0,0 +1,25 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
/**
* The start addresses in this list should always be sorted from low to high, as MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View File

@@ -0,0 +1,25 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
/**
* The start addresses in this list should always be sorted from low to high, as MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View File

@@ -0,0 +1,26 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
#include "hal/mmu_types.h"
/**
* The start addresses in this list should always be sorted from low to high, as MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View File

@@ -0,0 +1,26 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
#include "hal/mmu_types.h"
/**
* The start addresses in this list should always be sorted from low to high, as MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View File

@@ -0,0 +1,25 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
/**
* The start addresses in this list should always be sorted from low to high, as MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View File

@@ -0,0 +1,58 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
/**
* These regions refer to linear addresses
* The start addresses in this list should always be sorted from low to high, as MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0,
.targets = MMU_TARGET_FLASH0 | MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_32BIT,
},
[1] = {
.start = SOC_MMU_DROM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_DROM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_DROM0_LINEAR),
.bus_id = CACHE_BUS_IBUS2,
.targets = MMU_TARGET_FLASH0 | MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
[2] = {
.start = SOC_MMU_DPORT_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_DPORT_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_DPORT_LINEAR),
.bus_id = CACHE_BUS_DBUS2,
.targets = MMU_TARGET_FLASH0 | MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT,
},
[3] = {
.start = SOC_MMU_DRAM1_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_DRAM1_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_DRAM1_LINEAR),
.bus_id = CACHE_BUS_DBUS1,
.targets = MMU_TARGET_FLASH0 | MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
[4] = {
.start = SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_DRAM0_LINEAR),
.bus_id = CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0 | MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View File

@@ -0,0 +1,25 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "../ext_mem_layout.h"
/**
* The start addresses in this list should always be sorted from low to high, as MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
[0] = {
.start = SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW,
.end = SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH,
.size = BUS_SIZE(SOC_MMU_IRAM0_LINEAR),
.bus_id = CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0,
.targets = MMU_TARGET_FLASH0 | MMU_TARGET_PSRAM0,
.caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT,
},
};

View File

@@ -0,0 +1,20 @@
# This is the project CMakeLists.txt file for the test subproject
cmake_minimum_required(VERSION 3.16)
set(EXTRA_COMPONENT_DIRS "$ENV{IDF_PATH}/tools/unit-test-app/components")
include($ENV{IDF_PATH}/tools/cmake/project.cmake)
project(mmap_test)
if(CONFIG_COMPILER_DUMP_RTL_FILES)
add_custom_target(check_test_app_sections ALL
COMMAND ${PYTHON} $ENV{IDF_PATH}/tools/ci/check_callgraph.py
--rtl-dir ${CMAKE_BINARY_DIR}/esp-idf/esp_mm/
--elf-file ${CMAKE_BINARY_DIR}/mmap_test.elf
find-refs
--from-sections=.iram0.text
--to-sections=.flash.text,.flash.rodata
--exit-code
DEPENDS ${elf}
)
endif()

View File

@@ -0,0 +1,2 @@
| Supported Targets | ESP32 | ESP32-C2 | ESP32-C3 | ESP32-C6 | ESP32-H2 | ESP32-S2 | ESP32-S3 |
| ----------------- | ----- | -------- | -------- | -------- | -------- | -------- | -------- |

View File

@@ -0,0 +1,15 @@
set(srcs "test_app_main.c")
list(APPEND srcs "test_mmap.c"
"test_mmap_hw.c")
# In order for the cases defined by `TEST_CASE` to be linked into the final elf,
# the component can be registered as WHOLE_ARCHIVE
idf_component_register(SRCS ${srcs}
PRIV_REQUIRES test_utils spi_flash esp_mm
WHOLE_ARCHIVE)
idf_component_get_property(lib_name esp_mm COMPONENT_LIB)
# Add this to skip checking mapping to a paddr range that is enclosed by a previously mapped paddr range
target_compile_definitions(${lib_name} PRIVATE ESP_MMAP_TEST_ALLOW_MAP_TO_MAPPED_PADDR)

View File

@@ -0,0 +1,64 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "unity.h"
#include "unity_test_utils.h"
#include "esp_heap_caps.h"
/**
* Hardware related tests, e.g.
* - traversing all vaddr range to check their attributes
*
* These tests need a certain amount of internal resources (heap memory), as they use up the vaddr ranges
* On ESP32, it should be around 450
* On ESP32S2, it should be around 600
* On other chips, it should be around 400
*/
#define TEST_MEMORY_LEAK_THRESHOLD (-650)
static size_t before_free_8bit;
static size_t before_free_32bit;
static void check_leak(size_t before_free, size_t after_free, const char *type)
{
ssize_t delta = after_free - before_free;
printf("MALLOC_CAP_%s: Before %u bytes free, After %u bytes free (delta %d)\n", type, before_free, after_free, delta);
TEST_ASSERT_MESSAGE(delta >= TEST_MEMORY_LEAK_THRESHOLD, "memory leak");
}
void setUp(void)
{
before_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
before_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
}
void tearDown(void)
{
size_t after_free_8bit = heap_caps_get_free_size(MALLOC_CAP_8BIT);
size_t after_free_32bit = heap_caps_get_free_size(MALLOC_CAP_32BIT);
check_leak(before_free_8bit, after_free_8bit, "8BIT");
check_leak(before_free_32bit, after_free_32bit, "32BIT");
}
void app_main(void)
{
/*
_____ ___________ ___ ______ ___ ___ ______ _____ _____ _____ _____
| ___/ ___| ___ \ | \/ || \/ | / _ \ | ___ \ |_ _| ___/ ___|_ _|
| |__ \ `--.| |_/ / | . . || . . |/ /_\ \| |_/ / | | | |__ \ `--. | |
| __| `--. \ __/ | |\/| || |\/| || _ || __/ | | | __| `--. \ | |
| |___/\__/ / | | | | || | | || | | || | | | | |___/\__/ / | |
\____/\____/\_| \_| |_/\_| |_/\_| |_/\_| \_/ \____/\____/ \_/
*/
printf(" _____ ___________ ___ ______ ___ ___ ______ _____ _____ _____ _____\r\n");
printf("| ___/ ___| ___ \\ | \\/ || \\/ | / _ \\ | ___ \\ |_ _| ___/ ___|_ _|\r\n");
printf("| |__ \\ `--.| |_/ / | . . || . . |/ /_\\ \\| |_/ / | | | |__ \\ `--. | |\r\n");
printf("| __| `--. \\ __/ | |\\/| || |\\/| || _ || __/ | | | __| `--. \\ | |\r\n");
printf("| |___/\\__/ / | | | | || | | || | | || | | | | |___/\\__/ / | |\r\n");
printf("\\____/\\____/\\_| \\_| |_/\\_| |_/\\_| |_/\\_| \\_/ \\____/\\____/ \\_/\r\n");
unity_run_menu();
}

View File

@@ -0,0 +1,52 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "sdkconfig.h"
#include <sys/param.h>
#include <string.h>
#include "inttypes.h"
#include "esp_log.h"
#include "esp_attr.h"
#include "unity.h"
#include "esp_heap_caps.h"
#include "esp_partition.h"
#include "esp_mmu_map.h"
#include "esp_rom_sys.h"
#define TEST_BLOCK_SIZE CONFIG_MMU_PAGE_SIZE
const static char *TAG = "MMU_TEST";
static const esp_partition_t *s_get_partition(void)
{
//Find the "storage1" partition defined in `partitions.csv`
const esp_partition_t *result = esp_partition_find_first(ESP_PARTITION_TYPE_DATA, ESP_PARTITION_SUBTYPE_ANY, "storage1");
if (!result) {
ESP_LOGE(TAG, "Can't find the partition, please define it correctly in `partitions.csv`");
abort();
}
return result;
}
TEST_CASE("Can dump mapped block stats", "[mmu]")
{
const esp_partition_t *part = s_get_partition();
ESP_LOGI(TAG, "found partition '%s' at offset 0x%"PRIx32" with size 0x%"PRIx32, part->label, part->address, part->size);
void *ptr0 = NULL;
TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &ptr0));
void *ptr1 = NULL;
TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_EXEC, MMU_TARGET_FLASH0, &ptr1));
void *ptr2 = NULL;
TEST_ESP_OK(esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &ptr2));
esp_mmu_map_dump_mapped_blocks(stdout);
TEST_ESP_OK(esp_mmu_unmap(ptr0));
TEST_ESP_OK(esp_mmu_unmap(ptr1));
TEST_ESP_OK(esp_mmu_unmap(ptr2));
}

View File

@@ -0,0 +1,193 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "sdkconfig.h"
#include <sys/param.h>
#include <string.h>
#include <sys/queue.h>
#include "inttypes.h"
#include "esp_log.h"
#include "esp_attr.h"
#include "unity.h"
#include "esp_heap_caps.h"
#include "esp_partition.h"
#include "esp_flash.h"
#include "esp_mmu_map.h"
#include "esp_rom_sys.h"
/**
* This file contains simple hw tests for vaddr memory regions
*
* Traversing all vaddr memory regions to see if they have correct capabilities
*/
const static char *TAG = "MMU_TEST";
static const esp_partition_t *s_get_partition(void)
{
//Find the "storage1" partition defined in `partitions.csv`
const esp_partition_t *result = esp_partition_find_first(ESP_PARTITION_TYPE_DATA, ESP_PARTITION_SUBTYPE_ANY, "storage1");
if (!result) {
ESP_LOGE(TAG, "Can't find the partition, please define it correctly in `partitions.csv`");
abort();
}
return result;
}
/**
* Run the following two tests manually:
* - test all readable vaddr can map to flash
* - test all executable vaddr can map to flash
*
* Do a reset before each test starts, as each of the tests
* will map as much as possible, and doesn't unmap.
*/
#define TEST_BLOCK_SIZE CONFIG_MMU_PAGE_SIZE
typedef struct test_block_info_ {
uint32_t vaddr;
LIST_ENTRY(test_block_info_) entries;
} test_block_info_t;
static LIST_HEAD(test_block_list_head_, test_block_info_) test_block_head;
static DRAM_ATTR uint8_t sector_buf[TEST_BLOCK_SIZE];
static void s_fill_random_data(uint8_t *buffer, size_t size, int random_seed)
{
srand(random_seed);
for (int i = 0 ; i < size; i++) {
buffer[i] = rand() % 0xff;
}
}
static bool s_test_mmap_data_by_random(uint8_t *mblock_ptr, size_t size, int random_seed)
{
srand(random_seed);
uint8_t *test_ptr = mblock_ptr;
for (int i = 0; i < size; i++) {
uint8_t test_data = rand() % 0xff;
if(test_data != test_ptr[i]) {
printf("i: %d\n", i);
printf("test_data: %d\n", test_data);
printf("test_ptr[%d]: %d\n", i, test_ptr[i]);
printf("sector_buf[%d]: %d\n", i, sector_buf[i]);
ESP_EARLY_LOGE(TAG, "FAIL!!!!!!");
return false;
}
}
return true;
}
TEST_CASE("test all readable vaddr can map to flash", "[mmu]")
{
//Get the partition used for SPI1 erase operation
const esp_partition_t *part = s_get_partition();
ESP_LOGI(TAG, "found partition '%s' at offset 0x%"PRIx32" with size 0x%"PRIx32, part->label, part->address, part->size);
//Erase whole region
TEST_ESP_OK(esp_flash_erase_region(part->flash_chip, part->address, part->size));
ESP_LOGI(TAG, "TEST_BLOCK_SIZE: 0x%x", TEST_BLOCK_SIZE);
int test_seed = 299;
s_fill_random_data(sector_buf, sizeof(sector_buf), test_seed);
ESP_LOGV(TAG, "rand seed: %d, write flash addr: %p...", test_seed, (void *)part->address);
TEST_ESP_OK(esp_flash_write(part->flash_chip, sector_buf, part->address, sizeof(sector_buf)));
esp_err_t ret = ESP_FAIL;
int count = 0;
LIST_INIT(&test_block_head);
while (1) {
test_block_info_t *block_info = heap_caps_calloc(1, sizeof(test_block_info_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
TEST_ASSERT(block_info && "no mem");
void *ptr = NULL;
ret = esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_READ, MMU_TARGET_FLASH0, &ptr);
if (ret == ESP_OK) {
ESP_LOGI(TAG, "ptr is %p", ptr);
bool success = s_test_mmap_data_by_random((uint8_t *)ptr, sizeof(sector_buf), test_seed);
TEST_ASSERT(success);
} else if (ret == ESP_ERR_NOT_FOUND) {
free(block_info);
break;
} else {
ESP_LOGI(TAG, "ret: 0x%x", ret);
TEST_ASSERT(false);
}
block_info->vaddr = (uint32_t)ptr;
LIST_INSERT_HEAD(&test_block_head, block_info, entries);
count++;
}
ESP_LOGI(TAG, "no more free block, finish test, test block size: 0x%x, count: 0d%d", TEST_BLOCK_SIZE, count);
test_block_info_t *block_to_free = LIST_FIRST(&test_block_head);
test_block_info_t *temp = NULL;
while (block_to_free) {
temp = block_to_free;
TEST_ESP_OK(esp_mmu_unmap((void *)block_to_free->vaddr));
block_to_free = LIST_NEXT(block_to_free, entries);
free(temp);
}
}
TEST_CASE("test all executable vaddr can map to flash", "[mmu]")
{
//Get the partition used for SPI1 erase operation
const esp_partition_t *part = s_get_partition();
ESP_LOGI(TAG, "found partition '%s' at offset 0x%"PRIx32" with size 0x%"PRIx32, part->label, part->address, part->size);
//Erase whole region
TEST_ESP_OK(esp_flash_erase_region(part->flash_chip, part->address, part->size));
esp_err_t ret = ESP_FAIL;
int count = 0;
LIST_INIT(&test_block_head);
while (1) {
test_block_info_t *block_info = heap_caps_calloc(1, sizeof(test_block_info_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
TEST_ASSERT(block_info && "no mem");
void *ptr = NULL;
ret = esp_mmu_map(part->address, TEST_BLOCK_SIZE, MMU_MEM_CAP_EXEC, MMU_TARGET_FLASH0, &ptr);
if (ret == ESP_OK) {
ESP_LOGI(TAG, "ptr is %p", ptr);
for (int i = 0; i < TEST_BLOCK_SIZE; i += 0x100) {
uint32_t vaddr = (uint32_t)ptr + i;
uint32_t paddr = 0;
mmu_target_t mem_target = 0;
TEST_ESP_OK(esp_mmu_vaddr_to_paddr((void *)vaddr, &paddr, &mem_target));
TEST_ASSERT(paddr == part->address + i);
ESP_LOGV(TAG, "paddr: %p, on %s", (void *)paddr, (mem_target) == MMU_TARGET_FLASH0 ? "Flash" : "PSRAM");
}
}
else if (ret == ESP_ERR_NOT_FOUND) {
free(block_info);
break;
} else {
TEST_ASSERT(false);
}
block_info->vaddr = (uint32_t)ptr;
LIST_INSERT_HEAD(&test_block_head, block_info, entries);
count++;
}
ESP_LOGI(TAG, "no more free block, finish test, test block size: 0x%x, count: 0d%d", TEST_BLOCK_SIZE, count);
test_block_info_t *block_to_free = LIST_FIRST(&test_block_head);
test_block_info_t *temp = NULL;
while (block_to_free) {
temp = block_to_free;
TEST_ESP_OK(esp_mmu_unmap((void *)block_to_free->vaddr));
block_to_free = LIST_NEXT(block_to_free, entries);
free(temp);
}
}

View File

@@ -0,0 +1,6 @@
# Name, Type, SubType, Offset, Size, Flags
# Note: if you have increased the bootloader size, make sure to update the offsets to avoid overlap
nvs, data, nvs, 0x9000, 0x6000,
phy_init, data, phy, 0xf000, 0x1000,
factory, app, factory, 0x10000, 1M,
storage1, data, fat, , 512K,

View File

@@ -0,0 +1,18 @@
# SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: CC0-1.0
import pytest
from pytest_embedded import Dut
@pytest.mark.supported_targets
@pytest.mark.generic
@pytest.mark.parametrize(
'config',
[
'release',
],
indirect=True,
)
def test_mmap(dut: Dut) -> None:
dut.run_all_single_board_cases(group='mmu', timeout=600)

View File

@@ -0,0 +1,6 @@
# set compiler optimization level
CONFIG_COMPILER_OPTIMIZATION_SIZE=y
CONFIG_BOOTLOADER_COMPILER_OPTIMIZATION_SIZE=y
# we can silence the assertions to save the binary footprint
CONFIG_COMPILER_OPTIMIZATION_ASSERTIONS_SILENT=y

View File

@@ -0,0 +1,7 @@
CONFIG_ESP_TASK_WDT=n
CONFIG_PARTITION_TABLE_CUSTOM=y
CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="partitions.csv"
CONFIG_PARTITION_TABLE_FILENAME="partitions.csv"
CONFIG_COMPILER_DUMP_RTL_FILES=y

View File

@@ -45,7 +45,7 @@ esp_err_t esp_partition_read(const esp_partition_t *partition,
    /* Encrypted partitions need to be read via a cache mapping */
    const void *buf;
-   spi_flash_mmap_handle_t handle;
+   esp_partition_mmap_handle_t handle;
    esp_err_t err = esp_partition_mmap(partition, src_offset, size,
                                       SPI_FLASH_MMAP_DATA, &buf, &handle);

View File

@@ -2,7 +2,7 @@ idf_build_get_property(target IDF_TARGET)
set(includes "include")
-set(priv_requires heap spi_flash)
+set(priv_requires heap spi_flash esp_mm)
if(${target} STREQUAL "esp32")
    list(APPEND priv_requires bootloader_support)
    # [refactor-todo]: requires "driver" for `spicommon_periph_claim`
@@ -13,9 +13,7 @@ set(srcs)
if(CONFIG_SPIRAM)
    list(APPEND srcs "esp_psram.c"
-                    "mmu.c"
-                    "mmu_psram_flash.c"
-                    "ext_mem_layout.c")
+                    "mmu_psram_flash.c")
    if(${target} STREQUAL "esp32")
        list(APPEND srcs "esp32/esp_psram_extram_cache.c"

View File

@@ -26,7 +26,8 @@
#include "esp_private/mmu_psram_flash.h" #include "esp_private/mmu_psram_flash.h"
#include "esp_psram_impl.h" #include "esp_psram_impl.h"
#include "esp_psram.h" #include "esp_psram.h"
#include "mmu.h" #include "esp_private/esp_mmu_map_private.h"
#include "esp_mmu_map.h"
#if CONFIG_IDF_TARGET_ESP32 #if CONFIG_IDF_TARGET_ESP32
#include "esp32/himem.h" #include "esp32/himem.h"
@@ -184,15 +185,6 @@ esp_err_t esp_psram_init(void)
    ESP_EARLY_LOGV(TAG, "after copy .rodata, used page is %d, start_page is %d, psram_available_size is %d B", used_page, start_page, psram_available_size);
#endif  //#if CONFIG_SPIRAM_RODATA
-   /**
-    * For now,
-    * - we only need to use MMU driver when PSRAM is enabled
-    * - MMU driver isn't public
-    *
-    * So we call `esp_mmu_init()` here, instead of calling it in startup code.
-    */
-   esp_mmu_init();
    //----------------------------------Map the PSRAM physical range to MMU-----------------------------//
    /**
     * @note 2
@@ -203,12 +195,12 @@ esp_err_t esp_psram_init(void)
    size_t total_mapped_size = 0;
    size_t size_to_map = 0;
    size_t byte_aligned_size = 0;
-   ret = esp_mmu_get_largest_free_block(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, &byte_aligned_size);
+   ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &byte_aligned_size);
    assert(ret == ESP_OK);
    size_to_map = MIN(byte_aligned_size, psram_available_size);
    const void *v_start_8bit_aligned = NULL;
-   ret = esp_mmu_find_vaddr_range(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, &v_start_8bit_aligned);
+   ret = esp_mmu_map_reserve_block_with_caps(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &v_start_8bit_aligned);
    assert(ret == ESP_OK);
#if CONFIG_IDF_TARGET_ESP32
@@ -248,12 +240,12 @@ esp_err_t esp_psram_init(void)
    size_to_map = psram_available_size - total_mapped_size;
    size_t word_aligned_size = 0;
-   ret = esp_mmu_get_largest_free_block(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, &word_aligned_size);
+   ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &word_aligned_size);
    assert(ret == ESP_OK);
    size_to_map = MIN(word_aligned_size, size_to_map);
    const void *v_start_32bit_aligned = NULL;
-   ret = esp_mmu_find_vaddr_range(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, &v_start_32bit_aligned);
+   ret = esp_mmu_map_reserve_block_with_caps(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &v_start_32bit_aligned);
    assert(ret == ESP_OK);
    mmu_hal_map_region(0, MMU_TARGET_PSRAM0, (intptr_t)v_start_32bit_aligned, MMU_PAGE_TO_BYTES(start_page), size_to_map, &actual_mapped_len);
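Condensed from the hunk above, the new call sequence is: query the largest free virtual block with the wanted caps, reserve it, then program the hardware mapping. A sketch under those assumptions (`start_page`, `psram_available_size` and MMU_PAGE_TO_BYTES stand in for the variables and macro used in esp_psram.c; this is not the full init path):
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/param.h>
#include "esp_mmu_map.h"
#include "esp_private/esp_mmu_map_private.h"
#include "hal/mmu_hal.h"
static void reserve_and_map_psram(uint32_t start_page, size_t psram_available_size)
{
    //1. Ask the esp_mm driver how much consecutive virtual space is left
    size_t free_block_size = 0;
    esp_err_t ret = esp_mmu_map_get_max_consecutive_free_block_size(
            MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT,
            MMU_TARGET_PSRAM0, &free_block_size);
    assert(ret == ESP_OK);
    //2. Reserve a block with the same caps
    size_t size_to_map = MIN(free_block_size, psram_available_size);
    const void *vaddr = NULL;
    ret = esp_mmu_map_reserve_block_with_caps(
            size_to_map,
            MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT,
            MMU_TARGET_PSRAM0, &vaddr);
    assert(ret == ESP_OK);
    //3. Program the hardware MMU for the reserved range
    uint32_t actual_mapped_len = 0;
    mmu_hal_map_region(0, MMU_TARGET_PSRAM0, (intptr_t)vaddr,
                       MMU_PAGE_TO_BYTES(start_page), size_to_map, &actual_mapped_len);
}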

View File

@@ -1,77 +0,0 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdlib.h>
#include <stdint.h>
#include "sdkconfig.h"
#include "soc/ext_mem_defs.h"
#include "ext_mem_layout.h"
#include "mmu.h"
#if CONFIG_IDF_TARGET_ESP32
/**
 * These regions refer to linear addresses
 * The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
/*linear start linear end bus size bus ID, bus capabilities */
//Can be used for text
{SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM0_LINEAR), CACHE_BUS_IBUS0, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for text
{SOC_MMU_IRAM1_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM1_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM1_LINEAR), CACHE_BUS_IBUS1, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for text
{SOC_MMU_IROM0_LINEAR_ADDRESS_LOW, SOC_MMU_IROM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IROM0_LINEAR), CACHE_BUS_IBUS2, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for rodata
{SOC_MMU_DROM0_LINEAR_ADDRESS_LOW, SOC_MMU_DROM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DROM0_LINEAR), CACHE_BUS_DBUS0, MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
//Can be used for PSRAM
{SOC_MMU_DRAM1_LINEAR_ADDRESS_LOW, SOC_MMU_DRAM1_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DRAM1_LINEAR), CACHE_BUS_DBUS1, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
};
#elif CONFIG_IDF_TARGET_ESP32S2
/**
 * These regions refer to linear addresses
 * The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
/*linear start linear end bus size bus ID, bus capabilities */
//Can be used for text
{SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM0_LINEAR), CACHE_BUS_IBUS0, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for text
{SOC_MMU_IRAM1_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM1_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM1_LINEAR), CACHE_BUS_IBUS1, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT},
//Can be used for Flash rodata, connected by IBUS
{SOC_MMU_DROM0_LINEAR_ADDRESS_LOW, SOC_MMU_DROM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DROM0_LINEAR), CACHE_BUS_IBUS2, MMU_MEM_CAP_READ | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
//Can be used for PSRAM
{SOC_MMU_DPORT_LINEAR_ADDRESS_LOW, SOC_MMU_DPORT_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DPORT_LINEAR), CACHE_BUS_DBUS2, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT},
//Can be used for PSRAM
{SOC_MMU_DRAM1_LINEAR_ADDRESS_LOW, SOC_MMU_DRAM1_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DRAM1_LINEAR), CACHE_BUS_DBUS1, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
//Can be used for PSRAM
{SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_DRAM0_LINEAR), CACHE_BUS_DBUS0, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
};
#elif CONFIG_IDF_TARGET_ESP32S3
/**
 * The start addresses in this list should always be sorted from low to high, as the MMU driver will need to
* coalesce adjacent regions
*/
const mmu_mem_region_t g_mmu_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {
/*linear start linear end bus size bus ID, bus capabilities */
/**
* Can be used for Flash text, rodata, and PSRAM
* IRAM0 linear address should be always the same as DRAM0 linear address
*/
{SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW, SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH, BUS_SIZE(SOC_MMU_IRAM0_LINEAR), CACHE_BUS_IBUS0 | CACHE_BUS_DBUS0, MMU_MEM_CAP_EXEC | MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT | MMU_MEM_CAP_8BIT},
};
#endif

View File

@@ -3,8 +3,6 @@ archive: libesp_psram.a
entries:
    if SPIRAM = y:
-        mmu (noflash)
        if SPIRAM_MODE_QUAD = y:
            if IDF_TARGET_ESP32S3 = y:
                esp_psram_impl_quad (noflash)

View File

@@ -1,267 +0,0 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* This file will be redesigned into MMU driver, to maintain all the external
* memory contexts including:
* - Flash
* - PSRAM
* - DDR
*
* Now only MMU-PSRAM related private APIs
*/
#include <stdint.h>
#include <string.h>
#include <sys/param.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_check.h"
#include "soc/soc_caps.h"
#include "ext_mem_layout.h"
#include "freertos/FreeRTOS.h"
#include "hal/cache_types.h"
#include "hal/cache_ll.h"
#include "hal/mmu_types.h"
#include "hal/mmu_ll.h"
#include "mmu.h"
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
#define MMU_PAGE_SIZE CONFIG_MMU_PAGE_SIZE
//This flag indicates the memory region is merged, we don't care about it anymore
#define MEM_REGION_MERGED -1
static const char *TAG = "mmu";
extern int _instruction_reserved_start;
extern int _instruction_reserved_end;
extern int _rodata_reserved_start;
extern int _rodata_reserved_end;
typedef struct mmu_linear_mem_ {
cache_bus_mask_t bus_id;
intptr_t start;
intptr_t end;
size_t pool_size;
intptr_t free_head;
size_t free_size;
int caps;
} mmu_linear_mem_t;
typedef struct {
/**
* number of memory regions that are available, after coalescing, this number should be smaller than or equal to `SOC_MMU_LINEAR_ADDRESS_REGION_NUM`
*/
uint32_t num_regions;
/**
* This saves the available MMU linear address regions,
* after reserving flash .rodata and .text, and after coalescing.
* Only the first `num_regions` items are valid
*/
mmu_linear_mem_t mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM];
} mmu_ctx_t;
static mmu_ctx_t s_mmu_ctx;
static void s_reserve_irom_region(mmu_linear_mem_t *hw_mem_regions, int region_nums)
{
/**
 * We follow the way the 1st-stage bootloader loads flash .text:
*
* - Now IBUS addresses (between `_instruction_reserved_start` and `_instruction_reserved_end`) are consecutive on all chips,
* we strongly rely on this to calculate the .text length
*/
size_t irom_len_to_reserve = (uint32_t)&_instruction_reserved_end - (uint32_t)&_instruction_reserved_start;
assert((mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_instruction_reserved_start)) == irom_len_to_reserve);
irom_len_to_reserve = ALIGN_UP_BY(irom_len_to_reserve, MMU_PAGE_SIZE);
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_instruction_reserved_start, irom_len_to_reserve);
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (bus_mask & hw_mem_regions[i].bus_id) {
if (hw_mem_regions[i].pool_size <= irom_len_to_reserve) {
hw_mem_regions[i].free_head = hw_mem_regions[i].end;
hw_mem_regions[i].free_size = 0;
irom_len_to_reserve -= hw_mem_regions[i].pool_size;
} else {
hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + irom_len_to_reserve;
hw_mem_regions[i].free_size -= irom_len_to_reserve;
}
}
}
}
static void s_reserve_drom_region(mmu_linear_mem_t *hw_mem_regions, int region_nums)
{
/**
 * Similarly, we follow the way the 1st-stage bootloader loads flash .rodata:
*/
size_t drom_len_to_reserve = (uint32_t)&_rodata_reserved_end - (uint32_t)&_rodata_reserved_start;
assert((mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_end) - mmu_ll_vaddr_to_laddr((uint32_t)&_rodata_reserved_start)) == drom_len_to_reserve);
drom_len_to_reserve = ALIGN_UP_BY(drom_len_to_reserve, MMU_PAGE_SIZE);
cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_rodata_reserved_start, drom_len_to_reserve);
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (bus_mask & hw_mem_regions[i].bus_id) {
if (hw_mem_regions[i].pool_size <= drom_len_to_reserve) {
hw_mem_regions[i].free_head = hw_mem_regions[i].end;
hw_mem_regions[i].free_size = 0;
drom_len_to_reserve -= hw_mem_regions[i].pool_size;
} else {
hw_mem_regions[i].free_head = hw_mem_regions[i].free_head + drom_len_to_reserve;
hw_mem_regions[i].free_size -= drom_len_to_reserve;
}
}
}
}
void esp_mmu_init(void)
{
mmu_linear_mem_t hw_mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM] = {};
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
hw_mem_regions[i].start = g_mmu_mem_regions[i].start;
hw_mem_regions[i].end = g_mmu_mem_regions[i].end;
hw_mem_regions[i].pool_size = g_mmu_mem_regions[i].size;
hw_mem_regions[i].free_size = g_mmu_mem_regions[i].size;
hw_mem_regions[i].free_head = g_mmu_mem_regions[i].start;
hw_mem_regions[i].bus_id = g_mmu_mem_regions[i].bus_id;
hw_mem_regions[i].caps = g_mmu_mem_regions[i].caps;
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
assert(__builtin_popcount(hw_mem_regions[i].bus_id) == 1);
#endif
assert(hw_mem_regions[i].pool_size % MMU_PAGE_SIZE == 0);
}
//First reserve memory regions used for irom and drom, as we must follow the way the 1st bootloader loads them
s_reserve_irom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
s_reserve_drom_region(hw_mem_regions, SOC_MMU_LINEAR_ADDRESS_REGION_NUM);
if (SOC_MMU_LINEAR_ADDRESS_REGION_NUM > 1) {
//Now we can coalesce adjacent regions
for (int i = 1; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
mmu_linear_mem_t *a = &hw_mem_regions[i - 1];
mmu_linear_mem_t *b = &hw_mem_regions[i];
if ((b->free_head == a->end) && (b->caps == a->caps)) {
a->caps = MEM_REGION_MERGED;
b->bus_id |= a->bus_id;
b->start = a->start;
b->pool_size += a->pool_size;
b->free_head = a->free_head;
b->free_size += a->free_size;
}
}
}
//Count the mem regions left after coalescing
uint32_t region_num = 0;
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if(hw_mem_regions[i].caps != MEM_REGION_MERGED) {
region_num++;
}
}
ESP_EARLY_LOGV(TAG, "after coalescing, %d regions are left", region_num);
//Initialise `s_mmu_ctx.mem_regions[]`, as we've done all static allocation, to prepare available virtual memory regions
uint32_t available_region_idx = 0;
s_mmu_ctx.num_regions = region_num;
for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) {
if (hw_mem_regions[i].caps == MEM_REGION_MERGED) {
continue;
}
memcpy(&s_mmu_ctx.mem_regions[available_region_idx], &hw_mem_regions[i], sizeof(mmu_linear_mem_t));
available_region_idx++;
}
assert(available_region_idx == region_num);
}
esp_err_t esp_mmu_get_largest_free_block(int caps, size_t *out_len)
{
ESP_RETURN_ON_FALSE(out_len, ESP_ERR_INVALID_ARG, TAG, "null pointer");
if (caps & MMU_MEM_CAP_EXEC) {
if ((caps & MMU_MEM_CAP_8BIT) || (caps & MMU_MEM_CAP_WRITE)) {
//None of the executable memory is expected to be 8-bit accessible or writable.
return ESP_ERR_INVALID_ARG;
}
}
*out_len = 0;
size_t max = 0;
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
if ((s_mmu_ctx.mem_regions[i].caps & caps) == caps) {
if (s_mmu_ctx.mem_regions[i].free_size > max) {
max = s_mmu_ctx.mem_regions[i].free_size;
}
}
}
*out_len = max;
return ESP_OK;
}
esp_err_t esp_mmu_find_vaddr_range(size_t size, uint32_t caps, const void **out_ptr)
{
ESP_RETURN_ON_FALSE(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
if (caps & MMU_MEM_CAP_EXEC) {
if ((caps & MMU_MEM_CAP_8BIT) || (caps & MMU_MEM_CAP_WRITE)) {
//None of the executable memory is expected to be 8-bit accessible or writable.
return ESP_ERR_INVALID_ARG;
}
caps |= MMU_MEM_CAP_32BIT;
}
size_t aligned_size = ALIGN_UP_BY(size, MMU_PAGE_SIZE);
bool is_match = false;
uint32_t laddr = 0;
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
if ((s_mmu_ctx.mem_regions[i].caps & caps) == caps) {
if (s_mmu_ctx.mem_regions[i].free_size < aligned_size) {
continue;
} else {
laddr = (uint32_t)s_mmu_ctx.mem_regions[i].free_head;
s_mmu_ctx.mem_regions[i].free_head += aligned_size;
s_mmu_ctx.mem_regions[i].free_size -= aligned_size;
is_match = true;
break;
}
}
}
ESP_RETURN_ON_FALSE(is_match, ESP_ERR_NOT_FOUND, TAG, "no such vaddr range");
ESP_EARLY_LOGV(TAG, "found laddr is 0x%x", laddr);
if (caps & MMU_MEM_CAP_EXEC) {
laddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION);
} else {
laddr = mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_DATA);
}
*out_ptr = (void *)laddr;
return ESP_OK;
}
esp_err_t esp_mmu_dump_region_usage(void)
{
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
ESP_EARLY_LOGI(TAG, "bus_id: 0x%x", s_mmu_ctx.mem_regions[i].bus_id);
ESP_EARLY_LOGI(TAG, "start: 0x%x", s_mmu_ctx.mem_regions[i].start);
ESP_EARLY_LOGI(TAG, "end: 0x%x", s_mmu_ctx.mem_regions[i].end);
ESP_EARLY_LOGI(TAG, "pool_size: 0x%x", s_mmu_ctx.mem_regions[i].pool_size);
ESP_EARLY_LOGI(TAG, "free_head: 0x%x", s_mmu_ctx.mem_regions[i].free_head);
ESP_EARLY_LOGI(TAG, "free_size: 0x%x", s_mmu_ctx.mem_regions[i].free_size);
ESP_EARLY_LOGI(TAG, "caps: 0x%x\n", s_mmu_ctx.mem_regions[i].caps);
}
return ESP_OK;
}
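The region-coalescing pass inside the removed esp_mmu_init() above lives on in the new esp_mm driver; distilled, the rule is: merge two neighbours when the upper region's free head touches the lower region's end and the capability bits agree. A standalone sketch of just that step (bus-mask bookkeeping omitted, plain types standing in for the driver's):
#include <stddef.h>
#include <stdint.h>
#define MERGED (-1)    //stand-in for the driver's MEM_REGION_MERGED flag
typedef struct {
    intptr_t start, end, free_head;
    size_t pool_size, free_size;
    int caps;
} region_t;
static void coalesce_regions(region_t *r, int n)
{
    for (int i = 1; i < n; i++) {
        region_t *a = &r[i - 1];
        region_t *b = &r[i];
        if ((b->free_head == a->end) && (b->caps == a->caps)) {
            a->caps = MERGED;            //lower region is consumed...
            b->start = a->start;         //...and the upper one grows downwards
            b->free_head = a->free_head;
            b->pool_size += a->pool_size;
            b->free_size += a->free_size;
        }
    }
}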

View File

@@ -1,76 +0,0 @@
/*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stdlib.h>
#include <stdint.h>
#include "esp_err.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* This file will be moved out of `esp_psram` component. And will be
* future MMU driver, to maintain all the external memory contexts including:
* - Flash
* - PSRAM
* - DDR
*
* Now only support ESP32, ESP32S2, ESP32S3 virtual address maintenance, and is internal
*/
#define MMU_MEM_CAP_EXEC (1<<0)
#define MMU_MEM_CAP_READ (1<<1)
#define MMU_MEM_CAP_WRITE (1<<2)
#define MMU_MEM_CAP_32BIT (1<<3)
#define MMU_MEM_CAP_8BIT (1<<4)
/**
* @brief Initialise the MMU driver
*
* This is called once in the IDF startup code. Don't call it in applications
*/
void esp_mmu_init(void);
/**
* @brief Get largest consecutive free external virtual memory block, with given capabilities
*
* @param[in] caps Bitwise OR of MMU_MEM_CAP_* flags indicating the memory block
* @param[out] out_len Largest free block length, in bytes.
*
* @return
* - ESP_OK: On success
* - ESP_ERR_INVALID_ARG: Invalid arguments, could be null pointer
*/
esp_err_t esp_mmu_get_largest_free_block(int caps, size_t *out_len);
/**
* @brief Find a consecutive external virtual memory range, with given capabilities and size
*
* @param[in] size Size, in bytes, the amount of memory to find
* @param[in] caps Bitwise OR of MMU_MEM_CAP_* flags indicating the memory block
* @param[out] out_ptr Pointer to the memory range found
*
* @return
* - ESP_OK: On success
 *        - ESP_ERR_INVALID_ARG: Invalid arguments, could be a wrong caps combination, or a null pointer
 *        - ESP_ERR_NOT_FOUND: Didn't find enough memory with the given caps
*/
esp_err_t esp_mmu_find_vaddr_range(size_t size, uint32_t caps, const void **out_ptr);
/**
* @brief Dump internal memory region usage
*
* @return
* - ESP_OK: On success
*/
esp_err_t esp_mmu_dump_region_usage(void);
#ifdef __cplusplus
}
#endif

View File

@@ -55,7 +55,7 @@ else()
    idf_component_register(SRCS "${srcs}"
                           INCLUDE_DIRS include
-                          PRIV_REQUIRES spi_flash esp_timer
+                          PRIV_REQUIRES spi_flash esp_timer esp_mm
                           # [refactor-todo] requirements due to init code,
                           # should be removable once using component init functions
                           # link-time registration is used.

View File

@@ -319,14 +319,22 @@ SECTIONS
    *(.tbss)
    *(.tbss.*)
    _thread_local_end = ABSOLUTE(.);
-   _rodata_reserved_end = ABSOLUTE(.);  /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
    . = ALIGN(4);
  } >default_rodata_seg
  _flash_rodata_align = ALIGNOF(.flash.rodata);
+  /*
+   This section is a place where we dump all the rodata which aren't used at runtime,
+   so as to avoid binary size increase
+  */
  .flash.rodata_noload (NOLOAD) :
  {
+    /*
+     This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address
+     We don't need to include the noload rodata in this section
+    */
+    _rodata_reserved_end = ABSOLUTE(.);
    . = ALIGN (4);
    mapping[rodata_noload]
  } > default_rodata_seg
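The linker-script symbols being moved here are consumed from C exactly the way the removed mmu.c did it: declare them extern and take their addresses. A short sketch of that pattern (symbol names as in the scripts above):
#include <stddef.h>
#include <stdint.h>
extern int _rodata_reserved_start;  //start of flash .rodata
extern int _rodata_reserved_end;    //now defined in .flash.rodata_noload, so the
                                    //noload rodata is excluded from the range
/* Only the symbol addresses are meaningful; the symbols carry no storage */
static inline size_t rodata_reserved_size(void)
{
    return (size_t)((uintptr_t)&_rodata_reserved_end - (uintptr_t)&_rodata_reserved_start);
}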

View File

@@ -97,7 +97,7 @@ SECTIONS
  .flash.text :
  {
    _stext = .;
-   _instruction_reserved_start = ABSOLUTE(.);
+   _instruction_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.text start, this can be used for mmu driver to maintain virtual address */
    _text_start = ABSOLUTE(.);
    mapping[flash_text]
@@ -116,7 +116,7 @@ SECTIONS
    . += _esp_flash_mmap_prefetch_pad_size;
    _text_end = ABSOLUTE(.);
-   _instruction_reserved_end = ABSOLUTE(.);
+   _instruction_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.text end, this can be used for mmu driver to maintain virtual address */
    _etext = .;
    /**
@@ -141,11 +141,11 @@ SECTIONS
    /* Prepare the alignment of the section above. Few bytes (0x20) must be
     * added for the mapping header. */
    . = ALIGN(_esp_mmu_block_size) + 0x20;
-   _rodata_reserved_start = .;
  } > default_rodata_seg
  .flash.appdesc : ALIGN(0x10)
  {
+   _rodata_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
    _rodata_start = ABSOLUTE(.);
    *(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */
@@ -222,7 +222,6 @@ SECTIONS
    *(.tbss)
    *(.tbss.*)
    _thread_local_end = ABSOLUTE(.);
-   _rodata_reserved_end = ABSOLUTE(.);
    . = ALIGN(ALIGNOF(.eh_frame));
  } > default_rodata_seg
@@ -246,8 +245,17 @@ SECTIONS
    __eh_frame_hdr_end = ABSOLUTE(.);
  } > default_rodata_seg
+  /*
+   This section is a place where we dump all the rodata which aren't used at runtime,
+   so as to avoid binary size increase
+  */
  .flash.rodata_noload (NOLOAD) :
  {
+    /*
+     This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address
+     We don't need to include the noload rodata in this section
+    */
+    _rodata_reserved_end = ABSOLUTE(.);
    . = ALIGN (4);
    mapping[rodata_noload]
  } > default_rodata_seg

View File

@@ -207,7 +207,7 @@ SECTIONS
  .flash.text :
  {
    _stext = .;
-   _instruction_reserved_start = ABSOLUTE(.);
+   _instruction_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.text start, this can be used for mmu driver to maintain virtual address */
    _text_start = ABSOLUTE(.);
    mapping[flash_text]
@@ -226,7 +226,7 @@ SECTIONS
    . += _esp_flash_mmap_prefetch_pad_size;
    _text_end = ABSOLUTE(.);
-   _instruction_reserved_end = ABSOLUTE(.);
+   _instruction_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.text end, this can be used for mmu driver to maintain virtual address */
    _etext = .;
    /**
@@ -251,11 +251,11 @@ SECTIONS
    /* Prepare the alignment of the section above. Few bytes (0x20) must be
     * added for the mapping header. */
    . = ALIGN(_esp_mmu_block_size) + 0x20;
-   _rodata_reserved_start = .;
  } > default_rodata_seg
  .flash.appdesc : ALIGN(0x10)
  {
+   _rodata_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
    _rodata_start = ABSOLUTE(.);
    *(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */
@@ -332,7 +332,6 @@ SECTIONS
    *(.tbss)
    *(.tbss.*)
    _thread_local_end = ABSOLUTE(.);
-   _rodata_reserved_end = ABSOLUTE(.);
    . = ALIGN(ALIGNOF(.eh_frame));
  } > default_rodata_seg
@@ -356,8 +355,17 @@ SECTIONS
    __eh_frame_hdr_end = ABSOLUTE(.);
  } > default_rodata_seg
+  /*
+   This section is a place where we dump all the rodata which aren't used at runtime,
+   so as to avoid binary size increase
+  */
  .flash.rodata_noload (NOLOAD) :
  {
+    /*
+     This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address
+     We don't need to include the noload rodata in this section
+    */
+    _rodata_reserved_end = ABSOLUTE(.);
    . = ALIGN (4);
    mapping[rodata_noload]
  } > default_rodata_seg

View File

@@ -240,7 +240,7 @@ SECTIONS
  .flash.text :
  {
    _stext = .;
-   _instruction_reserved_start = ABSOLUTE(.);
+   _instruction_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.text start, this can be used for mmu driver to maintain virtual address */
    _text_start = ABSOLUTE(.);
    mapping[flash_text]
@@ -259,7 +259,7 @@ SECTIONS
    . += _esp_flash_mmap_prefetch_pad_size;
    _text_end = ABSOLUTE(.);
-   _instruction_reserved_end = ABSOLUTE(.);
+   _instruction_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.text end, this can be used for mmu driver to maintain virtual address */
    _etext = .;
    /**
@@ -284,11 +284,11 @@ SECTIONS
    /* Prepare the alignment of the section above. Few bytes (0x20) must be
     * added for the mapping header. */
    . = ALIGN(_esp_mmu_block_size) + 0x20;
-   _rodata_reserved_start = .;
  } > default_rodata_seg
  .flash.appdesc : ALIGN(0x10)
  {
+   _rodata_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
    _rodata_start = ABSOLUTE(.);
    *(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */
@@ -365,7 +365,6 @@ SECTIONS
    *(.tbss)
    *(.tbss.*)
    _thread_local_end = ABSOLUTE(.);
-   _rodata_reserved_end = ABSOLUTE(.);
    . = ALIGN(ALIGNOF(.eh_frame));
  } > default_rodata_seg
@@ -389,8 +388,17 @@ SECTIONS
    __eh_frame_hdr_end = ABSOLUTE(.);
  } > default_rodata_seg
+  /*
+   This section is a place where we dump all the rodata which aren't used at runtime,
+   so as to avoid binary size increase
+  */
  .flash.rodata_noload (NOLOAD) :
  {
+    /*
+     This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address
+     We don't need to include the noload rodata in this section
+    */
+    _rodata_reserved_end = ABSOLUTE(.);
    . = ALIGN (4);
    mapping[rodata_noload]
  } > default_rodata_seg

View File

@@ -64,7 +64,7 @@ MEMORY
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
  /* Flash mapped instruction data */
-  iram0_2_seg (RX) : org = 0x42000020, len = (IDRAM0_2_SEG_SIZE >> 1) -0x20
+  irom_seg (RX) : org = 0x42000020, len = IDRAM0_2_SEG_SIZE - 0x20
  /**
   * (0x20 offset above is a convenience for the app binary image generation.
@@ -83,9 +83,9 @@ MEMORY
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
  /* Flash mapped constant data */
-  drom0_0_seg (R) : org = 0x42000020 + (IDRAM0_2_SEG_SIZE >> 1), len = (IDRAM0_2_SEG_SIZE >> 1)-0x20
-  /* (See iram0_2_seg for meaning of 0x20 offset in the above.) */
+  drom_seg (R) : org = 0x42000020, len = IDRAM0_2_SEG_SIZE - 0x20
+  /* (See irom_seg for meaning of 0x20 offset in the above.) */
#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS
  /**
@@ -122,19 +122,19 @@ REGION_ALIAS("rtc_slow_seg", rtc_iram_seg );
REGION_ALIAS("rtc_data_location", rtc_iram_seg );
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
-  REGION_ALIAS("default_code_seg", iram0_2_seg);
+  REGION_ALIAS("default_code_seg", irom_seg);
#else
  REGION_ALIAS("default_code_seg", iram0_0_seg);
#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
-  REGION_ALIAS("default_rodata_seg", drom0_0_seg);
+  REGION_ALIAS("default_rodata_seg", drom_seg);
#else
  REGION_ALIAS("default_rodata_seg", dram0_0_seg);
#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS
/**
- * If rodata default segment is placed in `drom0_0_seg`, then flash's first rodata section must
+ * If rodata default segment is placed in `drom_seg`, then flash's first rodata section must
 * also be first in the segment.
 */
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS

View File

@@ -210,7 +210,7 @@ SECTIONS
  .flash.text :
  {
    _stext = .;
-   _instruction_reserved_start = ABSOLUTE(.);
+   _instruction_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.text start, this can be used for mmu driver to maintain virtual address */
    _text_start = ABSOLUTE(.);
    mapping[flash_text]
@@ -229,7 +229,7 @@ SECTIONS
    . += 16;
    _text_end = ABSOLUTE(.);
-   _instruction_reserved_end = ABSOLUTE(.);
+   _instruction_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.text end, this can be used for mmu driver to maintain virtual address */
    _etext = .;
    /**
@@ -254,11 +254,11 @@ SECTIONS
    /* Prepare the alignment of the section above. Few bytes (0x20) must be
     * added for the mapping header. */
    . = ALIGN(_esp_mmu_block_size) + 0x20;
-   _rodata_reserved_start = .;
  } > default_rodata_seg
  .flash.appdesc : ALIGN(0x10)
  {
+   _rodata_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
    _rodata_start = ABSOLUTE(.);
    *(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */
@@ -335,7 +335,6 @@ SECTIONS
    *(.tbss)
    *(.tbss.*)
    _thread_local_end = ABSOLUTE(.);
-   _rodata_reserved_end = ABSOLUTE(.);
    . = ALIGN(ALIGNOF(.eh_frame));
  } > default_rodata_seg
@@ -359,6 +358,20 @@ SECTIONS
    __eh_frame_hdr_end = ABSOLUTE(.);
  } > default_rodata_seg
+  /*
+   This section is a place where we dump all the rodata which aren't used at runtime,
+   so as to avoid binary size increase
+  */
+  .flash.rodata_noload (NOLOAD) :
+  {
+    /*
+     This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address
+     We don't need to include the noload rodata in this section
+    */
+    _rodata_reserved_end = ABSOLUTE(.);
+    . = ALIGN (4);
+  } > default_rodata_seg
  /* Marks the end of IRAM code segment */
  .iram0.text_end (NOLOAD) :
  {

View File

@@ -335,14 +335,22 @@ SECTIONS
    *(.tbss)
    *(.tbss.*)
    _thread_local_end = ABSOLUTE(.);
-   _rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
    . = ALIGN(4);
  } >default_rodata_seg
  _flash_rodata_align = ALIGNOF(.flash.rodata);
+  /*
+   This section is a place where we dump all the rodata which aren't used at runtime,
+   so as to avoid binary size increase
+  */
  .flash.rodata_noload (NOLOAD) :
  {
+    /*
+     This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address
+     We don't need to include the noload rodata in this section
+    */
+    _rodata_reserved_end = ABSOLUTE(.);
    . = ALIGN (4);
    mapping[rodata_noload]
  } > default_rodata_seg

View File

@@ -288,11 +288,11 @@ SECTIONS
    /* Prepare the alignment of the section above. Few bytes (0x20) must be
     * added for the mapping header. */
    . = ALIGN(_esp_mmu_block_size) + 0x20;
-   _rodata_reserved_start = .; /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
  } > default_rodata_seg
  .flash.appdesc : ALIGN(0x10)
  {
+   _rodata_reserved_start = ABSOLUTE(.); /* This is a symbol marking the flash.rodata start, this can be used for mmu driver to maintain virtual address */
    _rodata_start = ABSOLUTE(.);
    *(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */
@@ -361,14 +361,22 @@ SECTIONS
    *(.tbss)
    *(.tbss.*)
    _thread_local_end = ABSOLUTE(.);
-   _rodata_reserved_end = ABSOLUTE(.); /* This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address */
    . = ALIGN(4);
  } > default_rodata_seg
  _flash_rodata_align = ALIGNOF(.flash.rodata);
+  /*
+   This section is a place where we dump all the rodata which aren't used at runtime,
+   so as to avoid binary size increase
+  */
  .flash.rodata_noload (NOLOAD) :
  {
+    /*
+     This is a symbol marking the flash.rodata end, this can be used for mmu driver to maintain virtual address
+     We don't need to include the noload rodata in this section
+    */
+    _rodata_reserved_end = ABSOLUTE(.);
    . = ALIGN (4);
    mapping[rodata_noload]
  } > default_rodata_seg

View File

@@ -68,6 +68,7 @@
#include "esp32c2/memprot.h" #include "esp32c2/memprot.h"
#endif #endif
#include "esp_private/esp_mmu_map_private.h"
#if CONFIG_SPIRAM #if CONFIG_SPIRAM
#include "esp_psram.h" #include "esp_psram.h"
#include "esp_private/esp_psram_extram.h" #include "esp_private/esp_psram_extram.h"
@@ -339,12 +340,10 @@ void IRAM_ATTR call_start_cpu0(void)
    /* If we need to use SPIRAM, we should use data cache, or if we want to access rodata, we also should use data cache.
       Configure the mode of data : cache size, cache associated ways, cache line size.
       Enable data cache, so if we don't use SPIRAM, it just works. */
-#if CONFIG_SPIRAM_BOOT_INIT
    extern void esp_config_data_cache_mode(void);
    esp_config_data_cache_mode();
    Cache_Enable_DCache(0);
#endif
-#endif
#if CONFIG_IDF_TARGET_ESP32S3
    /* Configure the mode of instruction cache : cache size, cache line size. */
@@ -401,6 +400,8 @@ void IRAM_ATTR call_start_cpu0(void)
    mspi_timing_flash_tuning();
#endif
+   esp_mmu_map_init();
#if CONFIG_SPIRAM_BOOT_INIT
    if (esp_psram_init() != ESP_OK) {
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY

View File

@@ -28,8 +28,7 @@ menu "Cache config"
choice ESP32S2_DATA_CACHE_SIZE
    prompt "Data cache size"
-   default ESP32S2_DATA_CACHE_0KB if !SPIRAM
-   default ESP32S2_DATA_CACHE_8KB if SPIRAM
+   default ESP32S2_DATA_CACHE_8KB
    help
        Data cache size to be set on application startup.
        If you use 0KB data cache, the other 16KB will be added to the heap

View File

@@ -12,6 +12,7 @@
#include "hal/cache_hal.h" #include "hal/cache_hal.h"
#include "hal/cache_types.h" #include "hal/cache_types.h"
#include "hal/cache_ll.h" #include "hal/cache_ll.h"
#include "hal/mmu_ll.h"
#include "soc/soc_caps.h" #include "soc/soc_caps.h"
#if CONFIG_IDF_TARGET_ESP32S2 #if CONFIG_IDF_TARGET_ESP32S2
@@ -112,3 +113,10 @@ void cache_hal_enable(cache_type_t type)
    }
#endif
}
void cache_hal_invalidate_addr(uint32_t vaddr, uint32_t size)
{
//Now only esp32 has 2 MMUs, this file doesn't build on esp32
HAL_ASSERT(mmu_ll_check_valid_ext_vaddr_region(0, vaddr, size));
Cache_Invalidate_Addr(vaddr, size);
}
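A hedged sketch of the intended use of this new HAL call: once an MMU entry behind an already-cached virtual range has been rewritten, the stale cache lines must be dropped before reading through the range again. `remap_page` below is a hypothetical helper, not an API from this MR:
#include <stdint.h>
#include "hal/cache_hal.h"
static void remap_and_invalidate(uint32_t vaddr, uint32_t size)
{
    //remap_page(vaddr, new_paddr);          //hypothetical MMU-entry rewrite
    cache_hal_invalidate_addr(vaddr, size);  //asserts the range is a valid ext-mem vaddr
}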

View File

@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

View File

@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -18,6 +18,8 @@
extern "C" { extern "C" {
#endif #endif
#define MMU_LL_PSRAM_ENTRY_START_ID 1152
/** /**
* Convert MMU virtual address to linear address * Convert MMU virtual address to linear address
* *
@@ -75,7 +77,7 @@ static inline mmu_page_size_t mmu_ll_get_page_size(uint32_t mmu_id)
__attribute__((always_inline))
static inline void mmu_ll_set_page_size(uint32_t mmu_id, uint32_t size)
{
-   HAL_ASSERT(size == MMU_PAGE_64KB);
+   //Only supports `MMU_PAGE_64KB`
}
/**
@@ -101,6 +103,96 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
           (ADDRESS_IN_DROM0_CACHE(vaddr_start) && ADDRESS_IN_DROM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
/**
* To get the MMU table entry id to be mapped
*
* @param mmu_id MMU ID
* @param vaddr virtual address to be mapped
*
* @return
* MMU table entry id
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
{
(void)mmu_id;
uint32_t offset = 0;
uint32_t shift_code = 0;
uint32_t vaddr_mask = 0;
//On ESP32, we only use PID0 and PID1
if (ADDRESS_IN_DROM0_CACHE(vaddr)) {
offset = 0;
shift_code = 16;
vaddr_mask = MMU_VADDR_MASK;
} else if (ADDRESS_IN_IRAM0_CACHE(vaddr)) {
offset = 64;
shift_code = 16;
vaddr_mask = MMU_VADDR_MASK;
} else if (ADDRESS_IN_IRAM1_CACHE(vaddr)) {
offset = 128;
shift_code = 16;
vaddr_mask = MMU_VADDR_MASK;
} else if (ADDRESS_IN_IROM0_CACHE(vaddr)) {
offset = 192;
shift_code = 16;
vaddr_mask = MMU_VADDR_MASK;
} else if (ADDRESS_IN_DRAM1_CACHE(vaddr)) {
//PSRAM page size 32KB
offset = MMU_LL_PSRAM_ENTRY_START_ID;
shift_code = 15;
vaddr_mask = MMU_VADDR_MASK >> 1;
} else {
HAL_ASSERT(false);
}
return offset + ((vaddr & vaddr_mask) >> shift_code);
}
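A hand-worked example of the lookup above (illustrative numbers, assuming the usual ESP32 value MMU_VADDR_MASK == 0x3FFFFF):
/*
 * vaddr 0x3F410000 falls in DROM0: offset 0, shift_code 16 (64 KB pages)
 *     0 + ((0x3F410000 & 0x3FFFFF) >> 16) = entry 1
 * vaddr 0x3F808000 falls in DRAM1 (PSRAM): offset 1152, shift_code 15 (32 KB pages)
 *     1152 + ((0x3F808000 & 0x1FFFFF) >> 15) = entry 1153
 */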
/**
* Format the paddr to be mappable
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
uint32_t shift_code = 0;
if (target == MMU_TARGET_FLASH0) {
shift_code = 16;
} else {
//PSRAM page size 32KB
shift_code = 15;
}
return paddr >> shift_code;
}
/**
 * Write to the MMU table to map the virtual memory and the physical memory
 *
@@ -113,7 +205,6 @@ __attribute__((always_inline))
static inline void mmu_ll_write_entry(uint32_t mmu_id, uint32_t entry_id, uint32_t mmu_val, mmu_target_t target)
{
    (void)target;
-   HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
    DPORT_INTERRUPT_DISABLE();
    switch (mmu_id) {
@@ -140,7 +231,6 @@ __attribute__((always_inline))
static inline uint32_t mmu_ll_read_entry(uint32_t mmu_id, uint32_t entry_id)
{
    uint32_t mmu_value;
-   HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
    DPORT_INTERRUPT_DISABLE();
    switch (mmu_id) {
@@ -167,7 +257,6 @@ __attribute__((always_inline))
static inline void mmu_ll_set_entry_invalid(uint32_t mmu_id, uint32_t entry_id)
{
-   HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
    DPORT_INTERRUPT_DISABLE();
    switch (mmu_id) {
        case MMU_TABLE_CORE0:
@@ -196,22 +285,164 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
/**
- * Get MMU table entry is invalid
+ * Check MMU table entry value is valid
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
- * return ture for MMU entry is invalid, false for valid
 *
+ * @return True for MMU entry is valid; False for invalid
 */
-__attribute__((always_inline))
-static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
+static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
    (void)mmu_id;
+   HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
    DPORT_INTERRUPT_DISABLE();
    uint32_t mmu_value = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[entry_id]);
    DPORT_INTERRUPT_RESTORE();
-   return (mmu_value & MMU_INVALID) ? true : false;
+   return (mmu_value & MMU_INVALID) ? false : true;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
HAL_ASSERT(mmu_ll_check_entry_valid(mmu_id, entry_id));
return (entry_id >= MMU_LL_PSRAM_ENTRY_START_ID) ? MMU_TARGET_PSRAM0 : MMU_TARGET_FLASH0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
DPORT_INTERRUPT_DISABLE();
uint32_t mmu_value = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[entry_id]);
DPORT_INTERRUPT_RESTORE();
return (entry_id >= MMU_LL_PSRAM_ENTRY_START_ID) ? (mmu_value << 15) : (mmu_value << 16);
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first match entry ID. However it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value to be read from MMU table standing for paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
(void)target;
DPORT_INTERRUPT_DISABLE();
if (target == MMU_TARGET_FLASH0) {
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
uint32_t mmu_value = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]);
if (!(mmu_value & MMU_INVALID)) {
if (mmu_value == mmu_val) {
DPORT_INTERRUPT_RESTORE();
return i;
}
}
}
} else {
//For PSRAM, we only use PID 0/1. Its start entry ID is MMU_LL_PSRAM_ENTRY_START_ID (1152), and 128 entries are used for PSRAM
for (int i = MMU_LL_PSRAM_ENTRY_START_ID; i < 1280; i++) {
uint32_t mmu_value = DPORT_SEQUENCE_REG_READ((uint32_t)&DPORT_PRO_FLASH_MMU_TABLE[i]);
if (!(mmu_value & MMU_INVALID)) {
if (mmu_value == mmu_val) {
DPORT_INTERRUPT_RESTORE();
return i;
}
}
}
}
DPORT_INTERRUPT_RESTORE();
return -1;
}
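For context, a sketch of how this reverse lookup composes with `mmu_ll_format_paddr` above and the entry-to-vaddr helper defined just below into a paddr-to-vaddr query (a paraphrase of the driver-level flow, not code from this MR):
#include <stdint.h>
#include "hal/mmu_ll.h"
static void *paddr_to_vaddr_sketch(uint32_t paddr, mmu_target_t target, mmu_vaddr_t type)
{
    uint32_t mmu_val = mmu_ll_format_paddr(0, paddr, target);  //paddr in table format
    int entry_id = mmu_ll_find_entry_id_based_on_map_value(0, mmu_val, target);
    if (entry_id < 0) {
        return NULL;    //paddr is not currently mapped
    }
    return (void *)mmu_ll_entry_id_to_vaddr_base(0, entry_id, type);
}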
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
(void)type;
uint32_t vaddr_base = 0;
uint32_t shift_code = 0;
if (entry_id < 64) {
//first 64 entries are for DROM0
if (type != MMU_VADDR_DATA) {
return 0;
}
entry_id -= 0;
shift_code = 16;
vaddr_base = 0x3f400000;
} else if (entry_id >= 64 && entry_id < 128) {
//second 64 entries are for IRAM0
if (type != MMU_VADDR_INSTRUCTION) {
return 0;
}
entry_id -= 64;
shift_code = 16;
vaddr_base = 0x40000000;
} else if (entry_id >= 128 && entry_id < 192) {
//third 64 entries are for IRAM1
if (type != MMU_VADDR_INSTRUCTION) {
return 0;
}
entry_id -= 128;
shift_code = 16;
vaddr_base = 0x40000000;
} else if (entry_id >= 192 && entry_id < 256) {
//fourth 64 entries are for IROM0
if (type != MMU_VADDR_INSTRUCTION) {
return 0;
}
entry_id -= 192;
shift_code = 16;
vaddr_base = 0x40000000;
} else if (entry_id >= MMU_LL_PSRAM_ENTRY_START_ID) {
//starting from 1152, 128 entries are for DRAM1
if (type != MMU_VADDR_DATA) {
return 0;
}
entry_id -= MMU_LL_PSRAM_ENTRY_START_ID;
shift_code = 15;
vaddr_base = 0x3f800000;
} else {
HAL_ASSERT(false);
}
return vaddr_base + (entry_id << shift_code);
}
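A hand-computed spot check tying mmu_ll_get_entry_id and mmu_ll_entry_id_to_vaddr_base together (ESP32 layout assumed, as above: DROM0 64 KB pages from entry 0, PSRAM 32 KB pages from entry 1152):
#include "hal/mmu_ll.h"
static void entry_id_round_trip_check(void)
{
    //DROM0 data entry 1 <-> vaddr 0x3F400000 + 0x10000
    HAL_ASSERT(mmu_ll_entry_id_to_vaddr_base(0, 1, MMU_VADDR_DATA) == 0x3F410000);
    HAL_ASSERT(mmu_ll_get_entry_id(0, 0x3F410000) == 1);
    //PSRAM data entry 1153 <-> vaddr 0x3F800000 + 0x8000
    HAL_ASSERT(mmu_ll_entry_id_to_vaddr_base(0, 1153, MMU_VADDR_DATA) == 0x3F808000);
    HAL_ASSERT(mmu_ll_get_entry_id(0, 0x3F808000) == 1153);
}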
#ifdef __cplusplus

View File

@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -12,7 +12,6 @@
#include "soc/ext_mem_defs.h" #include "soc/ext_mem_defs.h"
#include "hal/cache_types.h" #include "hal/cache_types.h"
#include "hal/assert.h" #include "hal/assert.h"
#include "sdkconfig.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
@@ -54,9 +53,9 @@ static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t v
    cache_bus_mask_t mask = 0;
    uint32_t vaddr_end = vaddr_start + len - 1;
-   if (vaddr_start >= IRAM0_CACHE_ADDRESS_LOW && vaddr_end < IRAM0_CACHE_ADDRESS_HIGH(CONFIG_MMU_PAGE_SIZE)) {
+   if (vaddr_start >= IRAM0_CACHE_ADDRESS_LOW && vaddr_end < IRAM0_CACHE_ADDRESS_HIGH) {
        mask |= CACHE_BUS_IBUS0;
-   } else if (vaddr_start >= DRAM0_CACHE_ADDRESS_LOW && vaddr_end < DRAM0_CACHE_ADDRESS_HIGH(CONFIG_MMU_PAGE_SIZE)) {
+   } else if (vaddr_start >= DRAM0_CACHE_ADDRESS_LOW && vaddr_end < DRAM0_CACHE_ADDRESS_HIGH) {
        mask |= CACHE_BUS_DBUS0;
    } else {
        HAL_ASSERT(0);      //Out of region

View File

@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -13,7 +13,6 @@
#include "soc/ext_mem_defs.h" #include "soc/ext_mem_defs.h"
#include "hal/assert.h" #include "hal/assert.h"
#include "hal/mmu_types.h" #include "hal/mmu_types.h"
#include "sdkconfig.h"
#ifdef __cplusplus #ifdef __cplusplus
@@ -21,11 +20,36 @@ extern "C" {
#endif
-/**
- * @brief The real MMU page size get from Kconfig.
- *
- * @note Only used in this file
- */
-#define MMU_LL_PAGE_SIZE (CONFIG_MMU_PAGE_SIZE)
+/**
+ * Convert MMU virtual address to linear address
+ *
+ * @param vaddr virtual address
+ *
+ * @return linear address
+ */
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
uint32_t vaddr_base = 0;
if (vaddr_type == MMU_VADDR_DATA) {
vaddr_base = SOC_MMU_DBUS_VADDR_BASE;
} else {
vaddr_base = SOC_MMU_IBUS_VADDR_BASE;
}
return vaddr_base | laddr;
}
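A small sketch of the conversion pair above: instruction-bus and data-bus vaddrs share one linear address, and converting back just re-attaches the requested bus base. The 0x42000000/0x3C000000 bases are assumed for this target, per the usual ESP32-C2 layout:
#include "hal/mmu_ll.h"
static void laddr_vaddr_round_trip(void)
{
    uint32_t laddr = mmu_ll_vaddr_to_laddr(0x42000020);      //strip the bus bits
    HAL_ASSERT(laddr == mmu_ll_vaddr_to_laddr(0x3C000020));  //same linear address
    HAL_ASSERT(mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_INSTRUCTION) == 0x42000020);
    HAL_ASSERT(mmu_ll_laddr_to_vaddr(laddr, MMU_VADDR_DATA) == 0x3C000020);
}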
/**
 * Get MMU page size
@@ -69,7 +93,25 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
{
    (void)mmu_id;
    uint32_t vaddr_end = vaddr_start + len - 1;
-   return (ADDRESS_IN_IRAM0_CACHE(vaddr_start, MMU_LL_PAGE_SIZE) && ADDRESS_IN_IRAM0_CACHE(vaddr_end, MMU_LL_PAGE_SIZE)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start, MMU_LL_PAGE_SIZE) && ADDRESS_IN_DRAM0_CACHE(vaddr_end, MMU_LL_PAGE_SIZE));
+   return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
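A worked rejection case for the guard above (illustrative numbers: 64 KB pages and MMU_MAX_PADDR_PAGE_NUM == 256, i.e. a 16 MB physical window):
/*
 * paddr_start = 0xFF0000, len = 0x20000:
 *     0xFF0000 < 0x1000000                                        -> ok
 *     0x20000  < 0x1000000                                        -> ok
 *     0xFF0000 + 0x20000 - 1 = 0x100FFFF, which is not < 0x1000000 -> rejected
 */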
/**
@@ -102,7 +144,7 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
            HAL_ASSERT(shift_code);
    }
-   return ((vaddr & MMU_VADDR_MASK(page_size)) >> shift_code);
+   return ((vaddr & MMU_VADDR_MASK) >> shift_code);
}
/**
@@ -110,14 +152,16 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
 *
 * @param mmu_id MMU ID
 * @param paddr physical address to be mapped
+ * @param target paddr memory target, not used
 *
 * @return
 *         mmu_val - paddr in MMU table supported format
 */
__attribute__((always_inline))
-static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr)
+static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
    (void)mmu_id;
+   (void)target;
    mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
    uint32_t shift_code = 0;
@@ -201,19 +245,122 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
/**
- * Get MMU table entry is invalid
+ * Check MMU table entry value is valid
 *
 * @param mmu_id MMU ID
 * @param entry_id MMU entry ID
- * return ture for MMU entry is invalid, false for valid
 *
+ * @return True for MMU entry is valid; False for invalid
 */
-__attribute__((always_inline))
-static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
+static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
    (void)mmu_id;
    HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
-   return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? true : false;
+   return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? false : true;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
return MMU_TARGET_FLASH0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
default:
HAL_ASSERT(shift_code);
}
return ((*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_VALID_VAL_MASK) << shift_code;
}
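Since the shift code is log2 of the page size, the stored page number shifted left by it gives the paddr base. A small illustration with assumed values (32KB pages, so shift 15, and a stand-in value mask):
#include <assert.h>
#include <stdint.h>
int main(void)
{
    uint32_t mmu_raw = 0x0000002A;    /* assumed raw entry: page number 42 */
    uint32_t valid_val_mask = 0x3F;   /* assumed stand-in for MMU_VALID_VAL_MASK */
    uint32_t shift_code = 15;         /* 32KB page -> log2(0x8000) = 15 */
    uint32_t paddr_base = (mmu_raw & valid_val_mask) << shift_code;
    assert(paddr_base == 42 * 0x8000); /* page 42 starts at 42 * 32KB */
    return 0;
}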
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value to be read from MMU table standing for paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
if (((*(uint32_t *)(DR_REG_MMU_TABLE + i * 4)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
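The reverse lookup above is a linear scan over the whole table, so it costs O(MMU_ENTRY_NUM) and stops at the first hit. A host-side model of the same loop, with a toy table standing in for the real MMU entries:
#include <stdint.h>
#include <stdio.h>
#define FAKE_ENTRY_NUM 8  /* assumed; the real table has MMU_ENTRY_NUM entries */
static int find_entry(const uint32_t *table, const int *valid, uint32_t mmu_val)
{
    for (int i = 0; i < FAKE_ENTRY_NUM; i++) {
        if (valid[i] && table[i] == mmu_val) {
            return i;                 /* first match wins, later duplicates are ignored */
        }
    }
    return -1;                        /* same "not found" convention as the LL function */
}
int main(void)
{
    uint32_t table[FAKE_ENTRY_NUM] = {0, 7, 7, 3, 0, 0, 0, 0};
    int valid[FAKE_ENTRY_NUM]      = {0, 1, 1, 1, 0, 0, 0, 0};
    printf("%d\n", find_entry(table, valid, 7)); /* prints 1, not 2 */
    return 0;
}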
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
default:
HAL_ASSERT(shift_code);
}
uint32_t laddr = entry_id << shift_code;
return mmu_ll_laddr_to_vaddr(laddr, type);
}
#ifdef __cplusplus


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -8,6 +8,7 @@
#pragma once
#include "esp_types.h"
#include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/assert.h"
@@ -18,6 +19,38 @@
extern "C" {
#endif
/**
* Convert MMU virtual address to linear address
*
* @param vaddr virtual address
*
* @return linear address
*/
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
uint32_t vaddr_base = 0;
if (vaddr_type == MMU_VADDR_DATA) {
vaddr_base = SOC_MMU_DBUS_VADDR_BASE;
} else {
vaddr_base = SOC_MMU_IBUS_VADDR_BASE;
}
return vaddr_base | laddr;
}
/**
* Get MMU page size
*
@@ -64,6 +97,24 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
/**
* To get the MMU table entry id to be mapped
*
@@ -85,14 +136,16 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
return paddr >> 16;
}
@@ -159,19 +212,93 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
/**
* Check whether an MMU table entry is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return True if the MMU entry is valid; false otherwise
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? false : true;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return MMU_TARGET_FLASH0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return ((*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_VALID_VAL_MASK) << 16;
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value to be read from MMU table standing for paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
if (((*(uint32_t *)(DR_REG_MMU_TABLE + i * 4)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
uint32_t laddr = entry_id << 16;
return mmu_ll_laddr_to_vaddr(laddr, type);
}
#ifdef __cplusplus


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -54,9 +54,9 @@ static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t v
cache_bus_mask_t mask = 0;
uint32_t vaddr_end = vaddr_start + len - 1;
if (vaddr_start >= IRAM0_CACHE_ADDRESS_LOW && vaddr_end < IRAM0_CACHE_ADDRESS_HIGH) {
mask |= CACHE_BUS_IBUS0;
} else if (vaddr_start >= DRAM0_CACHE_ADDRESS_LOW && vaddr_end < DRAM0_CACHE_ADDRESS_HIGH) {
mask |= CACHE_BUS_DBUS0;
} else {
HAL_ASSERT(0); //Out of region


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -20,11 +20,30 @@ extern "C" {
#endif
/**
* Convert MMU virtual address to linear address
*
* @param vaddr virtual address
*
* @return linear address
*/
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
//On ESP32C6, I/D share the same vaddr range
return SOC_MMU_IBUS_VADDR_BASE | laddr;
}
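Since I and D buses share one virtual range here, the type argument is ignored and both conversions land on the same base. A tiny sketch; the base value is an assumed placeholder for SOC_MMU_IBUS_VADDR_BASE:
#include <assert.h>
#include <stdint.h>
#define FAKE_SHARED_VADDR_BASE 0x42000000 /* assumed stand-in; I and D use the same base here */
static uint32_t laddr_to_vaddr(uint32_t laddr, int is_data)
{
    (void)is_data;                        /* unused on purpose: shared I/D range */
    return FAKE_SHARED_VADDR_BASE | laddr;
}
int main(void)
{
    assert(laddr_to_vaddr(0x1234, 0) == laddr_to_vaddr(0x1234, 1));
    return 0;
}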
__attribute__((always_inline)) static inline bool mmu_ll_cache_encryption_enabled(void)
{
@@ -82,7 +101,25 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
{
(void)mmu_id;
uint32_t vaddr_end = vaddr_start + len;
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
/**
@@ -110,10 +147,13 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
return ((vaddr & MMU_VADDR_MASK) >> shift_code);
}
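With a configurable page size, the entry ID is the masked vaddr divided by the page size, so only the shift changes per configuration. For example (mask and address are illustrative):
#include <assert.h>
#include <stdint.h>
static uint32_t entry_id(uint32_t vaddr, uint32_t vaddr_mask, uint32_t shift_code)
{
    return (vaddr & vaddr_mask) >> shift_code;
}
int main(void)
{
    uint32_t mask = 0x3FFFFF;                     /* assumed stand-in for MMU_VADDR_MASK */
    /* The same offset maps to different entries as the page size changes: */
    assert(entry_id(0x42010000, mask, 16) == 1);  /* 64KB pages: offset 0x10000 -> entry 1 */
    assert(entry_id(0x42010000, mask, 13) == 8);  /* 8KB pages:  offset 0x10000 -> entry 8 */
    return 0;
}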
/**
@@ -121,14 +161,16 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
@@ -141,6 +183,9 @@ static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr)
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
@@ -163,8 +208,8 @@ __attribute__((always_inline)) static inline void mmu_ll_write_entry(uint32_t mm
if (mmu_ll_cache_encryption_enabled()) {
mmu_val |= MMU_SENSITIVE;
}
mmu_raw_value = mmu_val | MMU_VALID;
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
REG_WRITE(SPI_MEM_MMU_ITEM_CONTENT_REG(0), mmu_raw_value);
}
@@ -186,8 +231,10 @@ __attribute__((always_inline)) static inline uint32_t mmu_ll_read_entry(uint32_t
if (mmu_ll_cache_encryption_enabled()) {
mmu_raw_value &= ~MMU_SENSITIVE;
}
if (!(mmu_raw_value & MMU_VALID)) {
return 0;
}
ret = mmu_raw_value & MMU_VALID_VAL_MASK;
return ret;
}
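The write/read pair above replaces the old XOR-with-MMU_INVALID_MASK trick with an explicit valid bit: writes OR MMU_VALID in, reads check it and strip it. A host-side round trip under assumed bit positions (valid bit at bit 9, a 9-bit payload mask):
#include <assert.h>
#include <stdint.h>
#define FAKE_MMU_VALID          (1u << 9) /* assumed bit position for illustration */
#define FAKE_MMU_VALID_VAL_MASK 0x1FFu    /* assumed payload mask */
static uint32_t write_entry(uint32_t mmu_val) { return mmu_val | FAKE_MMU_VALID; }
static uint32_t read_entry(uint32_t raw)
{
    if (!(raw & FAKE_MMU_VALID)) {
        return 0;                         /* invalid entries read back as 0 */
    }
    return raw & FAKE_MMU_VALID_VAL_MASK;
}
int main(void)
{
    uint32_t raw = write_entry(0x5A);
    assert(read_entry(raw) == 0x5A);      /* payload survives the round trip */
    assert(read_entry(0x5A) == 0);        /* same payload without the valid bit is rejected */
    return 0;
}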
@@ -204,23 +251,6 @@ __attribute__((always_inline)) static inline void mmu_ll_set_entry_invalid(uint3
REG_WRITE(SPI_MEM_MMU_ITEM_CONTENT_REG(0), MMU_INVALID);
}
/**
* Get MMU table entry is invalid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* return ture for MMU entry is invalid, false for valid
*/
__attribute__((always_inline)) static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
uint32_t mmu_raw_value;
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
mmu_raw_value = REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0));
/* Note: for ESP32-C6, the invalid-bit of MMU: 0 for invalid, 1 for valid */
return (mmu_raw_value & MMU_INVALID_MASK) ? false : true;
}
/**
* Unmap all the items in the MMU table
*
@@ -234,6 +264,134 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
}
/**
* Check whether an MMU table entry is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return True if the MMU entry is valid; false otherwise
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
return (REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID) ? true : false;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
return MMU_TARGET_FLASH0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
return (REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID_VAL_MASK) << shift_code;
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value to be read from MMU table standing for paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), i);
if ((REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
uint32_t laddr = entry_id << shift_code;
return mmu_ll_laddr_to_vaddr(laddr, type);
}
#ifdef __cplusplus
}
#endif


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -54,9 +54,9 @@ static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t v
cache_bus_mask_t mask = 0;
uint32_t vaddr_end = vaddr_start + len - 1;
if (vaddr_start >= IRAM0_CACHE_ADDRESS_LOW && vaddr_end < IRAM0_CACHE_ADDRESS_HIGH) {
mask |= CACHE_BUS_IBUS0;
} else if (vaddr_start >= DRAM0_CACHE_ADDRESS_LOW && vaddr_end < DRAM0_CACHE_ADDRESS_HIGH) {
mask |= CACHE_BUS_DBUS0;
} else {
HAL_ASSERT(0); //Out of region


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -19,12 +19,33 @@
extern "C" {
#endif
/**
* Convert MMU virtual address to linear address
*
* @param vaddr virtual address
*
* @return linear address
*/
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
//On ESP32H2, I/D share the same vaddr range
return SOC_MMU_IBUS_VADDR_BASE | laddr;
}
__attribute__((always_inline)) static inline bool mmu_ll_cache_encryption_enabled(void)
{
@@ -83,7 +104,25 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
{
(void)mmu_id;
uint32_t vaddr_end = vaddr_start + len;
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
/**
@@ -119,7 +158,7 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
HAL_ASSERT(shift_code);
}
return ((vaddr & MMU_VADDR_MASK) >> shift_code);
}
/**
@@ -132,9 +171,10 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
@@ -217,28 +257,23 @@ __attribute__((always_inline)) static inline void mmu_ll_set_entry_invalid(uint3
REG_WRITE(SPI_MEM_MMU_ITEM_CONTENT_REG(0), MMU_INVALID);
}
// /**
//  * Get MMU table entry is invalid
//  *
//  * @param mmu_id MMU ID
//  * @param entry_id MMU entry ID
//  * return true for MMU entry is invalid, false for valid
//  */
// __attribute__((always_inline)) static inline bool mmu_ll_get_entry_is_invalid(uint32_t mmu_id, uint32_t entry_id)
// {
// (void)mmu_id;
// uint32_t mmu_raw_value;
// REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
// mmu_raw_value = REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0));
// /* Note: for ESP32-H2, the invalid-bit of MMU: 0 for invalid, 1 for valid */
// return (mmu_raw_value & MMU_INVALID_MASK) ? false : true;
// }
#ifdef __cplusplus
}
#endif
/**
@@ -253,3 +288,137 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
mmu_ll_set_entry_invalid(mmu_id, i);
}
}
/**
* Check whether an MMU table entry is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return True if the MMU entry is valid; false otherwise
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
return (REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID) ? true : false;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
return MMU_TARGET_FLASH0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), entry_id);
return (REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID_VAL_MASK) << shift_code;
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value to be read from MMU table standing for paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
REG_WRITE(SPI_MEM_MMU_ITEM_INDEX_REG(0), i);
if ((REG_READ(SPI_MEM_MMU_ITEM_CONTENT_REG(0)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
uint32_t shift_code = 0;
switch (page_size) {
case MMU_PAGE_64KB:
shift_code = 16;
break;
case MMU_PAGE_32KB:
shift_code = 15;
break;
case MMU_PAGE_16KB:
shift_code = 14;
break;
case MMU_PAGE_8KB:
shift_code = 13;
break;
default:
HAL_ASSERT(shift_code);
}
uint32_t laddr = entry_id << shift_code;
return mmu_ll_laddr_to_vaddr(laddr, type);
}
#ifdef __cplusplus
}
#endif


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -8,6 +8,7 @@
#pragma once
#include "esp_types.h"
#include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/assert.h"
@@ -18,6 +19,38 @@
extern "C" {
#endif
/**
* Convert MMU virtual address to linear address
*
* @param vaddr virtual address
*
* @return linear address
*/
static inline uint32_t mmu_ll_vaddr_to_laddr(uint32_t vaddr)
{
return vaddr & SOC_MMU_LINEAR_ADDR_MASK;
}
/**
* Convert MMU linear address to virtual address
*
* @param laddr linear address
* @param vaddr_type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*
* @return virtual address
*/
static inline uint32_t mmu_ll_laddr_to_vaddr(uint32_t laddr, mmu_vaddr_t vaddr_type)
{
uint32_t vaddr_base = 0;
if (vaddr_type == MMU_VADDR_DATA) {
vaddr_base = SOC_MMU_DBUS_VADDR_BASE;
} else {
vaddr_base = SOC_MMU_IBUS_VADDR_BASE;
}
return vaddr_base | laddr;
}
/**
* Get MMU page size
*
@@ -64,6 +97,24 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
/**
* To get the MMU table entry id to be mapped
*
@@ -85,14 +136,16 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
return paddr >> 16;
}
@@ -159,19 +212,93 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
/**
* Check whether an MMU table entry is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return True if the MMU entry is valid; false otherwise
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? false : true;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return MMU_TARGET_FLASH0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return ((*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_VALID_VAL_MASK) << 16;
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value to be read from MMU table standing for paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
if (((*(uint32_t *)(DR_REG_MMU_TABLE + i * 4)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
uint32_t laddr = entry_id << 16;
return mmu_ll_laddr_to_vaddr(laddr, type);
}
#ifdef __cplusplus


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -38,7 +38,7 @@ __attribute__((always_inline))
#endif
static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t vaddr_start, uint32_t len)
{
(void)cache_id;
cache_bus_mask_t mask = 0;
uint32_t vaddr_end = vaddr_start + len - 1;
@@ -87,7 +87,7 @@ __attribute__((always_inline))
#endif
static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t mask)
{
(void)cache_id;
uint32_t ibus_mask = 0;
ibus_mask |= (mask & CACHE_BUS_IBUS0) ? EXTMEM_PRO_ICACHE_MASK_IRAM0 : 0;
@@ -111,7 +111,7 @@ static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t ma
__attribute__((always_inline))
static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t mask)
{
(void)cache_id;
uint32_t ibus_mask = 0;
ibus_mask |= (mask & CACHE_BUS_IBUS0) ? EXTMEM_PRO_ICACHE_MASK_IRAM0 : 0;


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -104,6 +104,24 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
return (on_ibus || on_dbus);
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
/**
* To get the MMU table entry id to be mapped
*
@@ -143,14 +161,16 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
return paddr >> 16;
}
@@ -217,19 +237,137 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
/**
* Check whether an MMU table entry is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return True if the MMU entry is valid; false otherwise
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? false : true;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
HAL_ASSERT(mmu_ll_check_entry_valid(mmu_id, entry_id));
if ((*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_ACCESS_FLASH) {
return MMU_TARGET_FLASH0;
} else {
return MMU_TARGET_PSRAM0;
}
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
return ((*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_VALID_VAL_MASK) << 16;
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value to be read from MMU table standing for paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
if (((*(uint32_t *)(DR_REG_MMU_TABLE + i * 4)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
(void)type;
uint32_t vaddr_base = 0;
if (entry_id < 0x40) {
if (type != MMU_VADDR_INSTRUCTION) {
return 0;
}
entry_id -= 0;
vaddr_base = 0x40000000;
} else if (entry_id >= 0x40 && entry_id < 0x80) {
if (type != MMU_VADDR_INSTRUCTION) {
return 0;
}
entry_id -= 0x40;
vaddr_base = 0x40000000;
} else if (entry_id >= 0x80 && entry_id < 0xC0) {
if (type != MMU_VADDR_DATA) {
return 0;
}
entry_id -= 0x80;
vaddr_base = 0x3f000000;
} else if (entry_id >= 0xC0 && entry_id < 0x100) {
if (type != MMU_VADDR_DATA) {
return 0;
}
entry_id -= 0xC0;
vaddr_base = 0x3f000000;
} else if (entry_id >= 0x100 && entry_id < 0x140) {
if (type != MMU_VADDR_DATA) {
return 0;
}
entry_id -= 0x100;
vaddr_base = 0x3f000000;
} else if (entry_id >= 0x140 && entry_id < 0x180) {
if (type != MMU_VADDR_DATA) {
return 0;
}
entry_id -= 0x140;
vaddr_base = 0x3f000000;
} else {
HAL_ASSERT(false);
}
return vaddr_base + (entry_id << 16);
}
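The if/else chain above encodes a fixed bank layout: entries 0x0-0x7F belong to the instruction window at 0x40000000 and entries 0x80-0x17F to the data window at 0x3f000000, in banks of 0x40 entries whose offsets restart at each bank. A condensed, table-driven sketch of the same lookup:
#include <assert.h>
#include <stdint.h>
/* One row per 0x40-entry bank: {first_entry, is_data, vaddr_base} */
static const struct { uint32_t first; int is_data; uint32_t base; } banks[] = {
    {0x000, 0, 0x40000000}, {0x040, 0, 0x40000000},
    {0x080, 1, 0x3f000000}, {0x0C0, 1, 0x3f000000},
    {0x100, 1, 0x3f000000}, {0x140, 1, 0x3f000000},
};
static uint32_t vaddr_base_of(uint32_t entry_id, int want_data)
{
    for (unsigned i = 0; i < sizeof(banks) / sizeof(banks[0]); i++) {
        if (entry_id >= banks[i].first && entry_id < banks[i].first + 0x40) {
            if (want_data != banks[i].is_data) {
                return 0;   /* wrong bus type, same 0 return as the original */
            }
            return banks[i].base + ((entry_id - banks[i].first) << 16);
        }
    }
    return 0;
}
int main(void)
{
    assert(vaddr_base_of(0x81, 1) == 0x3f010000); /* second data entry, 64KB further */
    assert(vaddr_base_of(0x81, 0) == 0);          /* data entry queried as instruction */
    return 0;
}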
#ifdef __cplusplus


@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -8,6 +8,7 @@
#pragma once
#include "esp_types.h"
#include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h"
#include "hal/assert.h"
@@ -96,6 +97,24 @@ static inline bool mmu_ll_check_valid_ext_vaddr_region(uint32_t mmu_id, uint32_t
return (ADDRESS_IN_IRAM0_CACHE(vaddr_start) && ADDRESS_IN_IRAM0_CACHE(vaddr_end)) || (ADDRESS_IN_DRAM0_CACHE(vaddr_start) && ADDRESS_IN_DRAM0_CACHE(vaddr_end));
}
/**
* Check if the paddr region is valid
*
* @param mmu_id MMU ID
* @param paddr_start start of the physical address
* @param len length, in bytes
*
* @return
* True for valid
*/
static inline bool mmu_ll_check_valid_paddr_region(uint32_t mmu_id, uint32_t paddr_start, uint32_t len)
{
(void)mmu_id;
return (paddr_start < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
(len < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM)) &&
((paddr_start + len - 1) < (mmu_ll_get_page_size(mmu_id) * MMU_MAX_PADDR_PAGE_NUM));
}
/**
* To get the MMU table entry id to be mapped
*
@@ -117,14 +136,16 @@ static inline uint32_t mmu_ll_get_entry_id(uint32_t mmu_id, uint32_t vaddr)
*
* @param mmu_id MMU ID
* @param paddr physical address to be mapped
* @param target paddr memory target, not used
*
* @return
* mmu_val - paddr in MMU table supported format
*/
__attribute__((always_inline))
static inline uint32_t mmu_ll_format_paddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target)
{
(void)mmu_id;
(void)target;
return paddr >> 16;
}
@@ -191,19 +212,94 @@ static inline void mmu_ll_unmap_all(uint32_t mmu_id)
}
/**
* Check whether an MMU table entry is valid
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return True if the MMU entry is valid; false otherwise
*/
static inline bool mmu_ll_check_entry_valid(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4) & MMU_INVALID) ? false : true;
}
/**
* Get the MMU table entry target
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return Target, see `mmu_target_t`
*/
static inline mmu_target_t mmu_ll_get_entry_target(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
bool target_code = (*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_TYPE;
return (target_code == MMU_ACCESS_FLASH) ? MMU_TARGET_FLASH0 : MMU_TARGET_PSRAM0;
}
/**
* Convert MMU entry ID to paddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
*
* @return paddr base
*/
static inline uint32_t mmu_ll_entry_id_to_paddr_base(uint32_t mmu_id, uint32_t entry_id)
{
(void)mmu_id;
HAL_ASSERT(entry_id < MMU_ENTRY_NUM);
return ((*(uint32_t *)(DR_REG_MMU_TABLE + entry_id * 4)) & MMU_VALID_VAL_MASK) << 16;
}
/**
* Find the MMU table entry ID based on table map value
* @note This function can only find the first matching entry ID. However, it is possible that a physical address
* is mapped to multiple virtual addresses
*
* @param mmu_id MMU ID
* @param mmu_val map value to be read from MMU table standing for paddr
* @param target physical memory target, see `mmu_target_t`
*
* @return MMU entry ID, -1 for invalid
*/
static inline int mmu_ll_find_entry_id_based_on_map_value(uint32_t mmu_id, uint32_t mmu_val, mmu_target_t target)
{
(void)mmu_id;
for (int i = 0; i < MMU_ENTRY_NUM; i++) {
if (mmu_ll_check_entry_valid(mmu_id, i)) {
if (mmu_ll_get_entry_target(mmu_id, i) == target) {
if (((*(uint32_t *)(DR_REG_MMU_TABLE + i * 4)) & MMU_VALID_VAL_MASK) == mmu_val) {
return i;
}
}
}
}
return -1;
}
/**
* Convert MMU entry ID to vaddr base
*
* @param mmu_id MMU ID
* @param entry_id MMU entry ID
* @param type virtual address type, could be instruction type or data type. See `mmu_vaddr_t`
*/
static inline uint32_t mmu_ll_entry_id_to_vaddr_base(uint32_t mmu_id, uint32_t entry_id, mmu_vaddr_t type)
{
(void)mmu_id;
uint32_t laddr = entry_id << 16;
return mmu_ll_laddr_to_vaddr(laddr, type);
}
#ifdef __cplusplus


@@ -36,6 +36,14 @@ void cache_hal_disable(cache_type_t type);
*/
void cache_hal_enable(cache_type_t type);
/**
* Invalidate a Cache item for either ICache or DCache.
*
* @param vaddr Start address of the region to be invalidated
* @param size Size of the region to be invalidated
*/
void cache_hal_invalidate_addr(uint32_t vaddr, uint32_t size);
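A typical use of the new invalidate API, sketched: after a DMA engine or other external writer updates a mapped region behind the cache, invalidate that vaddr range before reading it through the cache again. The wrapper function and its arguments are placeholders, not code from this change:
#include "hal/cache_hal.h" /* header declaring the prototype above */
void after_external_write(uint32_t mapped_vaddr, uint32_t region_size)
{
    /* Drop any stale cached copies so the next read fetches fresh data. */
    cache_hal_invalidate_addr(mapped_vaddr, region_size);
}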
#ifdef __cplusplus
}
#endif


@@ -6,6 +6,7 @@
#pragma once
#include <esp_types.h>
#include "hal/mmu_types.h"
#ifdef __cplusplus
@@ -17,7 +18,6 @@ extern "C" {
*/
void mmu_hal_init(void);
#if !CONFIG_IDF_TARGET_ESP32
/**
* Helper functions to convert the MMU page numbers into bytes. e.g.:
* - When MMU page size is 16KB, page_num = 2 will be converted into 32KB
@@ -45,7 +45,7 @@ uint32_t mmu_hal_pages_to_bytes(uint32_t mmu_id, uint32_t page_num);
uint32_t mmu_hal_bytes_to_pages(uint32_t mmu_id, uint32_t bytes);
/**
* To map a virtual address block to a physical memory block
*
* @param mmu_id MMU ID
* @param mem_type physical memory type, see `mmu_target_t`
@@ -57,7 +57,47 @@ uint32_t mmu_hal_bytes_to_pages(uint32_t mmu_id, uint32_t bytes);
* @note vaddr and paddr should be aligned with the mmu page size, see CONFIG_MMU_PAGE_SIZE
*/
void mmu_hal_map_region(uint32_t mmu_id, mmu_target_t mem_type, uint32_t vaddr, uint32_t paddr, uint32_t len, uint32_t *out_len);
#endif
/**
* To unmap a virtual address block that is mapped to a physical memory block previously
*
* @param[in] mmu_id MMU ID
* @param[in] vaddr start virtual address
* @param[in] len length to be unmapped, in bytes
*/
void mmu_hal_unmap_region(uint32_t mmu_id, uint32_t vaddr, uint32_t len);
/**
* Convert virtual address to physical address
*
* @param mmu_id MMU ID
* @param vaddr virtual address
* @param[out] out_paddr physical address
* @param[out] out_target Indicating the vaddr/paddr is mapped on which target, see `mmu_target_t`
*
* @return
* - true: virtual address is valid
* - false: virtual address isn't valid
*/
bool mmu_hal_vaddr_to_paddr(uint32_t mmu_id, uint32_t vaddr, uint32_t *out_paddr, mmu_target_t *out_target);
/**
* Convert physical address to virtual address
*
* @note This function can only find the first matching virtual address.
* However, it is possible that a physical address is mapped to multiple virtual addresses.
*
* @param mmu_id MMU ID
* @param paddr physical address
* @param target physical memory target, see `mmu_target_t`
* @param type virtual address type, could be instruction or data
* @param[out] out_vaddr virtual address
*
* @return
* - true: a matching vaddr was found
* - false: no matching vaddr was found
*/
bool mmu_hal_paddr_to_vaddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target, mmu_vaddr_t type, uint32_t *out_vaddr);
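A sketch of how the two translation prototypes above compose; mmu_id 0 and the printf reporting are illustrative assumptions, not part of this change:
#include <stdio.h>
#include "hal/mmu_hal.h" /* header declaring the prototypes above */
void dump_translation(uint32_t vaddr)
{
    uint32_t paddr = 0;
    mmu_target_t target;
    if (mmu_hal_vaddr_to_paddr(0, vaddr, &paddr, &target)) {
        printf("0x%08x -> paddr 0x%08x on %s\n", (unsigned)vaddr, (unsigned)paddr,
               (target == MMU_TARGET_FLASH0) ? "flash" : "psram");
        /* Going back may yield a different vaddr if the paddr is mapped more than once:
         * paddr_to_vaddr reports only the first matching mapping. */
        uint32_t back = 0;
        if (mmu_hal_paddr_to_vaddr(0, paddr, target, MMU_VADDR_DATA, &back)) {
            printf("first data mapping of that paddr: 0x%08x\n", (unsigned)back);
        }
    }
}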
#ifdef __cplusplus
}


@@ -6,11 +6,20 @@
#pragma once
#include "esp_bit_defs.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
MMU_MEM_CAP_EXEC = BIT(0),
MMU_MEM_CAP_READ = BIT(1),
MMU_MEM_CAP_WRITE = BIT(2),
MMU_MEM_CAP_32BIT = BIT(3),
MMU_MEM_CAP_8BIT = BIT(4),
} mmu_mem_caps_t;
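These caps are single-bit flags, so callers can combine and test them as masks. A host-side sketch of the idea, with mirrored flag values rather than the real enum:
#include <stdbool.h>
/* Mirror of the flag layout above, for a standalone illustration. */
typedef enum {
    CAP_EXEC  = 1 << 0,
    CAP_READ  = 1 << 1,
    CAP_WRITE = 1 << 2,
} caps_t;
static bool region_allows(int region_caps, int wanted)
{
    return (region_caps & wanted) == wanted; /* every requested bit must be present */
}
int main(void)
{
    int irom_like = CAP_EXEC | CAP_READ;     /* executable, read-only region */
    return region_allows(irom_like, CAP_EXEC | CAP_WRITE) ? 1 : 0; /* 0: write missing */
}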
/**
* MMU Page size
*/
@@ -33,8 +42,8 @@ typedef enum {
* External physical memory
*/
typedef enum {
MMU_TARGET_FLASH0 = BIT(0),
MMU_TARGET_PSRAM0 = BIT(1),
} mmu_target_t;
/**


@@ -13,23 +13,6 @@
#include "hal/mmu_hal.h"
#include "hal/mmu_ll.h"
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/rom/cache.h"
#include "soc/dport_reg.h"
#elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32C3
#include "esp32c3/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32C2
#include "esp32c2/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32H4
#include "esp32h4/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32C6
#include "esp32c6/rom/cache.h"
#endif
void mmu_hal_init(void)
{
mmu_ll_unmap_all(0);
@@ -38,8 +21,6 @@ void mmu_hal_init(void)
#endif
}
#if !CONFIG_IDF_TARGET_ESP32
//If decided, add a jira ticket for implementing these APIs on ESP32
uint32_t mmu_hal_pages_to_bytes(uint32_t mmu_id, uint32_t page_num)
{
mmu_page_size_t page_size = mmu_ll_get_page_size(mmu_id);
@@ -85,7 +66,7 @@ void mmu_hal_map_region(uint32_t mmu_id, mmu_target_t mem_type, uint32_t vaddr,
uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
HAL_ASSERT(vaddr % page_size_in_bytes == 0);
HAL_ASSERT(paddr % page_size_in_bytes == 0);
HAL_ASSERT(mmu_ll_check_valid_paddr_region(mmu_id, paddr, len));
HAL_ASSERT(mmu_ll_check_valid_ext_vaddr_region(mmu_id, vaddr, len));
uint32_t page_num = (len + page_size_in_bytes - 1) / page_size_in_bytes;
@@ -93,7 +74,7 @@ void mmu_hal_map_region(uint32_t mmu_id, mmu_target_t mem_type, uint32_t vaddr,
uint32_t mmu_val; //This is the physical address in the format that MMU supported
*out_len = mmu_hal_pages_to_bytes(mmu_id, page_num);
mmu_val = mmu_ll_format_paddr(mmu_id, paddr, mem_type);
while (page_num) {
entry_id = mmu_ll_get_entry_id(mmu_id, vaddr);
@@ -103,4 +84,58 @@ void mmu_hal_map_region(uint32_t mmu_id, mmu_target_t mem_type, uint32_t vaddr,
page_num--;
}
}
#endif //#if !CONFIG_IDF_TARGET_ESP32
void mmu_hal_unmap_region(uint32_t mmu_id, uint32_t vaddr, uint32_t len)
{
uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
HAL_ASSERT(vaddr % page_size_in_bytes == 0);
HAL_ASSERT(mmu_ll_check_valid_ext_vaddr_region(mmu_id, vaddr, len));
uint32_t page_num = (len + page_size_in_bytes - 1) / page_size_in_bytes;
uint32_t entry_id = 0;
while (page_num) {
entry_id = mmu_ll_get_entry_id(mmu_id, vaddr);
mmu_ll_set_entry_invalid(mmu_id, entry_id);
vaddr += page_size_in_bytes;
page_num--;
}
}
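Putting the new map/unmap pair together: a sketch of mapping a flash range, using it, and tearing it down again. mmu_id 0 and the addresses are placeholder values; out_len reports the page-rounded length actually mapped.
#include "hal/mmu_hal.h" /* declarations shown earlier; this is a usage sketch */
void map_then_unmap(void)
{
    uint32_t out_len = 0;
    /* Map 8KB of flash at paddr 0x10000 to vaddr 0x3C000000 (placeholder, page-aligned values). */
    mmu_hal_map_region(0, MMU_TARGET_FLASH0, 0x3C000000, 0x10000, 0x2000, &out_len);
    /* out_len is rounded up to whole pages, e.g. 64KB with a 64KB page size. */

    /* ... access the mapped window here ... */

    /* Unmap exactly what was mapped; the covered entries become invalid again. */
    mmu_hal_unmap_region(0, 0x3C000000, out_len);
}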
bool mmu_hal_vaddr_to_paddr(uint32_t mmu_id, uint32_t vaddr, uint32_t *out_paddr, mmu_target_t *out_target)
{
HAL_ASSERT(mmu_ll_check_valid_ext_vaddr_region(mmu_id, vaddr, 1));
uint32_t entry_id = mmu_ll_get_entry_id(mmu_id, vaddr);
if (!mmu_ll_check_entry_valid(mmu_id, entry_id)) {
return false;
}
uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
uint32_t offset = (uint32_t)vaddr % page_size_in_bytes;
*out_target = mmu_ll_get_entry_target(mmu_id, entry_id);
uint32_t paddr_base = mmu_ll_entry_id_to_paddr_base(mmu_id, entry_id);
*out_paddr = paddr_base | offset;
return true;
}
bool mmu_hal_paddr_to_vaddr(uint32_t mmu_id, uint32_t paddr, mmu_target_t target, mmu_vaddr_t type, uint32_t *out_vaddr)
{
HAL_ASSERT(mmu_ll_check_valid_paddr_region(mmu_id, paddr, 1));
uint32_t mmu_val = mmu_ll_format_paddr(mmu_id, paddr, target);
int entry_id = mmu_ll_find_entry_id_based_on_map_value(mmu_id, mmu_val, target);
if (entry_id == -1) {
return false;
}
uint32_t page_size_in_bytes = mmu_hal_pages_to_bytes(mmu_id, 1);
uint32_t offset = paddr % page_size_in_bytes;
uint32_t vaddr_base = mmu_ll_entry_id_to_vaddr_base(mmu_id, entry_id, type);
if (vaddr_base == 0) {
return false;
}
*out_vaddr = vaddr_base | offset;
return true;
}


@@ -227,10 +227,6 @@ config SOC_SHARED_IDCACHE_SUPPORTED
bool
default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 5
config SOC_CPU_CORES_NUM
int
default 2
@@ -443,6 +439,14 @@ config SOC_MCPWM_GPIO_SYNCHROS_PER_GROUP
int
default 3
config SOC_MMU_PERIPH_NUM
int
default 2
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 3
config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED
bool
default n


@@ -39,7 +39,18 @@ extern "C" {
#define MMU_INVALID BIT(8)
/**
* Max MMU available paddr page num.
* `MMU_MAX_PADDR_PAGE_NUM * CONFIG_MMU_PAGE_SIZE` means the max paddr address supported by the MMU. e.g.:
* 256 * 64KB, meaning the MMU can support at most 16MB of paddr
*/
#define MMU_MAX_PADDR_PAGE_NUM 256
/**
* This is the mask used for mapping. e.g.:
* 0x4008_0000 & MMU_VADDR_MASK
*/
#define MMU_VADDR_MASK 0x3FFFFF
//MMU entry num, 384 entries that are used in IDF for Flash
#define MMU_ENTRY_NUM 384
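Worked out, the new constants pin down both windows: 256 pages of 64KB give 16MB of addressable paddr, and MMU_VADDR_MASK keeps the low 22 bits, a 4MB vaddr window. A quick check of the arithmetic, including the example from the comment above:
#include <assert.h>
int main(void)
{
    assert(256 * 64 * 1024 == 16 * 1024 * 1024); /* MMU_MAX_PADDR_PAGE_NUM * 64KB = 16MB */
    assert((0x3FFFFF + 1) == 4 * 1024 * 1024);   /* MMU_VADDR_MASK spans a 4MB window */
    assert((0x40080000 & 0x3FFFFF) == 0x80000);  /* the 0x4008_0000 example above */
    return 0;
}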


@@ -136,11 +136,8 @@
#define SOC_BROWNOUT_RESET_SUPPORTED 1
#endif
/*-------------------------- CACHE CAPS --------------------------------------*/
/*-------------------------- CACHE/MMU CAPS ----------------------------------*/
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM 5
/*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_CORES_NUM 2
@@ -231,6 +228,10 @@
#define SOC_MCPWM_CAPTURE_CHANNELS_PER_TIMER (3) ///< The number of capture channels that each capture timer has
#define SOC_MCPWM_GPIO_SYNCHROS_PER_GROUP (3) ///< The number of GPIO synchros that each group has
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_PERIPH_NUM 2
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM 3
/*-------------------------- MPU CAPS ----------------------------------------*/
//TODO: correct the caller and remove unsupported lines
#define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0


@@ -199,10 +199,6 @@ config SOC_CPU_IDRAM_SPLIT_USING_PMP
bool
default y
config SOC_MMU_PAGE_SIZE_CONFIGURABLE
bool
default y
config SOC_GDMA_GROUPS config SOC_GDMA_GROUPS
int int
default 1 default 1
@@ -307,6 +303,18 @@ config SOC_LEDC_GAMMA_FADE_RANGE_MAX
int int
default 1 default 1
config SOC_MMU_PAGE_SIZE_CONFIGURABLE
bool
default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_MMU_PERIPH_NUM
int
default 1
config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED
bool bool
default n default n

View File

@@ -3,40 +3,42 @@
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
#ifndef _CACHE_MEMORY_H_ #pragma once
#define _CACHE_MEMORY_H_
#include <stdint.h>
#include "sdkconfig.h"
#include "esp_bit_defs.h" #include "esp_bit_defs.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
#include <stdint.h>
/*IRAM0 is connected with Cache IBUS0*/ /*IRAM0 is connected with Cache IBUS0*/
#define IRAM0_ADDRESS_LOW 0x4037C000 #define IRAM0_ADDRESS_LOW 0x4037C000
#define IRAM0_ADDRESS_HIGH 0x403C0000 #define IRAM0_ADDRESS_HIGH 0x403C0000
#define IRAM0_CACHE_ADDRESS_LOW 0x42000000 #define IRAM0_CACHE_ADDRESS_LOW 0x42000000
#define IRAM0_CACHE_ADDRESS_HIGH(page_size) (IRAM0_CACHE_ADDRESS_LOW + ((page_size) * 64)) // MMU has 64 pages #define IRAM0_CACHE_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_LOW + ((CONFIG_MMU_PAGE_SIZE) * MMU_ENTRY_NUM)) // MMU has 64 pages
/*DRAM0 is connected with Cache DBUS0*/ /*DRAM0 is connected with Cache DBUS0*/
#define DRAM0_ADDRESS_LOW 0x3FCA0000 #define DRAM0_ADDRESS_LOW 0x3FCA0000
#define DRAM0_ADDRESS_HIGH 0x3FCE0000 #define DRAM0_ADDRESS_HIGH 0x3FCE0000
#define DRAM0_CACHE_ADDRESS_LOW 0x3C000000 #define DRAM0_CACHE_ADDRESS_LOW 0x3C000000
#define DRAM0_CACHE_ADDRESS_HIGH(page_size) (DRAM0_CACHE_ADDRESS_LOW + ((page_size) * 64)) // MMU has 64 pages #define DRAM0_CACHE_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_LOW + ((CONFIG_MMU_PAGE_SIZE) * MMU_ENTRY_NUM)) // MMU has 64 pages
#define DRAM0_CACHE_OPERATION_HIGH(page_size) DRAM0_CACHE_ADDRESS_HIGH(page_size) #define DRAM0_CACHE_OPERATION_HIGH DRAM0_CACHE_ADDRESS_HIGH
#define BUS_SIZE(bus_name, page_size) (bus_name##_ADDRESS_HIGH(page_size) - bus_name##_ADDRESS_LOW) #define BUS_SIZE(bus_name) (bus_name##_ADDRESS_HIGH - bus_name##_ADDRESS_LOW)
#define ADDRESS_IN_BUS(bus_name, vaddr, page_size) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH(page_size)) #define ADDRESS_IN_BUS(bus_name, vaddr) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH)
#define ADDRESS_IN_IRAM0(vaddr, page_size) ADDRESS_IN_BUS(IRAM0, vaddr, page_size) #define ADDRESS_IN_IRAM0(vaddr) ADDRESS_IN_BUS(IRAM0, vaddr)
#define ADDRESS_IN_IRAM0_CACHE(vaddr, page_size) ADDRESS_IN_BUS(IRAM0_CACHE, vaddr, page_size) #define ADDRESS_IN_IRAM0_CACHE(vaddr) ADDRESS_IN_BUS(IRAM0_CACHE, vaddr)
#define ADDRESS_IN_DRAM0(vaddr, page_size) ADDRESS_IN_BUS(DRAM0, vaddr, page_size) #define ADDRESS_IN_DRAM0(vaddr) ADDRESS_IN_BUS(DRAM0, vaddr)
#define ADDRESS_IN_DRAM0_CACHE(vaddr, page_size) ADDRESS_IN_BUS(DRAM0_CACHE, vaddr, page_size) #define ADDRESS_IN_DRAM0_CACHE(vaddr) ADDRESS_IN_BUS(DRAM0_CACHE, vaddr)
#define BUS_IRAM0_CACHE_SIZE(page_size) BUS_SIZE(IRAM0_CACHE, page_size) #define BUS_IRAM0_CACHE_SIZE BUS_SIZE(IRAM0_CACHE)
#define BUS_DRAM0_CACHE_SIZE(page_size) BUS_SIZE(DRAM0_CACHE, page_size) #define BUS_DRAM0_CACHE_SIZE BUS_SIZE(DRAM0_CACHE)
#define CACHE_IBUS 0 #define CACHE_IBUS 0
#define CACHE_IBUS_MMU_START 0 #define CACHE_IBUS_MMU_START 0
@@ -71,9 +73,6 @@ extern "C" {
#define CACHE_MAX_SYNC_NUM 0x400000 #define CACHE_MAX_SYNC_NUM 0x400000
#define CACHE_MAX_LOCK_NUM 0x8000 #define CACHE_MAX_LOCK_NUM 0x8000
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/** /**
* MMU entry valid bit mask for mapping value. For an entry: * MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits * valid bit + value bits
@@ -91,7 +90,7 @@ extern "C" {
* This is the mask used for mapping. e.g.: * This is the mask used for mapping. e.g.:
* 0x4200_0000 & MMU_VADDR_MASK * 0x4200_0000 & MMU_VADDR_MASK
*/ */
#define MMU_VADDR_MASK(page_size) ((page_size) * 64 - 1) #define MMU_VADDR_MASK ((CONFIG_MMU_PAGE_SIZE) * 64 - 1)
//MMU entry num //MMU entry num
#define MMU_ENTRY_NUM 64 #define MMU_ENTRY_NUM 64
@@ -104,9 +103,65 @@ extern "C" {
#define CACHE_MEMORY_IBANK0_ADDR 0x4037C000 #define CACHE_MEMORY_IBANK0_ADDR 0x4037C000
#define SOC_MMU_DBUS_VADDR_BASE 0x3C000000
#define SOC_MMU_IBUS_VADDR_BASE 0x42000000
/*------------------------------------------------------------------------------
* MMU Linear Address
*----------------------------------------------------------------------------*/
#if (CONFIG_MMU_PAGE_SIZE == 0x10000)
/**
* - 64KB MMU page size: the last 0xFFFF, which is the offset
* - 64 MMU entries: the entry index fits in 0x3F.
*
* Therefore, 0x3F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x3FFFFF
#elif (CONFIG_MMU_PAGE_SIZE == 0x8000)
/**
* - 32KB MMU page size: the last 0x7FFF, which is the offset
* - 64 MMU entries: the entry index fits in 0x3F.
*
* Therefore, 0x1F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x1FFFFF
#elif (CONFIG_MMU_PAGE_SIZE == 0x4000)
/**
* - 16KB MMU page size: the last 0x3FFF, which is the offset
* - 64 MMU entries: the entry index fits in 0x3F.
*
* Therefore, 0xF,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0xFFFFF
#endif //CONFIG_MMU_PAGE_SIZE
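
All three cases reduce to the same formula, (page size * 64 entries) - 1; a quick compile-time spot-check (illustrative, not part of the header):

_Static_assert((0x10000 * 64 - 1) == 0x3FFFFF, "64KB pages");
_Static_assert((0x8000 * 64 - 1) == 0x1FFFFF, "32KB pages");
_Static_assert((0x4000 * 64 - 1) == 0xFFFFF, "16KB pages");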
/**
* - If the high linear address isn't 0, the MMU can recognize these addresses
* - If the high linear address is 0, the MMU linear address range is equal to or smaller than the vaddr range.
* Under this condition, we use the max linear space.
*/
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW (IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW (DRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
/**
* I/D share the MMU linear address range
*/
_Static_assert(SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW == SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, "IRAM0 and DRAM0 linear address should be same");
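
As a worked example of the fallback, using this header's values with 64KB pages (mask 0x3FFFFF):

//IRAM0_CACHE_ADDRESS_LOW  = 0x42000000 -> 0x42000000 & 0x3FFFFF == 0
//IRAM0_CACHE_ADDRESS_HIGH = 0x42400000 -> 0x42400000 & 0x3FFFFF == 0
//The high address masks to 0, so the header falls back to SOC_MMU_LINEAR_ADDR_MASK + 1
//== 0x400000, i.e. the full 4MB linear window is usable.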
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
#endif /*_CACHE_MEMORY_H_ */

View File

@@ -97,9 +97,6 @@
#define SOC_CPU_IDRAM_SPLIT_USING_PMP 1 #define SOC_CPU_IDRAM_SPLIT_USING_PMP 1
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_PAGE_SIZE_CONFIGURABLE (1)
/*-------------------------- GDMA CAPS -------------------------------------*/ /*-------------------------- GDMA CAPS -------------------------------------*/
#define SOC_GDMA_GROUPS (1U) // Number of GDMA groups #define SOC_GDMA_GROUPS (1U) // Number of GDMA groups
#define SOC_GDMA_PAIRS_PER_GROUP (1U) // Number of GDMA pairs in each group #define SOC_GDMA_PAIRS_PER_GROUP (1U) // Number of GDMA pairs in each group
@@ -154,6 +151,11 @@
#define SOC_LEDC_SUPPORT_FADE_STOP (1) #define SOC_LEDC_SUPPORT_FADE_STOP (1)
#define SOC_LEDC_GAMMA_FADE_RANGE_MAX (1U) // The target does not support gamma curve fading #define SOC_LEDC_GAMMA_FADE_RANGE_MAX (1U) // The target does not support gamma curve fading
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_PAGE_SIZE_CONFIGURABLE (1)
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
#define SOC_MMU_PERIPH_NUM (1U)
/*-------------------------- MPU CAPS ----------------------------------------*/ /*-------------------------- MPU CAPS ----------------------------------------*/
#define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0 #define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0
#define SOC_MPU_MIN_REGION_SIZE 0x20000000U #define SOC_MPU_MIN_REGION_SIZE 0x20000000U

View File

@@ -439,6 +439,14 @@ config SOC_LEDC_GAMMA_FADE_RANGE_MAX
int int
default 1 default 1
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_MMU_PERIPH_NUM
int
default 1
config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED
bool bool
default n default n

View File

@@ -69,9 +69,6 @@ extern "C" {
#define CACHE_MAX_SYNC_NUM 0x400000 #define CACHE_MAX_SYNC_NUM 0x400000
#define CACHE_MAX_LOCK_NUM 0x8000 #define CACHE_MAX_LOCK_NUM 0x8000
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/** /**
* MMU entry valid bit mask for mapping value. For an entry: * MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits * valid bit + value bits
@@ -99,6 +96,45 @@ extern "C" {
#define CACHE_MEMORY_IBANK0_ADDR 0x4037c000 #define CACHE_MEMORY_IBANK0_ADDR 0x4037c000
#define SOC_MMU_DBUS_VADDR_BASE 0x3C000000
#define SOC_MMU_IBUS_VADDR_BASE 0x42000000
/*------------------------------------------------------------------------------
* MMU Linear Address
*----------------------------------------------------------------------------*/
/**
* - 64KB MMU page size: the last 0xFFFF, which is the offset
* - 128 MMU entries: the entry index fits in 0x7F.
*
* Therefore, 0x7F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x7FFFFF
/**
* - If the high linear address isn't 0, the MMU can recognize these addresses
* - If the high linear address is 0, the MMU linear address range is equal to or smaller than the vaddr range.
* Under this condition, we use the max linear space.
*/
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW (IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW (DRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
/**
* I/D share the MMU linear address range
*/
_Static_assert(SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW == SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, "IRAM0 and DRAM0 linear address should be same");
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@@ -204,6 +204,10 @@
#define SOC_LEDC_SUPPORT_FADE_STOP (1) #define SOC_LEDC_SUPPORT_FADE_STOP (1)
#define SOC_LEDC_GAMMA_FADE_RANGE_MAX (1U) // The target does not support gamma curve fading #define SOC_LEDC_GAMMA_FADE_RANGE_MAX (1U) // The target does not support gamma curve fading
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
#define SOC_MMU_PERIPH_NUM (1U)
/*-------------------------- MPU CAPS ----------------------------------------*/ /*-------------------------- MPU CAPS ----------------------------------------*/
#define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0 #define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0
#define SOC_MPU_MIN_REGION_SIZE 0x20000000U #define SOC_MPU_MIN_REGION_SIZE 0x20000000U

View File

@@ -311,10 +311,6 @@ config SOC_CPU_IDRAM_SPLIT_USING_PMP
bool bool
default y default y
config SOC_MMU_PAGE_SIZE_CONFIGURABLE
bool
default y
config SOC_DS_SIGNATURE_MAX_BIT_LEN config SOC_DS_SIGNATURE_MAX_BIT_LEN
int int
default 3072 default 3072
@@ -511,6 +507,18 @@ config SOC_LEDC_GAMMA_FADE_RANGE_MAX
int int
default 16 default 16
config SOC_MMU_PAGE_SIZE_CONFIGURABLE
bool
default y
config SOC_MMU_PERIPH_NUM
int
default 1
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_MMU_DI_VADDR_SHARED config SOC_MMU_DI_VADDR_SHARED
bool bool
default y default y

View File

@@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@@ -13,35 +13,24 @@
extern "C" { extern "C" {
#endif #endif
#define IRAM0_ADDRESS_LOW 0x40800000
#define IRAM0_ADDRESS_HIGH 0x40880000
#define IRAM0_CACHE_ADDRESS_LOW 0x42000000 #define IRAM0_CACHE_ADDRESS_LOW 0x42000000
#define IRAM0_CACHE_ADDRESS_HIGH(page_size) (IRAM0_CACHE_ADDRESS_LOW + ((page_size) * 256)) #define IRAM0_CACHE_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_LOW + ((CONFIG_MMU_PAGE_SIZE) * MMU_ENTRY_NUM))
#define DRAM0_ADDRESS_LOW IRAM0_ADDRESS_LOW //I/D share the same vaddr range
#define DRAM0_ADDRESS_HIGH IRAM0_ADDRESS_HIGH //I/D share the same vaddr range
#define DRAM0_CACHE_ADDRESS_LOW IRAM0_CACHE_ADDRESS_LOW //I/D share the same vaddr range #define DRAM0_CACHE_ADDRESS_LOW IRAM0_CACHE_ADDRESS_LOW //I/D share the same vaddr range
#define DRAM0_CACHE_ADDRESS_HIGH(page_size) IRAM0_CACHE_ADDRESS_HIGH(page_size) //I/D share the same vaddr range #define DRAM0_CACHE_ADDRESS_HIGH IRAM0_CACHE_ADDRESS_HIGH //I/D share the same vaddr range
#define DRAM0_CACHE_OPERATION_HIGH(page_size) DRAM0_CACHE_ADDRESS_HIGH(page_size) #define DRAM0_CACHE_OPERATION_HIGH DRAM0_CACHE_ADDRESS_HIGH
#define BUS_SIZE(bus_name, page_size) (bus_name##_ADDRESS_HIGH(page_size) - bus_name##_ADDRESS_LOW) #define BUS_SIZE(bus_name) (bus_name##_ADDRESS_HIGH - bus_name##_ADDRESS_LOW)
#define ADDRESS_IN_BUS(bus_name, vaddr, page_size) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH(page_size)) #define ADDRESS_IN_BUS(bus_name, vaddr) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH)
#define ADDRESS_IN_IRAM0(vaddr, page_size) ADDRESS_IN_BUS(IRAM0, vaddr, page_size) #define ADDRESS_IN_IRAM0(vaddr) ADDRESS_IN_BUS(IRAM0, vaddr)
#define ADDRESS_IN_IRAM0_CACHE(vaddr, page_size) ADDRESS_IN_BUS(IRAM0_CACHE, vaddr, page_size) #define ADDRESS_IN_IRAM0_CACHE(vaddr) ADDRESS_IN_BUS(IRAM0_CACHE, vaddr)
#define ADDRESS_IN_DRAM0(vaddr, page_size) ADDRESS_IN_BUS(DRAM0, vaddr, page_size) #define ADDRESS_IN_DRAM0(vaddr) ADDRESS_IN_BUS(DRAM0, vaddr)
#define ADDRESS_IN_DRAM0_CACHE(vaddr, page_size) ADDRESS_IN_BUS(DRAM0_CACHE, vaddr, page_size) #define ADDRESS_IN_DRAM0_CACHE(vaddr) ADDRESS_IN_BUS(DRAM0_CACHE, vaddr)
#define BUS_IRAM0_CACHE_SIZE(page_size) BUS_SIZE(IRAM0_CACHE, page_size) #define BUS_IRAM0_CACHE_SIZE BUS_SIZE(IRAM0_CACHE)
#define BUS_DRAM0_CACHE_SIZE(page_size) BUS_SIZE(DRAM0_CACHE, page_size) #define BUS_DRAM0_CACHE_SIZE BUS_SIZE(DRAM0_CACHE)
#define CACHE_IBUS 0
#define CACHE_IBUS_MMU_START 0
#define CACHE_IBUS_MMU_END 0x200
#define CACHE_DBUS 1
#define CACHE_DBUS_MMU_START 0
#define CACHE_DBUS_MMU_END 0x200
//TODO, remove these cache function dependencies //TODO, remove these cache function dependencies
#define CACHE_IROM_MMU_START 0 #define CACHE_IROM_MMU_START 0
@@ -66,11 +55,9 @@ extern "C" {
#define MMU_MSPI_SENSITIVE BIT(10) #define MMU_MSPI_SENSITIVE BIT(10)
#define MMU_ACCESS_FLASH MMU_MSPI_ACCESS_FLASH #define MMU_ACCESS_FLASH MMU_MSPI_ACCESS_FLASH
#define MMU_ACCESS_SPIRAM MMU_MSPI_ACCESS_SPIRAM
#define MMU_VALID MMU_MSPI_VALID #define MMU_VALID MMU_MSPI_VALID
#define MMU_SENSITIVE MMU_MSPI_SENSITIVE #define MMU_SENSITIVE MMU_MSPI_SENSITIVE
// ESP32C6-TODO
#define MMU_INVALID_MASK MMU_MSPI_VALID #define MMU_INVALID_MASK MMU_MSPI_VALID
#define MMU_INVALID MMU_MSPI_INVALID #define MMU_INVALID MMU_MSPI_INVALID
@@ -79,9 +66,6 @@ extern "C" {
#define CACHE_MAX_SYNC_NUM 0x400000 #define CACHE_MAX_SYNC_NUM 0x400000
#define CACHE_MAX_LOCK_NUM 0x8000 #define CACHE_MAX_LOCK_NUM 0x8000
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/** /**
* MMU entry valid bit mask for mapping value. For an entry: * MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits * valid bit + value bits
@@ -101,10 +85,70 @@ extern "C" {
* This is the mask used for mapping. e.g.: * This is the mask used for mapping. e.g.:
* 0x4200_0000 & MMU_VADDR_MASK * 0x4200_0000 & MMU_VADDR_MASK
*/ */
#define MMU_VADDR_MASK(page_size) ((page_size) * MMU_ENTRY_NUM - 1) #define MMU_VADDR_MASK ((CONFIG_MMU_PAGE_SIZE) * MMU_ENTRY_NUM - 1)
#define CACHE_MEMORY_IBANK0_ADDR 0x40800000 #define CACHE_MEMORY_IBANK0_ADDR 0x40800000
#define SOC_MMU_DBUS_VADDR_BASE 0x42000000
#define SOC_MMU_IBUS_VADDR_BASE 0x42000000
/*------------------------------------------------------------------------------
* MMU Linear Address
*----------------------------------------------------------------------------*/
#if (CONFIG_MMU_PAGE_SIZE == 0x10000)
/**
* - 64KB MMU page size: the last 0xFFFF, which is the offset
* - 128 MMU entries: the entry index fits in 0x7F.
*
* Therefore, 0x7F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x7FFFFF
#elif (CONFIG_MMU_PAGE_SIZE == 0x8000)
/**
* - 32KB MMU page size: the last 0x7FFF, which is the offset
* - 128 MMU entries: the entry index fits in 0x7F.
*
* Therefore, 0x3F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x3FFFFF
#elif (CONFIG_MMU_PAGE_SIZE == 0x4000)
/**
* - 16KB MMU page size: the last 0x3FFF, which is the offset
* - 128 MMU entries: the entry index fits in 0x7F.
*
* Therefore, 0x1F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x1FFFFF
#endif //CONFIG_MMU_PAGE_SIZE
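
Same formula as on the other targets, just with this target's 128 entries; a compile-time spot-check (illustrative):

_Static_assert((0x10000 * 128 - 1) == 0x7FFFFF, "64KB pages");
_Static_assert((0x8000 * 128 - 1) == 0x3FFFFF, "32KB pages");
_Static_assert((0x4000 * 128 - 1) == 0x1FFFFF, "16KB pages");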
/**
* - If the high linear address isn't 0, the MMU can recognize these addresses
* - If the high linear address is 0, the MMU linear address range is equal to or smaller than the vaddr range.
* Under this condition, we use the max linear space.
*/
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW (IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW (DRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
/**
* I/D share the MMU linear address range
*/
_Static_assert(SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW == SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, "IRAM0 and DRAM0 linear address should be same");
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@@ -23,7 +23,6 @@ extern "C" {
#define SOC_MMU_INVALID_ENTRY_VAL MMU_TABLE_INVALID_VAL #define SOC_MMU_INVALID_ENTRY_VAL MMU_TABLE_INVALID_VAL
#define SOC_MMU_ADDR_MASK (MMU_VALID - 1) #define SOC_MMU_ADDR_MASK (MMU_VALID - 1)
#define SOC_MMU_PAGE_IN_FLASH(page) (page) //Always in Flash #define SOC_MMU_PAGE_IN_FLASH(page) (page) //Always in Flash
#define SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE FLASH_MMU_TABLE
#define SOC_MMU_VADDR1_START_ADDR IRAM0_CACHE_ADDRESS_LOW #define SOC_MMU_VADDR1_START_ADDR IRAM0_CACHE_ADDRESS_LOW
#define SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE SOC_MMU_IROM0_PAGES_START #define SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE SOC_MMU_IROM0_PAGES_START
#define SOC_MMU_VADDR0_START_ADDR (SOC_IROM_LOW + (SOC_MMU_DROM0_PAGES_START * SPI_FLASH_MMU_PAGE_SIZE)) #define SOC_MMU_VADDR0_START_ADDR (SOC_IROM_LOW + (SOC_MMU_DROM0_PAGES_START * SPI_FLASH_MMU_PAGE_SIZE))

View File

@@ -134,10 +134,6 @@
#define SOC_CPU_HAS_PMA 1 #define SOC_CPU_HAS_PMA 1
#define SOC_CPU_IDRAM_SPLIT_USING_PMP 1 #define SOC_CPU_IDRAM_SPLIT_USING_PMP 1
// TODO: IDF-5339 (Copy from esp32c3, need check)
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_PAGE_SIZE_CONFIGURABLE (1)
// TODO: IDF-5360 (Copy from esp32c3, need check) // TODO: IDF-5360 (Copy from esp32c3, need check)
/*-------------------------- DIGITAL SIGNATURE CAPS ----------------------------------------*/ /*-------------------------- DIGITAL SIGNATURE CAPS ----------------------------------------*/
/** The maximum length of a Digital Signature in bits. */ /** The maximum length of a Digital Signature in bits. */
@@ -233,6 +229,9 @@
#define SOC_LEDC_GAMMA_FADE_RANGE_MAX (16) #define SOC_LEDC_GAMMA_FADE_RANGE_MAX (16)
/*-------------------------- MMU CAPS ----------------------------------------*/ /*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_PAGE_SIZE_CONFIGURABLE (1)
#define SOC_MMU_PERIPH_NUM (1U)
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
#define SOC_MMU_DI_VADDR_SHARED (1) /*!< D/I vaddr are shared */ #define SOC_MMU_DI_VADDR_SHARED (1) /*!< D/I vaddr are shared */
/*-------------------------- MPU CAPS ----------------------------------------*/ /*-------------------------- MPU CAPS ----------------------------------------*/

View File

@@ -223,6 +223,18 @@ config SOC_MMU_PAGE_SIZE_CONFIGURABLE
bool bool
default y default y
config SOC_MMU_PERIPH_NUM
int
default 1
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_MMU_DI_VADDR_SHARED
bool
default y
config SOC_DS_SIGNATURE_MAX_BIT_LEN config SOC_DS_SIGNATURE_MAX_BIT_LEN
int int
default 3072 default 3072

View File

@@ -13,38 +13,23 @@
extern "C" { extern "C" {
#endif #endif
/*IRAM0 is connected with Cache IBUS0*/
#define IRAM0_CACHE_ADDRESS_LOW 0x42000000 #define IRAM0_CACHE_ADDRESS_LOW 0x42000000
#define IRAM0_CACHE_ADDRESS_HIGH(page_size) (IRAM0_CACHE_ADDRESS_LOW + ((page_size) * 128)) // MMU has 256 pages, first 128 for instruction #define IRAM0_CACHE_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_LOW + ((CONFIG_MMU_PAGE_SIZE) * MMU_ENTRY_NUM))
#define IRAM0_ADDRESS_LOW 0x40000000
#define IRAM0_ADDRESS_HIGH(page_size) IRAM0_CACHE_ADDRESS_HIGH(page_size)
/*DRAM0 is connected with Cache DBUS0*/ #define DRAM0_CACHE_ADDRESS_LOW IRAM0_CACHE_ADDRESS_LOW //I/D share the same vaddr range
#define DRAM0_ADDRESS_LOW 0x42000000 #define DRAM0_CACHE_ADDRESS_HIGH IRAM0_CACHE_ADDRESS_HIGH //I/D share the same vaddr range
#define DRAM0_ADDRESS_HIGH 0x43000000 #define DRAM0_CACHE_OPERATION_HIGH DRAM0_CACHE_ADDRESS_HIGH
#define DRAM0_CACHE_ADDRESS_LOW IRAM0_CACHE_ADDRESS_HIGH(CONFIG_MMU_PAGE_SIZE) // ESP32H2-TODO : IDF-6370
#define DRAM0_CACHE_ADDRESS_HIGH(page_size) (IRAM0_CACHE_ADDRESS_HIGH(page_size) + ((page_size) * 128)) // MMU has 256 pages, second 128 for data
#define DRAM0_CACHE_OPERATION_HIGH(page_size) DRAM0_CACHE_ADDRESS_HIGH(page_size)
#define ESP_CACHE_TEMP_ADDR 0x42000000
#define BUS_SIZE(bus_name, page_size) (bus_name##_ADDRESS_HIGH(page_size) - bus_name##_ADDRESS_LOW) #define BUS_SIZE(bus_name) (bus_name##_ADDRESS_HIGH - bus_name##_ADDRESS_LOW)
#define ADDRESS_IN_BUS(bus_name, vaddr, page_size) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH(page_size)) #define ADDRESS_IN_BUS(bus_name, vaddr) ((vaddr) >= bus_name##_ADDRESS_LOW && (vaddr) < bus_name##_ADDRESS_HIGH)
#define ADDRESS_IN_IRAM0(vaddr, page_size) ADDRESS_IN_BUS(IRAM0, vaddr, page_size) #define ADDRESS_IN_IRAM0(vaddr) ADDRESS_IN_BUS(IRAM0, vaddr)
#define ADDRESS_IN_IRAM0_CACHE(vaddr, page_size) ADDRESS_IN_BUS(IRAM0_CACHE, vaddr, page_size) #define ADDRESS_IN_IRAM0_CACHE(vaddr) ADDRESS_IN_BUS(IRAM0_CACHE, vaddr)
#define ADDRESS_IN_DRAM0(vaddr, page_size) ADDRESS_IN_BUS(DRAM0, vaddr, page_size) #define ADDRESS_IN_DRAM0(vaddr) ADDRESS_IN_BUS(DRAM0, vaddr)
#define ADDRESS_IN_DRAM0_CACHE(vaddr, page_size) ADDRESS_IN_BUS(DRAM0_CACHE, vaddr, page_size) #define ADDRESS_IN_DRAM0_CACHE(vaddr) ADDRESS_IN_BUS(DRAM0_CACHE, vaddr)
#define BUS_IRAM0_CACHE_SIZE(page_size) BUS_SIZE(IRAM0_CACHE, page_size) #define BUS_IRAM0_CACHE_SIZE(page_size) BUS_SIZE(IRAM0_CACHE)
#define BUS_DRAM0_CACHE_SIZE(page_size) BUS_SIZE(DRAM0_CACHE, page_size) #define BUS_DRAM0_CACHE_SIZE(page_size) BUS_SIZE(DRAM0_CACHE)
#define CACHE_IBUS 0
#define CACHE_IBUS_MMU_START 0
#define CACHE_IBUS_MMU_END 0x200
#define CACHE_DBUS 1
#define CACHE_DBUS_MMU_START 0
#define CACHE_DBUS_MMU_END 0x200
//TODO, remove these cache function dependencies //TODO, remove these cache function dependencies
#define CACHE_IROM_MMU_START 0 #define CACHE_IROM_MMU_START 0
@@ -69,11 +54,9 @@ extern "C" {
#define MMU_MSPI_SENSITIVE BIT(10) #define MMU_MSPI_SENSITIVE BIT(10)
#define MMU_ACCESS_FLASH MMU_MSPI_ACCESS_FLASH #define MMU_ACCESS_FLASH MMU_MSPI_ACCESS_FLASH
#define MMU_ACCESS_SPIRAM MMU_MSPI_ACCESS_SPIRAM
#define MMU_VALID MMU_MSPI_VALID #define MMU_VALID MMU_MSPI_VALID
#define MMU_SENSITIVE MMU_MSPI_SENSITIVE #define MMU_SENSITIVE MMU_MSPI_SENSITIVE
// ESP32H2-TODO : IDF-6251
#define MMU_INVALID_MASK MMU_MSPI_VALID #define MMU_INVALID_MASK MMU_MSPI_VALID
#define MMU_INVALID MMU_MSPI_INVALID #define MMU_INVALID MMU_MSPI_INVALID
@@ -82,9 +65,6 @@ extern "C" {
#define CACHE_MAX_SYNC_NUM 0x400000 #define CACHE_MAX_SYNC_NUM 0x400000
#define CACHE_MAX_LOCK_NUM 0x8000 #define CACHE_MAX_LOCK_NUM 0x8000
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/** /**
* MMU entry valid bit mask for mapping value. For an entry: * MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits * valid bit + value bits
@@ -104,10 +84,70 @@ extern "C" {
* This is the mask used for mapping. e.g.: * This is the mask used for mapping. e.g.:
* 0x4200_0000 & MMU_VADDR_MASK * 0x4200_0000 & MMU_VADDR_MASK
*/ */
#define MMU_VADDR_MASK(page_size) ((page_size) * MMU_ENTRY_NUM - 1) #define MMU_VADDR_MASK ((CONFIG_MMU_PAGE_SIZE) * MMU_ENTRY_NUM - 1)
#define CACHE_MEMORY_IBANK0_ADDR 0x40800000 #define CACHE_MEMORY_IBANK0_ADDR 0x40800000
#define SOC_MMU_DBUS_VADDR_BASE 0x42000000
#define SOC_MMU_IBUS_VADDR_BASE 0x42000000
/*------------------------------------------------------------------------------
* MMU Linear Address
*----------------------------------------------------------------------------*/
#if (CONFIG_MMU_PAGE_SIZE == 0x10000)
/**
* - 64KB MMU page size: the last 0xFFFF, which is the offset
* - 128 MMU entries: the entry index fits in 0x7F.
*
* Therefore, 0x7F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x7FFFFF
#elif (CONFIG_MMU_PAGE_SIZE == 0x8000)
/**
* - 32KB MMU page size: the last 0x7FFF, which is the offset
* - 128 MMU entries: the entry index fits in 0x7F.
*
* Therefore, 0x3F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x3FFFFF
#elif (CONFIG_MMU_PAGE_SIZE == 0x4000)
/**
* - 16KB MMU page size: the last 0x3FFF, which is the offset
* - 128 MMU entries: the entry index fits in 0x7F.
*
* Therefore, 0x1F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x1FFFFF
#endif //CONFIG_MMU_PAGE_SIZE
/**
* - If the high linear address isn't 0, the MMU can recognize these addresses
* - If the high linear address is 0, the MMU linear address range is equal to or smaller than the vaddr range.
* Under this condition, we use the max linear space.
*/
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW (IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW (DRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
/**
* I/D share the MMU linear address range
*/
_Static_assert(SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW == SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, "IRAM0 and DRAM0 linear address should be same");
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@@ -23,7 +23,6 @@ extern "C" {
#define SOC_MMU_INVALID_ENTRY_VAL MMU_TABLE_INVALID_VAL #define SOC_MMU_INVALID_ENTRY_VAL MMU_TABLE_INVALID_VAL
#define SOC_MMU_ADDR_MASK (MMU_VALID - 1) #define SOC_MMU_ADDR_MASK (MMU_VALID - 1)
#define SOC_MMU_PAGE_IN_FLASH(page) (page) //Always in Flash #define SOC_MMU_PAGE_IN_FLASH(page) (page) //Always in Flash
#define SOC_MMU_DPORT_PRO_FLASH_MMU_TABLE FLASH_MMU_TABLE
#define SOC_MMU_VADDR1_START_ADDR IRAM0_CACHE_ADDRESS_LOW #define SOC_MMU_VADDR1_START_ADDR IRAM0_CACHE_ADDRESS_LOW
#define SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE SOC_MMU_IROM0_PAGES_START #define SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE SOC_MMU_IROM0_PAGES_START
#define SOC_MMU_VADDR0_START_ADDR (SOC_IROM_LOW + (SOC_MMU_DROM0_PAGES_START * SPI_FLASH_MMU_PAGE_SIZE)) #define SOC_MMU_VADDR0_START_ADDR (SOC_IROM_LOW + (SOC_MMU_DROM0_PAGES_START * SPI_FLASH_MMU_PAGE_SIZE))

View File

@@ -156,9 +156,9 @@
*/ */
#define SOC_IROM_LOW 0x42000000 #define SOC_IROM_LOW 0x42000000
#define SOC_IROM_HIGH (SOC_IROM_LOW + (CONFIG_MMU_PAGE_SIZE<<7)) #define SOC_IROM_HIGH (SOC_IROM_LOW + (CONFIG_MMU_PAGE_SIZE<<8))
#define SOC_DROM_LOW SOC_IROM_HIGH #define SOC_DROM_LOW SOC_IROM_LOW
#define SOC_DROM_HIGH (SOC_IROM_LOW + (CONFIG_MMU_PAGE_SIZE<<8)) // ESP32H2 MMU-TODO: IDF-6251 #define SOC_DROM_HIGH SOC_IROM_HIGH
#define SOC_IROM_MASK_LOW 0x40000000 #define SOC_IROM_MASK_LOW 0x40000000
#define SOC_IROM_MASK_HIGH 0x4001C400 #define SOC_IROM_MASK_HIGH 0x4001C400
#define SOC_DROM_MASK_LOW 0x4001C400 #define SOC_DROM_MASK_LOW 0x4001C400

View File

@@ -124,9 +124,11 @@
#define SOC_CPU_WATCHPOINTS_NUM 4 #define SOC_CPU_WATCHPOINTS_NUM 4
#define SOC_CPU_WATCHPOINT_SIZE 0x80000000 // bytes #define SOC_CPU_WATCHPOINT_SIZE 0x80000000 // bytes
// TODO: IDF-6370 (Copy from esp32c6, need check)
/*-------------------------- MMU CAPS ----------------------------------------*/ /*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_PAGE_SIZE_CONFIGURABLE (1) #define SOC_MMU_PAGE_SIZE_CONFIGURABLE (1)
#define SOC_MMU_PERIPH_NUM (1U)
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
#define SOC_MMU_DI_VADDR_SHARED (1) /*!< D/I vaddr are shared */
// TODO: IDF-6285 (Copy from esp32c6, need check) // TODO: IDF-6285 (Copy from esp32c6, need check)
/*-------------------------- DIGITAL SIGNATURE CAPS ----------------------------------------*/ /*-------------------------- DIGITAL SIGNATURE CAPS ----------------------------------------*/

View File

@@ -415,6 +415,14 @@ config SOC_LEDC_GAMMA_FADE_RANGE_MAX
int int
default 1 default 1
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_MMU_PERIPH_NUM
int
default 1
config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED
bool bool
default n default n

View File

@@ -69,9 +69,6 @@ extern "C" {
#define CACHE_MAX_SYNC_NUM 0x400000 #define CACHE_MAX_SYNC_NUM 0x400000
#define CACHE_MAX_LOCK_NUM 0x8000 #define CACHE_MAX_LOCK_NUM 0x8000
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/** /**
* MMU entry valid bit mask for mapping value. For an entry: * MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits * valid bit + value bits
@@ -99,6 +96,45 @@ extern "C" {
#define CACHE_MEMORY_IBANK0_ADDR 0x4037c000 #define CACHE_MEMORY_IBANK0_ADDR 0x4037c000
#define SOC_MMU_DBUS_VADDR_BASE 0x3C000000
#define SOC_MMU_IBUS_VADDR_BASE 0x42000000
/*------------------------------------------------------------------------------
* MMU Linear Address
*----------------------------------------------------------------------------*/
/**
* - 64KB MMU page size: the last 0xFFFF, which is the offset
* - 128 MMU entries: the entry index fits in 0x7F.
*
* Therefore, 0x7F,FFFF
*/
#define SOC_MMU_LINEAR_ADDR_MASK 0x7FFFFF
/**
* - If the high linear address isn't 0, the MMU can recognize these addresses
* - If the high linear address is 0, the MMU linear address range is equal to or smaller than the vaddr range.
* Under this condition, we use the max linear space.
*/
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW (IRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (IRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_IRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW (DRAM0_CACHE_ADDRESS_LOW & SOC_MMU_LINEAR_ADDR_MASK)
#if ((DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK) > 0)
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (DRAM0_CACHE_ADDRESS_HIGH & SOC_MMU_LINEAR_ADDR_MASK)
#else
#define SOC_MMU_DRAM0_LINEAR_ADDRESS_HIGH (SOC_MMU_LINEAR_ADDR_MASK + 1)
#endif
/**
* I/D share the MMU linear address range
*/
_Static_assert(SOC_MMU_IRAM0_LINEAR_ADDRESS_LOW == SOC_MMU_DRAM0_LINEAR_ADDRESS_LOW, "IRAM0 and DRAM0 linear address should be same");
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@@ -211,6 +211,10 @@
#define SOC_LEDC_SUPPORT_FADE_STOP (1) #define SOC_LEDC_SUPPORT_FADE_STOP (1)
#define SOC_LEDC_GAMMA_FADE_RANGE_MAX (1U) // The target does not support gamma curve fading #define SOC_LEDC_GAMMA_FADE_RANGE_MAX (1U) // The target does not support gamma curve fading
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
#define SOC_MMU_PERIPH_NUM (1U)
/*-------------------------- MPU CAPS ----------------------------------------*/ /*-------------------------- MPU CAPS ----------------------------------------*/
#define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0 #define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0
#define SOC_MPU_MIN_REGION_SIZE 0x20000000U #define SOC_MPU_MIN_REGION_SIZE 0x20000000U

View File

@@ -255,10 +255,6 @@ config SOC_BROWNOUT_RESET_SUPPORTED
bool bool
default y default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 6
config SOC_CP_DMA_MAX_BUFFER_SIZE config SOC_CP_DMA_MAX_BUFFER_SIZE
int int
default 4095 default 4095
@@ -455,6 +451,14 @@ config SOC_LEDC_GAMMA_FADE_RANGE_MAX
int int
default 1 default 1
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 5
config SOC_MMU_PERIPH_NUM
int
default 1
config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED config SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED
bool bool
default n default n

View File

@@ -98,9 +98,6 @@ extern "C" {
#define MMU_ACCESS_FLASH BIT(15) #define MMU_ACCESS_FLASH BIT(15)
#define MMU_ACCESS_SPIRAM BIT(16) #define MMU_ACCESS_SPIRAM BIT(16)
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/** /**
* MMU entry valid bit mask for mapping value. For an entry: * MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits * valid bit + value bits

View File

@@ -119,9 +119,6 @@
/*-------------------------- BROWNOUT CAPS -----------------------------------*/ /*-------------------------- BROWNOUT CAPS -----------------------------------*/
#define SOC_BROWNOUT_RESET_SUPPORTED 1 #define SOC_BROWNOUT_RESET_SUPPORTED 1
/*-------------------------- CACHE/MMU CAPS ----------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM 6
/*-------------------------- CP-DMA CAPS -------------------------------------*/ /*-------------------------- CP-DMA CAPS -------------------------------------*/
#define SOC_CP_DMA_MAX_BUFFER_SIZE (4095) /*!< Maximum size of the buffer that can be attached to descriptor */ #define SOC_CP_DMA_MAX_BUFFER_SIZE (4095) /*!< Maximum size of the buffer that can be attached to descriptor */
@@ -211,6 +208,10 @@
#define SOC_LEDC_SUPPORT_FADE_STOP (1) #define SOC_LEDC_SUPPORT_FADE_STOP (1)
#define SOC_LEDC_GAMMA_FADE_RANGE_MAX (1U) // The target does not support gamma curve fading #define SOC_LEDC_GAMMA_FADE_RANGE_MAX (1U) // The target does not support gamma curve fading
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM 5
#define SOC_MMU_PERIPH_NUM (1U)
/*-------------------------- MPU CAPS ----------------------------------------*/ /*-------------------------- MPU CAPS ----------------------------------------*/
//TODO: correct the caller and remove unsupported lines //TODO: correct the caller and remove unsupported lines
#define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0 #define SOC_MPU_CONFIGURABLE_REGIONS_SUPPORTED 0

View File

@@ -299,10 +299,6 @@ config SOC_BROWNOUT_RESET_SUPPORTED
bool bool
default y default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_CPU_CORES_NUM config SOC_CPU_CORES_NUM
int int
default 2 default 2
@@ -539,6 +535,14 @@ config SOC_MCPWM_SWSYNC_CAN_PROPAGATE
bool bool
default y default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int
default 1
config SOC_MMU_PERIPH_NUM
int
default 1
config SOC_PCNT_GROUPS config SOC_PCNT_GROUPS
int int
default 1 default 1

View File

@@ -69,9 +69,6 @@ extern "C" {
#define CACHE_MAX_SYNC_NUM 0x400000 #define CACHE_MAX_SYNC_NUM 0x400000
#define CACHE_MAX_LOCK_NUM 0x8000 #define CACHE_MAX_LOCK_NUM 0x8000
#define FLASH_MMU_TABLE ((volatile uint32_t*) DR_REG_MMU_TABLE)
#define FLASH_MMU_TABLE_SIZE (ICACHE_MMU_SIZE/sizeof(uint32_t))
/** /**
* MMU entry valid bit mask for mapping value. For an entry: * MMU entry valid bit mask for mapping value. For an entry:
* valid bit + value bits * valid bit + value bits

View File

@@ -116,9 +116,6 @@
/*-------------------------- BROWNOUT CAPS -----------------------------------*/ /*-------------------------- BROWNOUT CAPS -----------------------------------*/
#define SOC_BROWNOUT_RESET_SUPPORTED 1 #define SOC_BROWNOUT_RESET_SUPPORTED 1
/*-------------------------- CACHE/MMU CAPS ----------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
/*-------------------------- CPU CAPS ----------------------------------------*/ /*-------------------------- CPU CAPS ----------------------------------------*/
#define SOC_CPU_CORES_NUM 2 #define SOC_CPU_CORES_NUM 2
#define SOC_CPU_INTR_NUM 32 #define SOC_CPU_INTR_NUM 32
@@ -221,6 +218,10 @@
#define SOC_MCPWM_GPIO_SYNCHROS_PER_GROUP (3) ///< The number of GPIO synchros that each group has #define SOC_MCPWM_GPIO_SYNCHROS_PER_GROUP (3) ///< The number of GPIO synchros that each group has
#define SOC_MCPWM_SWSYNC_CAN_PROPAGATE (1) ///< Software sync event can be routed to its output #define SOC_MCPWM_SWSYNC_CAN_PROPAGATE (1) ///< Software sync event can be routed to its output
/*-------------------------- MMU CAPS ----------------------------------------*/
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM (1U)
#define SOC_MMU_PERIPH_NUM (1U)
/*-------------------------- MPU CAPS ----------------------------------------*/ /*-------------------------- MPU CAPS ----------------------------------------*/
#include "mpu_caps.h" #include "mpu_caps.h"

View File

@@ -45,7 +45,7 @@ else()
"spi_flash_os_func_noos.c") "spi_flash_os_func_noos.c")
list(APPEND srcs ${cache_srcs}) list(APPEND srcs ${cache_srcs})
set(priv_requires bootloader_support app_update soc driver) set(priv_requires bootloader_support app_update soc driver esp_mm)
endif() endif()
idf_component_register(SRCS "${srcs}" idf_component_register(SRCS "${srcs}"

View File

@@ -8,30 +8,30 @@
#include <assert.h> #include <assert.h>
#include <string.h> #include <string.h>
#include <stdio.h> #include <stdio.h>
#include <freertos/FreeRTOS.h> #include <freertos/FreeRTOS.h>
#include <freertos/task.h>
#include <freertos/semphr.h>
#include "soc/mmu.h"
#include "sdkconfig.h" #include "sdkconfig.h"
#include "esp_attr.h" #include "esp_attr.h"
#include "esp_memory_utils.h"
#include "spi_flash_mmap.h"
#include "esp_flash_encrypt.h"
#include "esp_log.h" #include "esp_log.h"
#include "esp_private/cache_utils.h"
#include "hal/mmu_ll.h" #include "hal/mmu_ll.h"
#include "esp_rom_spiflash.h" #include "soc/mmu.h"
#include "esp_private/esp_mmu_map_private.h"
#include "esp_mmu_map.h"
#if CONFIG_SPIRAM
#include "esp_private/esp_psram_extram.h"
#include "esp_private/mmu_psram_flash.h"
#endif
#include "esp_private/cache_utils.h"
#include "spi_flash_mmap.h"
#if CONFIG_IDF_TARGET_ESP32 #if CONFIG_IDF_TARGET_ESP32
#include "soc/dport_reg.h"
#include "esp32/rom/cache.h" #include "esp32/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32S2 #elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/rom/cache.h" #include "esp32s2/rom/cache.h"
#include "soc/extmem_reg.h"
#elif CONFIG_IDF_TARGET_ESP32S3 #elif CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/rom/cache.h" #include "esp32s3/rom/cache.h"
#include "soc/extmem_reg.h"
#elif CONFIG_IDF_TARGET_ESP32C3 #elif CONFIG_IDF_TARGET_ESP32C3
#include "esp32c3/rom/cache.h" #include "esp32c3/rom/cache.h"
#elif CONFIG_IDF_TARGET_ESP32H4 #elif CONFIG_IDF_TARGET_ESP32H4
@@ -44,21 +44,6 @@
#include "esp32h2/rom/cache.h" #include "esp32h2/rom/cache.h"
#endif #endif
#if CONFIG_SPIRAM
#include "esp_private/esp_psram_extram.h"
#include "esp_private/mmu_psram_flash.h"
#endif
#ifndef NDEBUG
// Enable built-in checks in queue.h in debug builds
#define INVARIANTS
#endif
#include "sys/queue.h"
#define IROM0_PAGES_NUM (SOC_MMU_IROM0_PAGES_END - SOC_MMU_IROM0_PAGES_START)
#define DROM0_PAGES_NUM (SOC_MMU_DROM0_PAGES_END - SOC_MMU_DROM0_PAGES_START)
#define PAGES_LIMIT ((SOC_MMU_IROM0_PAGES_END > SOC_MMU_DROM0_PAGES_END) ? SOC_MMU_IROM0_PAGES_END:SOC_MMU_DROM0_PAGES_END)
#define INVALID_PHY_PAGE(page_size) ((page_size) - 1)
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS #if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
extern int _instruction_reserved_start; extern int _instruction_reserved_start;
@@ -72,386 +57,291 @@ extern int _rodata_reserved_end;
#if !CONFIG_SPI_FLASH_ROM_IMPL #if !CONFIG_SPI_FLASH_ROM_IMPL
typedef struct mmap_entry_{
uint32_t handle; typedef struct mmap_block_t {
int page; uint32_t *vaddr_list;
int count; int list_num;
LIST_ENTRY(mmap_entry_) entries; } mmap_block_t;
} mmap_entry_t;
static LIST_HEAD(mmap_entries_head, mmap_entry_) s_mmap_entries_head = esp_err_t spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_memory_t memory,
LIST_HEAD_INITIALIZER(s_mmap_entries_head);
static uint8_t s_mmap_page_refcnt[SOC_MMU_REGIONS_COUNT * SOC_MMU_PAGES_PER_REGION] = {0};
static uint32_t s_mmap_last_handle = 0;
static void IRAM_ATTR spi_flash_mmap_init(void)
{
if (s_mmap_page_refcnt[SOC_MMU_DROM0_PAGES_START] != 0) {
return; /* mmap data already initialised */
}
for (int i = 0; i < SOC_MMU_REGIONS_COUNT * SOC_MMU_PAGES_PER_REGION; ++i) {
uint32_t entry_pro = mmu_ll_read_entry(MMU_TABLE_CORE0, i);
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
uint32_t entry_app = mmu_ll_read_entry(MMU_TABLE_CORE1, i);
if (entry_pro != entry_app) {
// clean up entries used by boot loader
mmu_ll_set_entry_invalid(MMU_TABLE_CORE0, i);
}
#endif
bool entry_pro_invalid = mmu_ll_get_entry_is_invalid(MMU_TABLE_CORE0, i);
if (!entry_pro_invalid && (i == SOC_MMU_DROM0_PAGES_START || i == SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE || entry_pro != 0)) {
s_mmap_page_refcnt[i] = 1;
} else {
mmu_ll_set_entry_invalid(MMU_TABLE_CORE0, i);
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
mmu_ll_set_entry_invalid(MMU_TABLE_CORE1, i);
#endif
}
}
}
static void IRAM_ATTR get_mmu_region(spi_flash_mmap_memory_t memory, int* out_begin, int* out_size,uint32_t* region_addr)
{
if (memory == SPI_FLASH_MMAP_DATA) {
// Vaddr0
*out_begin = SOC_MMU_DROM0_PAGES_START;
*out_size = DROM0_PAGES_NUM;
*region_addr = SOC_MMU_VADDR0_START_ADDR;
} else {
// only part of VAddr1 is usable, so adjust for that
*out_begin = SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE;
*out_size = SOC_MMU_IROM0_PAGES_END - *out_begin;
*region_addr = SOC_MMU_VADDR1_FIRST_USABLE_ADDR;
}
}
esp_err_t IRAM_ATTR spi_flash_mmap(size_t src_addr, size_t size, spi_flash_mmap_memory_t memory,
const void** out_ptr, spi_flash_mmap_handle_t* out_handle) const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
{ {
esp_err_t ret; esp_err_t ret = ESP_FAIL;
if (src_addr & INVALID_PHY_PAGE(CONFIG_MMU_PAGE_SIZE)) { mmu_mem_caps_t caps = 0;
return ESP_ERR_INVALID_ARG; void *ptr = NULL;
} mmap_block_t *block = NULL;
if ((src_addr + size) > g_rom_flashchip.chip_size) { uint32_t *vaddr_list = NULL;
return ESP_ERR_INVALID_ARG;
}
// region which should be mapped
int phys_page = src_addr / SPI_FLASH_MMU_PAGE_SIZE;
int page_count = (size + SPI_FLASH_MMU_PAGE_SIZE - 1) / SPI_FLASH_MMU_PAGE_SIZE;
// prepare a linear pages array to feed into spi_flash_mmap_pages
int *pages = heap_caps_malloc(sizeof(int)*page_count, MALLOC_CAP_INTERNAL);
if (pages == NULL) {
return ESP_ERR_NO_MEM;
}
for (int i = 0; i < page_count; i++) {
pages[i] = (phys_page+i);
}
ret = spi_flash_mmap_pages(pages, page_count, memory, out_ptr, out_handle);
free(pages);
return ret;
}
esp_err_t IRAM_ATTR spi_flash_mmap_pages(const int *pages, size_t page_count, spi_flash_mmap_memory_t memory, block = heap_caps_calloc(1, sizeof(mmap_block_t), MALLOC_CAP_INTERNAL);
const void** out_ptr, spi_flash_mmap_handle_t* out_handle) if (!block) {
{
esp_err_t ret;
const void* temp_ptr = *out_ptr = NULL;
spi_flash_mmap_handle_t temp_handle = *out_handle = (spi_flash_mmap_handle_t)NULL;
bool need_flush = false;
if (!page_count) {
return ESP_ERR_INVALID_ARG;
}
if (!esp_ptr_internal(pages)) {
return ESP_ERR_INVALID_ARG;
}
for (int i = 0; i < page_count; i++) {
if (pages[i] < 0 || pages[i]*SPI_FLASH_MMU_PAGE_SIZE >= g_rom_flashchip.chip_size) {
return ESP_ERR_INVALID_ARG;
}
}
mmap_entry_t* new_entry = (mmap_entry_t*) heap_caps_malloc(sizeof(mmap_entry_t), MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
if (new_entry == 0) {
return ESP_ERR_NO_MEM;
}
spi_flash_disable_interrupts_caches_and_other_cpu();
spi_flash_mmap_init();
// figure out the memory region where we should look for pages
int region_begin; // first page to check
int region_size; // number of pages to check
uint32_t region_addr; // base address of memory region
get_mmu_region(memory,&region_begin,&region_size,&region_addr);
if (region_size < page_count) {
spi_flash_enable_interrupts_caches_and_other_cpu();
return ESP_ERR_NO_MEM;
}
// The following part searches for a range of MMU entries which can be used.
// Algorithm is essentially naïve strstr algorithm, except that unused MMU
// entries are treated as wildcards.
int start;
// the " + 1" is a fix when loop the MMU table pages, because the last MMU page
// is valid as well if it have not been used
int end = region_begin + region_size - page_count + 1;
for (start = region_begin; start < end; ++start) {
int pageno = 0;
int pos;
for (pos = start; pos < start + page_count; ++pos, ++pageno) {
int table_val = (int) mmu_ll_read_entry(MMU_TABLE_CORE0, pos);
uint8_t refcnt = s_mmap_page_refcnt[pos];
if (refcnt != 0 && table_val != SOC_MMU_PAGE_IN_FLASH(pages[pageno])) {
break;
}
}
// whole mapping range matched, bail out
if (pos - start == page_count) {
break;
}
}
// checked all the region(s) and haven't found anything?
if (start == end) {
ret = ESP_ERR_NO_MEM; ret = ESP_ERR_NO_MEM;
goto err;
}
vaddr_list = heap_caps_calloc(1, 1 * sizeof(uint32_t), MALLOC_CAP_INTERNAL);
if (!vaddr_list) {
ret = ESP_ERR_NO_MEM;
goto err;
}
block->vaddr_list = vaddr_list;
if (memory == SPI_FLASH_MMAP_INST) {
caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_32BIT;
} else { } else {
// set up mapping using pages caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT;
uint32_t pageno = 0; }
for (int i = start; i != start + page_count; ++i, ++pageno) { ret = esp_mmu_map(src_addr, size, caps, MMU_TARGET_FLASH0, &ptr);
// sanity check: we won't reconfigure entries with non-zero reference count if (ret == ESP_OK) {
uint32_t entry_pro = mmu_ll_read_entry(MMU_TABLE_CORE0, i); vaddr_list[0] = (uint32_t)ptr;
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32 block->list_num = 1;
uint32_t entry_app = mmu_ll_read_entry(MMU_TABLE_CORE1, i);
#endif
assert(s_mmap_page_refcnt[i] == 0 ||
(entry_pro == SOC_MMU_PAGE_IN_FLASH(pages[pageno])
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
&& entry_app == SOC_MMU_PAGE_IN_FLASH(pages[pageno])
#endif
));
if (s_mmap_page_refcnt[i] == 0) {
if (entry_pro != SOC_MMU_PAGE_IN_FLASH(pages[pageno])
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
|| entry_app != SOC_MMU_PAGE_IN_FLASH(pages[pageno])
#endif
) {
mmu_ll_write_entry(MMU_TABLE_CORE0, i, pages[pageno], 0);
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32
mmu_ll_write_entry(MMU_TABLE_CORE1, i, pages[pageno], 0);
#endif
#if !CONFIG_IDF_TARGET_ESP32 } else if (ret == ESP_ERR_INVALID_STATE) {
Cache_Invalidate_Addr(region_addr + (i - region_begin) * SPI_FLASH_MMU_PAGE_SIZE, SPI_FLASH_MMU_PAGE_SIZE); /**
#endif * paddr region is mapped already,
need_flush = true; * to keep `flash_mmap.c` original behaviour, we consider this as a valid behaviour.
} * Set `list_num` to 0 so we don't need to call `esp_mmu_unmap` to this one, as `esp_mmu_map`
} * doesn't really create a new handle.
++s_mmap_page_refcnt[i];
}
LIST_INSERT_HEAD(&s_mmap_entries_head, new_entry, entries);
new_entry->page = start;
new_entry->count = page_count;
new_entry->handle = ++s_mmap_last_handle;
temp_handle = new_entry->handle;
temp_ptr = (void*) (region_addr + (start - region_begin) * SPI_FLASH_MMU_PAGE_SIZE);
ret = ESP_OK;
}
/* This is a temporary fix for an issue where some
cache reads may see stale data.
Working on a long term fix that doesn't require invalidating
entire cache.
*/ */
if (need_flush) { block->list_num = 0;
#if CONFIG_IDF_TARGET_ESP32 } else {
#if CONFIG_SPIRAM goto err;
esp_psram_extram_writeback_cache();
#endif // CONFIG_SPIRAM
Cache_Flush(0);
#if !CONFIG_FREERTOS_UNICORE
Cache_Flush(1);
#endif // !CONFIG_FREERTOS_UNICORE
#endif // CONFIG_IDF_TARGET_ESP32
} }
spi_flash_enable_interrupts_caches_and_other_cpu(); *out_ptr = ptr;
if (temp_ptr == NULL) { *out_handle = (uint32_t)block;
free(new_entry);
return ESP_OK;
err:
if (vaddr_list) {
free(vaddr_list);
}
if (block) {
free(block);
} }
*out_ptr = temp_ptr;
*out_handle = temp_handle;
return ret; return ret;
} }
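
For orientation, a minimal caller-side sketch of the new wrapper (the flash offset and size values are illustrative):

#include "spi_flash_mmap.h"

//Map 128KB of flash at physical offset 0x10000 as read-only data.
//spi_flash_mmap() now forwards to esp_mmu_map() under the hood.
const void *ptr = NULL;
spi_flash_mmap_handle_t handle;
esp_err_t err = spi_flash_mmap(0x10000, 0x20000, SPI_FLASH_MMAP_DATA, &ptr, &handle);
if (err == ESP_OK) {
    //... read through ptr ...
    spi_flash_munmap(handle);
}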
void IRAM_ATTR spi_flash_munmap(spi_flash_mmap_handle_t handle)
static int s_find_non_contiguous_block_nums(const int *pages, int page_count)
{ {
spi_flash_disable_interrupts_caches_and_other_cpu(); int nums = 1;
mmap_entry_t* it; int last_end = pages[0] + 1;
// look for handle in linked list
for (it = LIST_FIRST(&s_mmap_entries_head); it != NULL; it = LIST_NEXT(it, entries)) { for (int i = 1; i < page_count; i++) {
if (it->handle == handle) { if (pages[i] != last_end) {
// for each page, decrement reference counter nums++;
// if reference count is zero, disable MMU table entry to }
// facilitate debugging of use-after-free conditions last_end = pages[i] + 1;
for (int i = it->page; i < it->page + it->count; ++i) { }
assert(s_mmap_page_refcnt[i] > 0); return nums;
if (--s_mmap_page_refcnt[i] == 0) { }
mmu_ll_set_entry_invalid(MMU_TABLE_CORE0, i);
#if !CONFIG_FREERTOS_UNICORE && CONFIG_IDF_TARGET_ESP32 static void s_merge_contiguous_pages(const int *pages, uint32_t page_count, int block_nums, int (*out_blocks)[2])
mmu_ll_set_entry_invalid(MMU_TABLE_CORE1, i); {
#endif uint32_t last_end = pages[0] + 1;
int new_array_id = 0;
out_blocks[new_array_id][0] = pages[0];
out_blocks[new_array_id][1] = 1;
for (int i = 1; i < page_count; i++) {
if (pages[i] != last_end) {
new_array_id += 1;
assert(new_array_id < block_nums);
out_blocks[new_array_id][0] = pages[i];
out_blocks[new_array_id][1] = 1;
} else {
out_blocks[new_array_id][1] += 1;
}
last_end = pages[i] + 1;
} }
} }
LIST_REMOVE(it, entries);
break; static void s_pages_to_bytes(int (*blocks)[2], int block_nums)
{
for (int i = 0; i < block_nums; i++) {
blocks[i][0] = blocks[i][0] * CONFIG_MMU_PAGE_SIZE;
blocks[i][1] = blocks[i][1] * CONFIG_MMU_PAGE_SIZE;
} }
} }
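
A hypothetical trace of the three helpers above, for pages = {0, 1, 2, 5, 6} with a 64KB page size:

//s_find_non_contiguous_block_nums() -> 2 (two contiguous runs)
//s_merge_contiguous_pages()         -> {{0, 3}, {5, 2}}  (start page, page count)
//s_pages_to_bytes()                 -> {{0x00000, 0x30000}, {0x50000, 0x20000}}  (paddr, size)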
spi_flash_enable_interrupts_caches_and_other_cpu();
if (it == NULL) { esp_err_t spi_flash_mmap_pages(const int *pages, size_t page_count, spi_flash_mmap_memory_t memory,
const void** out_ptr, spi_flash_mmap_handle_t* out_handle)
{
esp_err_t ret = ESP_FAIL;
mmu_mem_caps_t caps = 0;
mmap_block_t *block = NULL;
uint32_t *vaddr_list = NULL;
int successful_cnt = 0;
int block_num = s_find_non_contiguous_block_nums(pages, page_count);
int paddr_blocks[block_num][2];
s_merge_contiguous_pages(pages, page_count, block_num, paddr_blocks);
s_pages_to_bytes(paddr_blocks, block_num);
block = heap_caps_calloc(1, sizeof(mmap_block_t), MALLOC_CAP_INTERNAL);
if (!block) {
ret = ESP_ERR_NO_MEM;
goto err;
}
vaddr_list = heap_caps_calloc(1, block_num * sizeof(uint32_t), MALLOC_CAP_INTERNAL);
if (!vaddr_list) {
ret = ESP_ERR_NO_MEM;
goto err;
}
if (memory == SPI_FLASH_MMAP_INST) {
caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_32BIT;
} else {
caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT;
}
for (int i = 0; i < block_num; i++) {
void *ptr = NULL;
ret = esp_mmu_map(paddr_blocks[i][0], paddr_blocks[i][1], caps, MMU_TARGET_FLASH0, &ptr);
if (ret == ESP_OK) {
vaddr_list[i] = (uint32_t)ptr;
successful_cnt++;
} else {
/**
* A note for `ret == ESP_ERR_INVALID_STATE`:
* If one of the `*pages` are mapped already, this means we can't find a
* consecutive vaddr block for these `*pages`
*/
goto err;
}
vaddr_list[i] = (uint32_t)ptr;
}
block->vaddr_list = vaddr_list;
block->list_num = successful_cnt;
/**
* We get a contiguous vaddr block, but may contain multiple esp_mmu handles.
* The first handle vaddr is the start address of this contiguous vaddr block.
*/
*out_ptr = (void *)vaddr_list[0];
*out_handle = (uint32_t)block;
return ESP_OK;
err:
for (int i = 0; i < successful_cnt; i++) {
esp_mmu_unmap((void *)vaddr_list[i]);
}
if (vaddr_list) {
free(vaddr_list);
}
if (block) {
free(block);
}
return ret;
}
void spi_flash_munmap(spi_flash_mmap_handle_t handle)
{
esp_err_t ret = ESP_FAIL;
mmap_block_t *block = (void *)handle;
for (int i = 0; i < block->list_num; i++) {
ret = esp_mmu_unmap((void *)block->vaddr_list[i]);
if (ret == ESP_ERR_NOT_FOUND) {
assert(0 && "invalid handle, or handle already unmapped"); assert(0 && "invalid handle, or handle already unmapped");
} }
free(it);
} }
static void IRAM_ATTR NOINLINE_ATTR spi_flash_protected_mmap_init(void) free(block->vaddr_list);
{ free(block);
spi_flash_disable_interrupts_caches_and_other_cpu();
spi_flash_mmap_init();
spi_flash_enable_interrupts_caches_and_other_cpu();
} }
static uint32_t IRAM_ATTR NOINLINE_ATTR spi_flash_protected_read_mmu_entry(int index)
{
uint32_t value;
spi_flash_disable_interrupts_caches_and_other_cpu();
value = mmu_ll_read_entry(MMU_TABLE_CORE0, index);
spi_flash_enable_interrupts_caches_and_other_cpu();
return value;
}
void spi_flash_mmap_dump(void) void spi_flash_mmap_dump(void)
{ {
spi_flash_protected_mmap_init(); esp_mmu_map_dump_mapped_blocks(stdout);
mmap_entry_t* it;
for (it = LIST_FIRST(&s_mmap_entries_head); it != NULL; it = LIST_NEXT(it, entries)) {
printf("handle=%d page=%d count=%d\n", it->handle, it->page, it->count);
}
for (int i = 0; i < SOC_MMU_REGIONS_COUNT * SOC_MMU_PAGES_PER_REGION; ++i) {
if (s_mmap_page_refcnt[i] != 0) {
uint32_t paddr = spi_flash_protected_read_mmu_entry(i);
printf("page %d: refcnt=%d paddr=%d\n", i, (int) s_mmap_page_refcnt[i], paddr);
}
}
} }
uint32_t IRAM_ATTR spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
uint32_t spi_flash_mmap_get_free_pages(spi_flash_mmap_memory_t memory)
{ {
spi_flash_disable_interrupts_caches_and_other_cpu(); mmu_mem_caps_t caps = 0;
spi_flash_mmap_init(); if (memory == SPI_FLASH_MMAP_INST) {
int count = 0; caps = MMU_MEM_CAP_EXEC | MMU_MEM_CAP_32BIT;
int region_begin; // first page to check } else {
int region_size; // number of pages to check caps = MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT;
uint32_t region_addr; // base address of memory region
get_mmu_region(memory,&region_begin,&region_size,&region_addr);
for (int i = region_begin; i < region_begin + region_size; ++i) {
bool entry_is_invalid = mmu_ll_get_entry_is_invalid(MMU_TABLE_CORE0, i);
if (s_mmap_page_refcnt[i] == 0 && entry_is_invalid) {
count++;
} }
size_t len = 0;
esp_mmu_map_get_max_consecutive_free_block_size(caps, MMU_TARGET_FLASH0, &len);
return len / CONFIG_MMU_PAGE_SIZE;
} }
spi_flash_enable_interrupts_caches_and_other_cpu();
return count;
}
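
Note the semantic nuance: the old loop counted every free page in the region, while the new implementation reports the largest consecutive free block, in pages, i.e. the biggest region a single mapping call can still cover. A minimal usage sketch (illustrative):

uint32_t free_pages = spi_flash_mmap_get_free_pages(SPI_FLASH_MMAP_DATA);
size_t max_mappable_bytes = free_pages * CONFIG_MMU_PAGE_SIZE;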
 size_t spi_flash_cache2phys(const void *cached)
 {
-    intptr_t c = (intptr_t)cached;
-    size_t cache_page;
-    int offset = 0;
-    if (c >= SOC_MMU_VADDR1_START_ADDR && c < SOC_MMU_VADDR1_FIRST_USABLE_ADDR) {
-        /* IRAM address, doesn't map to flash */
-        return SPI_FLASH_CACHE2PHYS_FAIL;
-    }
-    if (c < SOC_MMU_VADDR1_FIRST_USABLE_ADDR) {
-        /* expect cache is in DROM */
-        cache_page = (c - SOC_MMU_VADDR0_START_ADDR) / SPI_FLASH_MMU_PAGE_SIZE + SOC_MMU_DROM0_PAGES_START;
-#if CONFIG_SPIRAM_RODATA
-        if (c >= (uint32_t)&_rodata_reserved_start && c <= (uint32_t)&_rodata_reserved_end) {
-            offset = rodata_flash2spiram_offset();
-        }
-#endif
-    } else {
-        /* expect cache is in IROM */
-        cache_page = (c - SOC_MMU_VADDR1_START_ADDR) / SPI_FLASH_MMU_PAGE_SIZE + SOC_MMU_IROM0_PAGES_START;
-#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
-        if (c >= (uint32_t)&_instruction_reserved_start && c <= (uint32_t)&_instruction_reserved_end) {
-            offset = instruction_flash2spiram_offset();
-        }
-#endif
-    }
-    if (cache_page >= PAGES_LIMIT) {
-        /* cached address was not in IROM or DROM */
-        return SPI_FLASH_CACHE2PHYS_FAIL;
-    }
-    uint32_t phys_page = spi_flash_protected_read_mmu_entry(cache_page);
-    bool entry_is_invalid = mmu_ll_get_entry_is_invalid(MMU_TABLE_CORE0, cache_page);
-    if (entry_is_invalid) {
-        /* page is not mapped */
-        return SPI_FLASH_CACHE2PHYS_FAIL;
-    }
-    uint32_t phys_offs = ((phys_page & SOC_MMU_ADDR_MASK) + offset) * SPI_FLASH_MMU_PAGE_SIZE;
-    return phys_offs | (c & (SPI_FLASH_MMU_PAGE_SIZE-1));
-}
+    if (cached == NULL) {
+        return SPI_FLASH_CACHE2PHYS_FAIL;
+    }
+    esp_err_t ret = ESP_FAIL;
+    uint32_t paddr = 0;
+    mmu_target_t target = 0;
+    ret = esp_mmu_vaddr_to_paddr((void *)cached, &paddr, &target);
+    if (ret != ESP_OK) {
+        return SPI_FLASH_CACHE2PHYS_FAIL;
+    }
+    int offset = 0;
+#if CONFIG_SPIRAM_RODATA
+    if ((uint32_t)cached >= (uint32_t)&_rodata_reserved_start && (uint32_t)cached <= (uint32_t)&_rodata_reserved_end) {
+        offset = rodata_flash2spiram_offset();
+    }
+#endif
+#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
+    if ((uint32_t)cached >= (uint32_t)&_instruction_reserved_start && (uint32_t)cached <= (uint32_t)&_instruction_reserved_end) {
+        offset = instruction_flash2spiram_offset();
+    }
+#endif
+    return paddr + offset * CONFIG_MMU_PAGE_SIZE;
+}
-const void *IRAM_ATTR spi_flash_phys2cache(size_t phys_offs, spi_flash_mmap_memory_t memory)
+const void *spi_flash_phys2cache(size_t phys_offs, spi_flash_mmap_memory_t memory)
 {
-    uint32_t phys_page = phys_offs / SPI_FLASH_MMU_PAGE_SIZE;
-    int start, end, page_delta;
-    intptr_t base;
-    if (memory == SPI_FLASH_MMAP_DATA) {
-        start = SOC_MMU_DROM0_PAGES_START;
-        end = SOC_MMU_DROM0_PAGES_END;
-        base = SOC_MMU_VADDR0_START_ADDR;
-        page_delta = SOC_MMU_DROM0_PAGES_START;
-    } else {
-        start = SOC_MMU_PRO_IRAM0_FIRST_USABLE_PAGE;
-        end = SOC_MMU_IROM0_PAGES_END;
-        base = SOC_MMU_VADDR1_START_ADDR;
-        page_delta = SOC_MMU_IROM0_PAGES_START;
-    }
-    spi_flash_disable_interrupts_caches_and_other_cpu();
-    for (int i = start; i < end; i++) {
-        uint32_t mmu_value = mmu_ll_read_entry(MMU_TABLE_CORE0, i);
-#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
-        if (phys_page >= instruction_flash_start_page_get() && phys_page <= instruction_flash_end_page_get()) {
-            if (mmu_value & MMU_ACCESS_SPIRAM) {
-                mmu_value += instruction_flash2spiram_offset();
-                mmu_value = (mmu_value & SOC_MMU_ADDR_MASK) | MMU_ACCESS_FLASH;
-            }
-        }
-#endif
-#if CONFIG_SPIRAM_RODATA
-        if (phys_page >= rodata_flash_start_page_get() && phys_page <= rodata_flash_end_page_get()) {
-            if (mmu_value & MMU_ACCESS_SPIRAM) {
-                mmu_value += rodata_flash2spiram_offset();
-                mmu_value = (mmu_value & SOC_MMU_ADDR_MASK) | MMU_ACCESS_FLASH;
-            }
-        }
-#endif
-        if (mmu_value == SOC_MMU_PAGE_IN_FLASH(phys_page)) {
-            i -= page_delta;
-            intptr_t cache_page = base + (SPI_FLASH_MMU_PAGE_SIZE * i);
-            spi_flash_enable_interrupts_caches_and_other_cpu();
-            return (const void *) (cache_page | (phys_offs & (SPI_FLASH_MMU_PAGE_SIZE-1)));
-        }
-    }
-    spi_flash_enable_interrupts_caches_and_other_cpu();
-    return NULL;
-}
+    esp_err_t ret = ESP_FAIL;
+    void *ptr = NULL;
+    mmu_target_t target = MMU_TARGET_FLASH0;
+    __attribute__((unused)) uint32_t phys_page = phys_offs / CONFIG_MMU_PAGE_SIZE;
+#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
+    if (phys_page >= instruction_flash_start_page_get() && phys_page <= instruction_flash_end_page_get()) {
+        target = MMU_TARGET_PSRAM0;
+        phys_offs -= instruction_flash2spiram_offset() * CONFIG_MMU_PAGE_SIZE;
+    }
+#endif
+#if CONFIG_SPIRAM_RODATA
+    if (phys_page >= rodata_flash_start_page_get() && phys_page <= rodata_flash_end_page_get()) {
+        target = MMU_TARGET_PSRAM0;
+        phys_offs -= rodata_flash2spiram_offset() * CONFIG_MMU_PAGE_SIZE;
+    }
+#endif
+    mmu_vaddr_t type = (memory == SPI_FLASH_MMAP_DATA) ? MMU_VADDR_DATA : MMU_VADDR_INSTRUCTION;
+    ret = esp_mmu_paddr_to_vaddr(phys_offs, target, type, &ptr);
+    if (ret == ESP_ERR_NOT_FOUND) {
+        return NULL;
+    }
+    assert(ret == ESP_OK);
+    return (const void *)ptr;
+}
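Both translations are now symmetric esp_mmu lookups, so a round trip should hand back the original pointer for any currently mapped data address; a minimal sketch, assuming ptr came from a successful spi_flash_mmap() call:

    #include <assert.h>
    #include "spi_flash_mmap.h"

    static void roundtrip_check(const void *ptr)
    {
        size_t phys = spi_flash_cache2phys(ptr);   // vaddr -> paddr
        assert(phys != SPI_FLASH_CACHE2PHYS_FAIL);

        // paddr -> vaddr; NULL if the page is not currently mapped as data.
        const void *back = spi_flash_phys2cache(phys, SPI_FLASH_MMAP_DATA);
        assert(back == ptr);
    }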
 static bool IRAM_ATTR is_page_mapped_in_cache(uint32_t phys_page, const void **out_ptr)
 {

View File

@@ -34,8 +34,8 @@ extern "C" {
  * @brief Enumeration which specifies memory space requested in an mmap call
  */
 typedef enum {
-    SPI_FLASH_MMAP_DATA,    /**< map to data memory (Vaddr0), allows byte-aligned access, 4 MB total */
-    SPI_FLASH_MMAP_INST,    /**< map to instruction memory (Vaddr1-3), allows only 4-byte-aligned access, 11 MB total */
+    SPI_FLASH_MMAP_DATA,    /**< map to data memory, allows byte-aligned access */
+    SPI_FLASH_MMAP_INST,    /**< map to instruction memory, allows only 4-byte-aligned access */
 } spi_flash_mmap_memory_t;

 /**
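New code can also skip this wrapper and target the esp_mm layer directly; a sketch of the underlying call, assuming the esp_mmu_map() signature introduced by this merge (paddr, size, target, caps, flags, out pointer) and an illustrative 64 KB offset:

    #include "sdkconfig.h"
    #include "esp_mmu_map.h"

    static void *map_one_flash_page(void)
    {
        void *ptr = NULL;
        // Let the driver choose a free virtual address for one MMU page of
        // flash, mapped readable with byte access.
        esp_err_t err = esp_mmu_map(0x10000, CONFIG_MMU_PAGE_SIZE,
                                    MMU_TARGET_FLASH0,
                                    MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT,
                                    0, &ptr);
        return (err == ESP_OK) ? ptr : NULL;
    }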

View File

@@ -9,7 +9,7 @@
#include "esp_heap_caps.h" #include "esp_heap_caps.h"
// Some resources are lazy allocated in flash encryption, the threadhold is left for that case // Some resources are lazy allocated in flash encryption, the threadhold is left for that case
#define TEST_MEMORY_LEAK_THRESHOLD (-300) #define TEST_MEMORY_LEAK_THRESHOLD (400)
static size_t before_free_8bit; static size_t before_free_8bit;
static size_t before_free_32bit; static size_t before_free_32bit;

View File

@@ -9,7 +9,7 @@
#include "esp_heap_caps.h" #include "esp_heap_caps.h"
// Some resources are lazy allocated in flash encryption, the threadhold is left for that case // Some resources are lazy allocated in flash encryption, the threadhold is left for that case
#define TEST_MEMORY_LEAK_THRESHOLD (-300) #define TEST_MEMORY_LEAK_THRESHOLD (-400)
static size_t before_free_8bit; static size_t before_free_8bit;
static size_t before_free_32bit; static size_t before_free_32bit;
@@ -37,21 +37,18 @@ void tearDown(void)
 void app_main(void)
 {
-    /* old banner: "FLASH ENCRYPTION" rendered as '#'-style ASCII art, mirrored in printf() calls */
+    /* new banner: "FLASH ENCRYPTION" rendered as figlet slant-style ASCII art, mirrored in printf() calls */
     unity_run_menu();
 }

View File

@@ -4,4 +4,4 @@ cmake_minimum_required(VERSION 3.16)
 set(EXTRA_COMPONENT_DIRS "$ENV{IDF_PATH}/tools/unit-test-app/components")
 include($ENV{IDF_PATH}/tools/cmake/project.cmake)
-project(test_mmap)
+project(test_flash_mmap)

View File

@@ -1,5 +1,5 @@
set(srcs "test_app_main.c" set(srcs "test_app_main.c"
"test_mmap.c") "test_flash_mmap.c")
# In order for the cases defined by `TEST_CASE` to be linked into the final elf, # In order for the cases defined by `TEST_CASE` to be linked into the final elf,
# the component can be registered as WHOLE_ARCHIVE # the component can be registered as WHOLE_ARCHIVE

View File

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Unlicense OR CC0-1.0
  */
@@ -9,7 +9,7 @@
#include "esp_heap_caps.h" #include "esp_heap_caps.h"
// Some resources are lazy allocated, the threadhold is left for that case // Some resources are lazy allocated, the threadhold is left for that case
#define TEST_MEMORY_LEAK_THRESHOLD (-600) #define TEST_MEMORY_LEAK_THRESHOLD (600)
static size_t before_free_8bit; static size_t before_free_8bit;
static size_t before_free_32bit; static size_t before_free_32bit;

Some files were not shown because too many files have changed in this diff.