Merge branch 'fix/fix_mmu_map_concurrent_issue_v5.2' into 'release/v5.2'

mmu: fix mmu map concurrent issue (v5.2)

See merge request espressif/esp-idf!38408
This commit is contained in:
morris
2025-04-17 21:29:25 +08:00
10 changed files with 99 additions and 31 deletions

View File

@ -9,7 +9,7 @@
#include "esp_heap_caps.h"
// Iterating to load partition tables in `test spi bus lock, with flash` will lead to memory not being freed
#define TEST_MEMORY_LEAK_THRESHOLD (350)
#define TEST_MEMORY_LEAK_THRESHOLD (450)
void setUp(void)
{

View File

@ -9,7 +9,7 @@
#include "esp_heap_caps.h"
// Loading the partition table in tests will use memory
#define TEST_MEMORY_LEAK_THRESHOLD (450)
#define TEST_MEMORY_LEAK_THRESHOLD (600)
void setUp(void)
{

View File

@ -9,6 +9,7 @@
#include <sys/param.h>
#include <sys/queue.h>
#include <inttypes.h>
#include <sys/lock.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
@ -105,6 +106,10 @@ typedef struct {
* Only the first `num_regions` items are valid
*/
mem_region_t mem_regions[SOC_MMU_LINEAR_ADDRESS_REGION_NUM];
/**
* driver layer mutex lock
*/
_lock_t mutex;
} mmu_ctx_t;
static mmu_ctx_t s_mmu_ctx;
@ -447,17 +452,15 @@ esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target,
ESP_RETURN_ON_FALSE((paddr_start % CONFIG_MMU_PAGE_SIZE == 0), ESP_ERR_INVALID_ARG, TAG, "paddr must be rounded up to the nearest multiple of CONFIG_MMU_PAGE_SIZE");
ESP_RETURN_ON_ERROR(s_mem_caps_check(caps), TAG, "invalid caps");
_lock_acquire(&s_mmu_ctx.mutex);
mem_block_t *dummy_head = NULL;
mem_block_t *dummy_tail = NULL;
size_t aligned_size = ALIGN_UP_BY(size, CONFIG_MMU_PAGE_SIZE);
int32_t found_region_id = s_find_available_region(s_mmu_ctx.mem_regions, s_mmu_ctx.num_regions, aligned_size, caps, target);
if (found_region_id == -1) {
ESP_EARLY_LOGE(TAG, "no such vaddr range");
return ESP_ERR_NOT_FOUND;
}
ESP_GOTO_ON_FALSE(found_region_id != -1, ESP_ERR_NOT_FOUND, err, TAG, "no such vaddr range");
//Now we're sure we can find an available block inside a certain region
mem_region_t *found_region = &s_mmu_ctx.mem_regions[found_region_id];
mem_block_t *dummy_head = NULL;
mem_block_t *dummy_tail = NULL;
mem_block_t *new_block = NULL;
if (TAILQ_EMPTY(&found_region->mem_block_head)) {
@ -506,7 +509,6 @@ esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target,
}
if (is_enclosed) {
ESP_LOGW(TAG, "paddr block is mapped already, vaddr_start: %p, size: 0x%x", (void *)mem_block->vaddr_start, mem_block->size);
/*
* This condition is triggered when `s_is_enclosed` is true and hence
* we are sure that `paddr_start` >= `mem_block->paddr_start`.
@ -516,12 +518,11 @@ esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target,
*/
const uint32_t new_paddr_offset = paddr_start - mem_block->paddr_start;
*out_ptr = (void *)mem_block->vaddr_start + new_paddr_offset;
return ESP_ERR_INVALID_STATE;
ESP_GOTO_ON_FALSE(false, ESP_ERR_INVALID_STATE, err, TAG, "paddr block is mapped already, vaddr_start: %p, size: 0x%x", (void *)mem_block->vaddr_start, mem_block->size);
}
if (!allow_overlap && is_overlapped) {
ESP_LOGE(TAG, "paddr block is overlapped with an already mapped paddr block");
return ESP_ERR_INVALID_ARG;
ESP_GOTO_ON_FALSE(false, ESP_ERR_INVALID_ARG, err, TAG, "paddr block is overlapped with an already mapped paddr block");
}
#endif //#if ENABLE_PADDR_CHECK
@ -556,7 +557,6 @@ esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target,
assert(found);
//insert the to-be-mapped new block to the list
TAILQ_INSERT_BEFORE(found_block, new_block, entries);
//Finally, we update the max_slot_size
found_region->max_slot_size = max_slot_len;
@ -578,6 +578,7 @@ esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target,
//do mapping
s_do_mapping(target, new_block->vaddr_start, paddr_start, aligned_size);
*out_ptr = (void *)new_block->vaddr_start;
_lock_release(&s_mmu_ctx.mutex);
return ESP_OK;
@ -588,6 +589,7 @@ err:
if (dummy_head) {
free(dummy_head);
}
_lock_release(&s_mmu_ctx.mutex);
return ret;
}
@ -636,17 +638,19 @@ esp_err_t esp_mmu_unmap(void *ptr)
{
ESP_RETURN_ON_FALSE(ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer");
esp_err_t ret = ESP_FAIL;
mem_region_t *region = NULL;
mem_block_t *mem_block = NULL;
uint32_t ptr_laddr = mmu_ll_vaddr_to_laddr((uint32_t)ptr);
size_t slot_len = 0;
_lock_acquire(&s_mmu_ctx.mutex);
for (int i = 0; i < s_mmu_ctx.num_regions; i++) {
if (ptr_laddr >= s_mmu_ctx.mem_regions[i].free_head && ptr_laddr < s_mmu_ctx.mem_regions[i].end) {
region = &s_mmu_ctx.mem_regions[i];
}
}
ESP_RETURN_ON_FALSE(region, ESP_ERR_NOT_FOUND, TAG, "munmap target pointer is outside external memory regions");
ESP_GOTO_ON_FALSE(region, ESP_ERR_NOT_FOUND, err, TAG, "munmap target pointer is outside external memory regions");
bool found = false;
mem_block_t *found_block = NULL;
@ -667,15 +671,23 @@ esp_err_t esp_mmu_unmap(void *ptr)
}
}
ESP_RETURN_ON_FALSE(found, ESP_ERR_NOT_FOUND, TAG, "munmap target pointer isn't mapped yet");
if (found) {
TAILQ_REMOVE(&region->mem_block_head, found_block, entries);
}
ESP_GOTO_ON_FALSE(found, ESP_ERR_NOT_FOUND, err, TAG, "munmap target pointer isn't mapped yet");
//do unmap
s_do_unmapping(mem_block->vaddr_start, mem_block->size);
//remove the already unmapped block from the list
TAILQ_REMOVE(&region->mem_block_head, found_block, entries);
free(found_block);
_lock_release(&s_mmu_ctx.mutex);
return ESP_OK;
err:
_lock_release(&s_mmu_ctx.mutex);
return ret;
}
esp_err_t esp_mmu_map_dump_mapped_blocks(FILE* stream)

View File

@ -60,8 +60,6 @@ typedef uint32_t esp_paddr_t;
/**
* @brief Map a physical memory block to external virtual address block, with given capabilities.
*
* @note This API does not guarantee thread safety
*
* @param[in] paddr_start Start address of the physical memory block
* @param[in] size Size to be mapped. Size will be rounded up by to the nearest multiple of MMU page size
* @param[in] target Physical memory target you're going to map to, see `mmu_target_t`
@ -88,8 +86,6 @@ esp_err_t esp_mmu_map(esp_paddr_t paddr_start, size_t size, mmu_target_t target,
/**
* @brief Unmap a previously mapped virtual memory block
*
* @note This API does not guarantee thread safety
*
* @param[in] ptr Start address of the virtual memory
*
* @return

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -8,6 +8,9 @@
#include <sys/param.h>
#include <string.h>
#include "inttypes.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "esp_log.h"
#include "esp_attr.h"
#include "unity.h"
@ -70,3 +73,56 @@ TEST_CASE("Can find paddr caps by any paddr offset", "[mmu]")
TEST_ESP_OK(esp_mmu_unmap(ptr0));
}
typedef struct {
SemaphoreHandle_t done;
} test_task_arg_t;
/**
 * Worker task: repeatedly maps pseudo-random offsets of the test partition
 * and immediately unmaps them again, to stress esp_mmu_map()/esp_mmu_unmap()
 * from multiple tasks concurrently.
 *
 * @param varg Pointer to a test_task_arg_t; its `done` semaphore is given
 *             once all iterations complete, then the task deletes itself.
 */
void map_task(void *varg)
{
    test_task_arg_t *task_arg = (test_task_arg_t *) varg;
    const esp_partition_t *partition = s_get_partition();

    // Fixed seed: every run (and both worker tasks) walks the same offset sequence
    srand(199);

    for (int iteration = 0; iteration < 10000; iteration++) {
        size_t offset = rand() % partition->size;
        size_t phys_addr = partition->address + offset;
        // Split the physical address into a page-aligned base plus an in-page offset
        size_t in_page_offset = phys_addr & (CONFIG_MMU_PAGE_SIZE - 1);
        size_t page_base = phys_addr & ~(CONFIG_MMU_PAGE_SIZE - 1);

        void *vaddr = NULL;
        esp_err_t err = esp_mmu_map(page_base, 1 + in_page_offset, MMU_TARGET_FLASH0, MMU_MEM_CAP_READ | MMU_MEM_CAP_8BIT, ESP_MMU_MMAP_FLAG_PADDR_SHARED, &vaddr);
        if (err == ESP_OK) {
            esp_mmu_unmap(vaddr);
        }
        // Yield briefly so the sibling task gets interleaved with this one
        vTaskDelay(pdMS_TO_TICKS(1));
    }

    xSemaphoreGive(task_arg->done);
    vTaskDelete(NULL);
}
TEST_CASE("Test mmap concurrency", "[mmu][manual][ignore]")
{
    // One completion semaphore per worker task
    test_task_arg_t args0 = { .done = xSemaphoreCreateBinary() };
    test_task_arg_t args1 = { .done = xSemaphoreCreateBinary() };

    // Two equal-priority tasks hammer esp_mmu_map()/esp_mmu_unmap() at once
    xTaskCreate(map_task, "map0_task", 8096, &args0, 1, NULL);
    xTaskCreate(map_task, "map1_task", 8096, &args1, 1, NULL);

    // Block until both workers signal completion
    xSemaphoreTake(args0.done, portMAX_DELAY);
    xSemaphoreTake(args1.done, portMAX_DELAY);

    // Give the self-deleting tasks time to be reaped before freeing the semaphores
    vTaskDelay(100);
    vSemaphoreDelete(args0.done);
    vSemaphoreDelete(args1.done);
}

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -13,11 +13,11 @@
* - traversing all vaddr range to check their attributes
*
* These tests need certain number of internal resources (heap memory), as they uses up the vaddr ranges
* On ESP32, it should be around 450
* On ESP32S2, it should be around 600
* On other chips, it should be around 400
* On ESP32, it should be around 650
* On ESP32S2, it should be around 800
* On other chips, it should be around 600
*/
#define TEST_MEMORY_LEAK_THRESHOLD (-650)
#define TEST_MEMORY_LEAK_THRESHOLD (-800)
static size_t before_free_8bit;
static size_t before_free_32bit;

View File

@ -8,7 +8,7 @@
#include "unity_test_runner.h"
#include "esp_heap_caps.h"
#define TEST_MEMORY_LEAK_THRESHOLD (-600)
#define TEST_MEMORY_LEAK_THRESHOLD (-750)
static size_t before_free_8bit;
static size_t before_free_32bit;

View File

@ -11,7 +11,7 @@
#include "freertos/task.h"
// Some resources are lazy allocated, the threshold is left for that case
#define TEST_MEMORY_LEAK_THRESHOLD (-800)
#define TEST_MEMORY_LEAK_THRESHOLD (-1100)
static size_t before_free_8bit;
static size_t before_free_32bit;

View File

@ -169,7 +169,9 @@ SPI flash can be accessed by SPI1 (ESP-IDF ``esp_flash`` driver APIs), or by poi
Thread Safety
=============
APIs in ``esp_mmu_map.h`` are not guaranteed to be thread-safe.
The following APIs in ``esp_mmu_map.h`` are not guaranteed to be thread-safe:
- :cpp:func:`esp_mmu_map_dump_mapped_blocks`
APIs in ``esp_cache.h`` are guaranteed to be thread-safe.

View File

@ -169,7 +169,9 @@ SPI flash 可以通过 SPI1ESP-IDF ``spi_flash`` 驱动 API或指针进行
线程安全
========
``esp_mmu_map.h`` 中的 API 不能确保线程的安全性
``esp_mmu_map.h`` 中的以下 API 不能确保线程的安全性：
- :cpp:func:`esp_mmu_map_dump_mapped_blocks`
``esp_cache.h`` 中的 API 能够确保线程的安全性。