Merge branch 'bugfix/sdmmc_high_prio_timeout_v5.3' into 'release/v5.3'

fix(sdmmc): move DMA descriptor refilling into the ISR (v5.3)

See merge request espressif/esp-idf!37688
morris
2025-04-15 14:47:50 +08:00
10 changed files with 199 additions and 110 deletions

View File

@@ -1,11 +1,12 @@
/*
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <sys/param.h>
#include "esp_log.h"
#include "esp_intr_alloc.h"
@@ -18,6 +19,7 @@
#include "esp_rom_sys.h"
#include "driver/gpio.h"
#include "driver/sdmmc_host.h"
#include "esp_cache.h"
#include "esp_private/periph_ctrl.h"
#include "sdmmc_private.h"
#include "freertos/FreeRTOS.h"
@@ -31,6 +33,12 @@
#define SDMMC_EVENT_QUEUE_LENGTH 32
/* Number of DMA descriptors used for transfer.
* Increasing this value above 4 doesn't improve performance for the usual case
* of SD memory cards (most data transfers are multiples of 512 bytes).
*/
#define SDMMC_DMA_DESC_CNT 4
#if !SOC_RCC_IS_INDEPENDENT
// The Reset and Clock Control registers are shared with other peripherals, so a critical section is needed
#define SDMMC_RCC_ATOMIC() PERIPH_RCC_ATOMIC()
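/* Usage sketch (illustration, not part of this change), assuming
 * sdmmc_ll_enable_bus_clock() exists with this signature in the matching
 * sdmmc_ll.h: RCC register accesses are wrapped in the critical section, e.g.
 * when the host enables the peripheral bus clock:
 *
 *     SDMMC_RCC_ATOMIC() {
 *         sdmmc_ll_enable_bus_clock(s_host_ctx.hal.dev, true);
 *     }
 */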
@@ -65,9 +73,14 @@ typedef struct host_ctx_t {
SemaphoreHandle_t io_intr_event;
sdmmc_hal_context_t hal;
slot_ctx_t slot_ctx[SOC_SDMMC_NUM_SLOTS];
uint8_t *data_ptr;
size_t size_remaining;
size_t next_desc;
size_t desc_remaining;
} host_ctx_t;
static host_ctx_t s_host_ctx;
DRAM_DMA_ALIGNED_ATTR static sdmmc_desc_t s_dma_desc[SDMMC_DMA_DESC_CNT];
static void sdmmc_isr(void *arg);
static void sdmmc_host_dma_init(void);
@@ -787,6 +800,66 @@ static void sdmmc_host_dma_init(void)
SDMMC.idinten.ti = 1;
}
static size_t get_free_descriptors_count(void)
{
const size_t next = s_host_ctx.next_desc;
size_t count = 0;
/* Starting with the current DMA descriptor, count the number of
* descriptors which have 'owned_by_idmac' set to 0. These are the
* descriptors already processed by the DMA engine.
*/
for (size_t i = 0; i < SDMMC_DMA_DESC_CNT; ++i) {
sdmmc_desc_t* desc = &s_dma_desc[(next + i) % SDMMC_DMA_DESC_CNT];
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
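/* Memory-to-cache sync: invalidate the cached copy of the descriptor so the
 * CPU reads the 'owned_by_idmac' bit as last written back by the IDMAC. */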
esp_err_t ret = esp_cache_msync((void *)desc, sizeof(sdmmc_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_M2C);
assert(ret == ESP_OK);
#endif
if (desc->owned_by_idmac) {
break;
}
++count;
if (desc->next_desc_ptr == NULL) {
/* final descriptor in the chain */
break;
}
}
return count;
}
static void fill_dma_descriptors(size_t num_desc)
{
for (size_t i = 0; i < num_desc; ++i) {
if (s_host_ctx.size_remaining == 0) {
return;
}
const size_t next = s_host_ctx.next_desc;
sdmmc_desc_t* desc = &s_dma_desc[next];
assert(!desc->owned_by_idmac);
size_t size_to_fill =
(s_host_ctx.size_remaining < SDMMC_DMA_MAX_BUF_LEN) ?
s_host_ctx.size_remaining : SDMMC_DMA_MAX_BUF_LEN;
bool last = size_to_fill == s_host_ctx.size_remaining;
desc->last_descriptor = last;
desc->second_address_chained = 1;
desc->owned_by_idmac = 1;
desc->buffer1_ptr = s_host_ctx.data_ptr;
desc->next_desc_ptr = (last) ? NULL : &s_dma_desc[(next + 1) % SDMMC_DMA_DESC_CNT];
assert(size_to_fill < 4 || size_to_fill % 4 == 0);
desc->buffer1_size = (size_to_fill + 3) & (~3);
s_host_ctx.size_remaining -= size_to_fill;
s_host_ctx.data_ptr += size_to_fill;
s_host_ctx.next_desc = (s_host_ctx.next_desc + 1) % SDMMC_DMA_DESC_CNT;
ESP_EARLY_LOGV(TAG, "fill %d desc=%d rem=%d next=%d last=%d sz=%d",
num_desc, next, s_host_ctx.size_remaining,
s_host_ctx.next_desc, desc->last_descriptor, desc->buffer1_size);
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
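/* Cache-to-memory sync: write the updated descriptor back from cache so the
 * IDMAC sees the new buffer pointer, size, and ownership bit. */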
esp_err_t ret = esp_cache_msync((void *)desc, sizeof(sdmmc_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M);
assert(ret == ESP_OK);
#endif
}
}
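/* Worked example (illustration): assuming SDMMC_DMA_MAX_BUF_LEN is 4096 bytes,
 * a 32 KiB transfer needs 8 descriptor fills. sdmmc_host_dma_prepare() fills the
 * 4 descriptors up front (16 KiB); as the IDMAC finishes each one and clears
 * 'owned_by_idmac', the ISR calls get_free_descriptors_count() and
 * fill_dma_descriptors() again until size_remaining reaches zero. */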
void sdmmc_host_dma_stop(void)
{
SDMMC.ctrl.use_internal_dma = 0;
@@ -795,12 +868,24 @@ void sdmmc_host_dma_stop(void)
SDMMC.bmod.enable = 0;
}
void sdmmc_host_dma_prepare(sdmmc_desc_t *desc, size_t block_size, size_t data_size)
void sdmmc_host_dma_prepare(void* data_ptr, size_t data_size, size_t block_size)
{
// this clears "owned by IDMAC" bits
memset(s_dma_desc, 0, sizeof(s_dma_desc));
// initialize first descriptor
s_dma_desc[0].first_descriptor = 1;
// save transfer info
s_host_ctx.data_ptr = (uint8_t*) data_ptr;
s_host_ctx.size_remaining = data_size;
s_host_ctx.next_desc = 0;
s_host_ctx.desc_remaining = (data_size + SDMMC_DMA_MAX_BUF_LEN - 1) / SDMMC_DMA_MAX_BUF_LEN;
// prepare descriptors
fill_dma_descriptors(SDMMC_DMA_DESC_CNT);
// Set size of data and DMA descriptor pointer
sdmmc_ll_set_data_transfer_len(s_host_ctx.hal.dev, data_size);
sdmmc_ll_set_block_size(s_host_ctx.hal.dev, block_size);
sdmmc_ll_set_desc_addr(s_host_ctx.hal.dev, (uint32_t)desc);
sdmmc_ll_set_desc_addr(s_host_ctx.hal.dev, (uint32_t)&s_dma_desc[0]);
// Enable everything needed to use DMA
sdmmc_ll_enable_dma(s_host_ctx.hal.dev, true);
@@ -892,7 +977,19 @@ static void sdmmc_isr(void *arg)
uint32_t dma_pending = SDMMC.idsts.val;
SDMMC.idsts.val = dma_pending;
event.dma_status = dma_pending & 0x1f;
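/* Refilling descriptors in interrupt context keeps the IDMAC supplied even when
 * the task that normally handles SDMMC events is delayed by higher-priority
 * tasks, so long transfers do not time out waiting for a refill. */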
if (dma_pending & SDMMC_LL_EVENT_DMA_NI) {
// refill DMA descriptors
size_t free_desc = get_free_descriptors_count();
if (free_desc > 0) {
fill_dma_descriptors(free_desc);
sdmmc_host_dma_resume();
}
// NI is the logical OR of TI and RI. It is a sticky bit and must be cleared each time TI or RI is cleared.
dma_pending &= ~(SDMMC_LL_EVENT_DMA_NI | SDMMC_LL_EVENT_DMA_TI | SDMMC_LL_EVENT_DMA_RI);
}
event.dma_status = dma_pending & SDMMC_LL_EVENT_DMA_MASK;
if (pending != 0 || dma_pending != 0) {
xQueueSendFromISR(queue, &event, &higher_priority_task_awoken);

View File

@@ -28,7 +28,7 @@ esp_err_t sdmmc_host_start_command(int slot, sdmmc_hw_cmd_t cmd, uint32_t arg);
esp_err_t sdmmc_host_wait_for_event(int tick_count, sdmmc_event_t* out_event);
void sdmmc_host_dma_prepare(sdmmc_desc_t* desc, size_t block_size, size_t data_size);
void sdmmc_host_dma_prepare(void* data_ptr, size_t data_size, size_t block_size);
void sdmmc_host_dma_stop(void);

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -22,12 +22,6 @@
#include "sdmmc_private.h"
#include "soc/soc_caps.h"
/* Number of DMA descriptors used for transfer.
* Increasing this value above 4 doesn't improve performance for the usual case
* of SD memory cards (most data transfers are multiples of 512 bytes).
*/
#define SDMMC_DMA_DESC_CNT 4
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
static const char* TAG = "sdmmc_req";
@@ -39,13 +33,6 @@ typedef enum {
SDMMC_BUSY,
} sdmmc_req_state_t;
typedef struct {
uint8_t* ptr;
size_t size_remaining;
size_t next_desc;
size_t desc_remaining;
} sdmmc_transfer_state_t;
const uint32_t SDMMC_DATA_ERR_MASK =
SDMMC_INTMASK_DTO | SDMMC_INTMASK_DCRC |
SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE |
@@ -60,8 +47,6 @@ const uint32_t SDMMC_CMD_ERR_MASK =
SDMMC_INTMASK_RCRC |
SDMMC_INTMASK_RESP_ERR;
DRAM_DMA_ALIGNED_ATTR static sdmmc_desc_t s_dma_desc[SDMMC_DMA_DESC_CNT];
static sdmmc_transfer_state_t s_cur_transfer = { 0 };
static QueueHandle_t s_request_mutex;
static bool s_is_app_cmd; // This flag is set if the next command is an APP command
#ifdef CONFIG_PM_ENABLE
@@ -75,8 +60,6 @@ static esp_err_t handle_event(sdmmc_command_t* cmd, sdmmc_req_state_t* state,
static esp_err_t process_events(sdmmc_event_t evt, sdmmc_command_t* cmd,
sdmmc_req_state_t* pstate, sdmmc_event_t* unhandled_events);
static void process_command_response(uint32_t status, sdmmc_command_t* cmd);
static void fill_dma_descriptors(size_t num_desc);
static size_t get_free_descriptors_count(void);
static bool wait_for_busy_cleared(uint32_t timeout_ms);
esp_err_t sdmmc_host_transaction_handler_init(void)
@@ -152,19 +135,8 @@ esp_err_t sdmmc_host_do_transaction(int slot, sdmmc_command_t* cmdinfo)
goto out;
}
#endif
// this clears "owned by IDMAC" bits
memset(s_dma_desc, 0, sizeof(s_dma_desc));
// initialize first descriptor
s_dma_desc[0].first_descriptor = 1;
// save transfer info
s_cur_transfer.ptr = (uint8_t*) cmdinfo->data;
s_cur_transfer.size_remaining = cmdinfo->datalen;
s_cur_transfer.next_desc = 0;
s_cur_transfer.desc_remaining = (cmdinfo->datalen + SDMMC_DMA_MAX_BUF_LEN - 1) / SDMMC_DMA_MAX_BUF_LEN;
// prepare descriptors
fill_dma_descriptors(SDMMC_DMA_DESC_CNT);
// write transfer info into hardware
sdmmc_host_dma_prepare(&s_dma_desc[0], cmdinfo->blklen, cmdinfo->datalen);
sdmmc_host_dma_prepare(cmdinfo->data, cmdinfo->datalen, cmdinfo->blklen);
}
// write command into hardware, this also sends the command to the card
ret = sdmmc_host_start_command(slot, hw_cmd, cmdinfo->arg);
@@ -205,66 +177,6 @@ out:
return ret;
}
static size_t get_free_descriptors_count(void)
{
const size_t next = s_cur_transfer.next_desc;
size_t count = 0;
/* Starting with the current DMA descriptor, count the number of
* descriptors which have 'owned_by_idmac' set to 0. These are the
* descriptors already processed by the DMA engine.
*/
for (size_t i = 0; i < SDMMC_DMA_DESC_CNT; ++i) {
sdmmc_desc_t* desc = &s_dma_desc[(next + i) % SDMMC_DMA_DESC_CNT];
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
esp_err_t ret = esp_cache_msync((void *)desc, sizeof(sdmmc_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_M2C);
assert(ret == ESP_OK);
#endif
if (desc->owned_by_idmac) {
break;
}
++count;
if (desc->next_desc_ptr == NULL) {
/* final descriptor in the chain */
break;
}
}
return count;
}
static void fill_dma_descriptors(size_t num_desc)
{
for (size_t i = 0; i < num_desc; ++i) {
if (s_cur_transfer.size_remaining == 0) {
return;
}
const size_t next = s_cur_transfer.next_desc;
sdmmc_desc_t* desc = &s_dma_desc[next];
assert(!desc->owned_by_idmac);
size_t size_to_fill =
(s_cur_transfer.size_remaining < SDMMC_DMA_MAX_BUF_LEN) ?
s_cur_transfer.size_remaining : SDMMC_DMA_MAX_BUF_LEN;
bool last = size_to_fill == s_cur_transfer.size_remaining;
desc->last_descriptor = last;
desc->second_address_chained = 1;
desc->owned_by_idmac = 1;
desc->buffer1_ptr = s_cur_transfer.ptr;
desc->next_desc_ptr = (last) ? NULL : &s_dma_desc[(next + 1) % SDMMC_DMA_DESC_CNT];
assert(size_to_fill < 4 || size_to_fill % 4 == 0);
desc->buffer1_size = (size_to_fill + 3) & (~3);
s_cur_transfer.size_remaining -= size_to_fill;
s_cur_transfer.ptr += size_to_fill;
s_cur_transfer.next_desc = (s_cur_transfer.next_desc + 1) % SDMMC_DMA_DESC_CNT;
ESP_LOGV(TAG, "fill %d desc=%d rem=%d next=%d last=%d sz=%d",
num_desc, next, s_cur_transfer.size_remaining,
s_cur_transfer.next_desc, desc->last_descriptor, desc->buffer1_size);
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
esp_err_t ret = esp_cache_msync((void *)desc, sizeof(sdmmc_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M);
assert(ret == ESP_OK);
#endif
}
}
static esp_err_t handle_idle_state_events(void)
{
/* Handle any events which have happened in between transfers.
@@ -467,15 +379,7 @@ static esp_err_t process_events(sdmmc_event_t evt, sdmmc_command_t* cmd,
sdmmc_host_dma_stop();
}
if (mask_check_and_clear(&evt.dma_status, SDMMC_DMA_DONE_MASK)) {
s_cur_transfer.desc_remaining--;
if (s_cur_transfer.size_remaining) {
int desc_to_fill = get_free_descriptors_count();
fill_dma_descriptors(desc_to_fill);
sdmmc_host_dma_resume();
}
if (s_cur_transfer.desc_remaining == 0) {
next_state = SDMMC_BUSY;
}
next_state = SDMMC_BUSY;
}
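/* Descriptor refilling is handled in the SDMMC ISR (sdmmc_host.c), so the
 * DATA_DONE path above only needs to advance the state machine to SDMMC_BUSY. */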
if (orig_evt.sdmmc_status & (SDMMC_INTMASK_SBE | SDMMC_INTMASK_DATA_OVER)) {
// On start bit error, DATA_DONE interrupt will not be generated

View File

@@ -5,5 +5,5 @@ set(public_include "include")
idf_component_register(
SRCS ${srcs}
INCLUDE_DIRS ${public_include}
PRIV_REQUIRES sdmmc unity test_utils
PRIV_REQUIRES sdmmc unity test_utils esp_timer
)

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -50,6 +50,12 @@ void sdmmc_test_rw_unaligned_buffer(sdmmc_card_t* card);
*/
void sdmmc_test_rw_with_offset(sdmmc_card_t* card);
/**
* @brief Test read/write with higher priority tasks running concurrently
* @param card Pointer to the card object, must be initialized before calling this function.
*/
void sdmmc_test_rw_highprio_task(sdmmc_card_t* card);
#ifdef __cplusplus
};
#endif

View File

@@ -11,6 +11,7 @@
#include <sys/time.h>
#include "esp_dma_utils.h"
#include "esp_heap_caps.h"
#include "esp_timer.h"
#include "test_utils.h"
#include "sdkconfig.h"
#include "soc/soc_caps.h"
@@ -185,3 +186,44 @@ void sdmmc_test_rw_with_offset(sdmmc_card_t* card)
do_single_rw_perf_test(card, card->csd.capacity / 2, 8, 1, NULL, 0);
do_single_rw_perf_test(card, card->csd.capacity / 2, 128, 1, NULL, 0);
}
typedef struct {
SemaphoreHandle_t stop;
SemaphoreHandle_t done;
uint32_t busy_time_us;
} highprio_busy_task_args_t;
static void highprio_busy_task(void* varg)
{
highprio_busy_task_args_t* args = (highprio_busy_task_args_t*) varg;
while (xSemaphoreTake(args->stop, 0) != pdTRUE) {
vTaskDelay(1);
int64_t start = esp_timer_get_time();
while (esp_timer_get_time() - start < args->busy_time_us) {
usleep(100);
}
}
xSemaphoreGive(args->done);
vTaskDelete(NULL);
}
void sdmmc_test_rw_highprio_task(sdmmc_card_t* card)
{
highprio_busy_task_args_t args = {
.stop = xSemaphoreCreateBinary(),
.done = xSemaphoreCreateBinary(),
.busy_time_us = 250000,
};
TEST_ASSERT(xTaskCreate(highprio_busy_task, "highprio_busy_task", 4096, &args, 20, NULL));
for (int i = 0; i < 4; ++i) {
do_single_rw_perf_test(card, 0, 64, 0, NULL, 0);
}
xSemaphoreGive(args.stop);
xSemaphoreTake(args.done, portMAX_DELAY);
vTaskDelay(1);
vSemaphoreDelete(args.stop);
vSemaphoreDelete(args.done);
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -106,3 +106,25 @@ TEST_CASE("sdmmc read/write using unaligned buffer, slot 1, 4-bit", "[sdmmc]")
{
do_one_sdmmc_rw_test_unaligned_buffer(SLOT_1, 4, SDMMC_FREQ_DEFAULT, 0);
}
/* ========== Read/write tests with higher priority tasks running concurrently ========== */
static void do_one_sdmmc_rw_test_highprio_task(int slot, int width)
{
sdmmc_card_t card;
sdmmc_test_sd_skip_if_board_incompatible(slot, width, SDMMC_FREQ_DEFAULT, NO_DDR, NO_EMMC);
sdmmc_test_sd_begin(slot, width, SDMMC_FREQ_DEFAULT, NO_DDR, &card);
sdmmc_card_print_info(stdout, &card);
sdmmc_test_rw_highprio_task(&card);
sdmmc_test_sd_end(&card);
}
TEST_CASE("sdmmc read/write with concurrent high-prio task, slot 0, 4-bit", "[sdmmc]")
{
do_one_sdmmc_rw_test_highprio_task(0, 4);
}
TEST_CASE("sdmmc read/write with concurrent high-prio task, slot 1, 4-bit", "[sdmmc]")
{
do_one_sdmmc_rw_test_highprio_task(1, 4);
}

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -26,6 +26,12 @@ extern "C" {
#define SDMMC_LL_GET_HW(id) (((id) == 0) ? (&SDMMC) : NULL)
// DMA interrupts (idsts register)
#define SDMMC_LL_EVENT_DMA_TI SDMMC_IDMAC_INTMASK_TI
#define SDMMC_LL_EVENT_DMA_RI SDMMC_IDMAC_INTMASK_RI
#define SDMMC_LL_EVENT_DMA_NI SDMMC_IDMAC_INTMASK_NI
#define SDMMC_LL_EVENT_DMA_MASK 0x1f // NI and AI will be indicated by TI/RI and FBE/DU respectively
/**
* SDMMC capabilities
*/

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -28,6 +28,12 @@ extern "C" {
#define SDMMC_LL_GET_HW(id) (((id) == 0) ? (&SDMMC) : NULL)
// DMA interrupts (idsts register)
#define SDMMC_LL_EVENT_DMA_TI SDMMC_IDMAC_INTMASK_TI
#define SDMMC_LL_EVENT_DMA_RI SDMMC_IDMAC_INTMASK_RI
#define SDMMC_LL_EVENT_DMA_NI SDMMC_IDMAC_INTMASK_NI
#define SDMMC_LL_EVENT_DMA_MASK 0x1f // NI and AI will be indicated by TI/RI and FBE/DU respectively
/**
* SDMMC capabilities
*/

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -26,6 +26,12 @@ extern "C" {
#define SDMMC_LL_GET_HW(id) (((id) == 0) ? (&SDMMC) : NULL)
// DMA interrupts (idsts register)
#define SDMMC_LL_EVENT_DMA_TI SDMMC_IDMAC_INTMASK_TI
#define SDMMC_LL_EVENT_DMA_RI SDMMC_IDMAC_INTMASK_RI
#define SDMMC_LL_EVENT_DMA_NI SDMMC_IDMAC_INTMASK_NI
#define SDMMC_LL_EVENT_DMA_MASK 0x1f // NI and AI will be indicated by TI/RI and FBE/DU respectively
/**
* SDMMC capabilities
*/