fix(sdmmc): move DMA descriptor refilling into the ISR

Previously, as DMA descriptors were processed, the task performing
SDMMC transfer would get woken up and would refill the descriptors.
This design didn't work correctly when higher priority tasks occupied
the CPU for too long, resulting in SDMMC transfer timing out.

This change moves DMA descriptor refilling into SDMMC ISR. Now the
"DMA done" interrupt is delivered back to task context only when
the entire transfer is completed.

Closes https://github.com/espressif/esp-idf/issues/13934
This commit is contained in:
Ivan Grokhotkov
2025-02-21 16:04:01 +01:00
committed by armando
parent efec039d9f
commit f824a827dc
6 changed files with 126 additions and 107 deletions

View File

@ -1,11 +1,12 @@
/*
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <sys/param.h>
#include "esp_log.h"
#include "esp_intr_alloc.h"
@ -18,6 +19,7 @@
#include "esp_rom_sys.h"
#include "driver/gpio.h"
#include "driver/sdmmc_host.h"
#include "esp_cache.h"
#include "esp_private/periph_ctrl.h"
#include "sdmmc_private.h"
#include "freertos/FreeRTOS.h"
@ -31,6 +33,12 @@
#define SDMMC_EVENT_QUEUE_LENGTH 32
/* Number of DMA descriptors used for transfer.
* Increasing this value above 4 doesn't improve performance for the usual case
* of SD memory cards (most data transfers are multiples of 512 bytes).
*/
#define SDMMC_DMA_DESC_CNT 4
#if !SOC_RCC_IS_INDEPENDENT
// Reset and Clock Control registers are shared with other peripherals, so we need to use a critical section
#define SDMMC_RCC_ATOMIC() PERIPH_RCC_ATOMIC()
@ -65,9 +73,14 @@ typedef struct host_ctx_t {
SemaphoreHandle_t io_intr_event;
sdmmc_hal_context_t hal;
slot_ctx_t slot_ctx[SOC_SDMMC_NUM_SLOTS];
uint8_t *data_ptr;
size_t size_remaining;
size_t next_desc;
size_t desc_remaining;
} host_ctx_t;
static host_ctx_t s_host_ctx;
DRAM_DMA_ALIGNED_ATTR static sdmmc_desc_t s_dma_desc[SDMMC_DMA_DESC_CNT];
static void sdmmc_isr(void *arg);
static void sdmmc_host_dma_init(void);
@ -787,6 +800,66 @@ static void sdmmc_host_dma_init(void)
SDMMC.idinten.ti = 1;
}
/* Count how many DMA descriptors, starting from the one the IDMAC engine
 * will service next, have been released back to software ('owned_by_idmac'
 * cleared) and can therefore be refilled. Stops at the first descriptor
 * still owned by the engine, or at the end of the chain.
 * Called from the SDMMC ISR, so it must not block.
 */
static size_t get_free_descriptors_count(void)
{
const size_t next = s_host_ctx.next_desc;
size_t count = 0;
/* Starting with the current DMA descriptor, count the number of
* descriptors which have 'owned_by_idmac' set to 0. These are the
* descriptors already processed by the DMA engine.
*/
for (size_t i = 0; i < SDMMC_DMA_DESC_CNT; ++i) {
sdmmc_desc_t* desc = &s_dma_desc[(next + i) % SDMMC_DMA_DESC_CNT];
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
/* Sync memory-to-cache first so we observe the 'owned_by_idmac' bit
 * as last written by the DMA engine, not a stale cached copy. */
esp_err_t ret = esp_cache_msync((void *)desc, sizeof(sdmmc_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_M2C);
assert(ret == ESP_OK);
#endif
if (desc->owned_by_idmac) {
break;
}
++count;
if (desc->next_desc_ptr == NULL) {
/* final descriptor in the chain */
break;
}
}
return count;
}
/* Fill up to 'num_desc' DMA descriptors with the next chunks of the current
 * transfer, whose state (data pointer, remaining size, ring index) is kept
 * in s_host_ctx. Returns early once the whole transfer has been described.
 * Each filled descriptor is handed to the IDMAC by setting 'owned_by_idmac'.
 * May run in ISR context (called from sdmmc_isr for refilling).
 */
static void fill_dma_descriptors(size_t num_desc)
{
for (size_t i = 0; i < num_desc; ++i) {
if (s_host_ctx.size_remaining == 0) {
return;
}
const size_t next = s_host_ctx.next_desc;
sdmmc_desc_t* desc = &s_dma_desc[next];
/* Descriptor must already be released by the DMA engine; the caller is
 * expected to pass a count obtained from get_free_descriptors_count(). */
assert(!desc->owned_by_idmac);
/* Each descriptor covers at most SDMMC_DMA_MAX_BUF_LEN bytes. */
size_t size_to_fill =
(s_host_ctx.size_remaining < SDMMC_DMA_MAX_BUF_LEN) ?
s_host_ctx.size_remaining : SDMMC_DMA_MAX_BUF_LEN;
bool last = size_to_fill == s_host_ctx.size_remaining;
desc->last_descriptor = last;
desc->second_address_chained = 1;
desc->owned_by_idmac = 1;
desc->buffer1_ptr = s_host_ctx.data_ptr;
/* Chain to the next ring slot, or terminate the chain on the last chunk. */
desc->next_desc_ptr = (last) ? NULL : &s_dma_desc[(next + 1) % SDMMC_DMA_DESC_CNT];
/* Only the final chunk may be shorter than a word; all other chunks must
 * be word-aligned in size. */
assert(size_to_fill < 4 || size_to_fill % 4 == 0);
/* Round the buffer size up to a multiple of 4 bytes for the IDMAC. */
desc->buffer1_size = (size_to_fill + 3) & (~3);
s_host_ctx.size_remaining -= size_to_fill;
s_host_ctx.data_ptr += size_to_fill;
s_host_ctx.next_desc = (s_host_ctx.next_desc + 1) % SDMMC_DMA_DESC_CNT;
ESP_EARLY_LOGV(TAG, "fill %d desc=%d rem=%d next=%d last=%d sz=%d",
num_desc, next, s_host_ctx.size_remaining,
s_host_ctx.next_desc, desc->last_descriptor, desc->buffer1_size);
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
/* Write the updated descriptor back to memory (cache-to-memory sync) so
 * the DMA engine sees it. */
esp_err_t ret = esp_cache_msync((void *)desc, sizeof(sdmmc_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M);
assert(ret == ESP_OK);
#endif
}
}
void sdmmc_host_dma_stop(void)
{
SDMMC.ctrl.use_internal_dma = 0;
@ -795,12 +868,24 @@ void sdmmc_host_dma_stop(void)
SDMMC.bmod.enable = 0;
}
void sdmmc_host_dma_prepare(sdmmc_desc_t *desc, size_t block_size, size_t data_size)
void sdmmc_host_dma_prepare(void* data_ptr, size_t data_size, size_t block_size)
{
// this clears "owned by IDMAC" bits
memset(s_dma_desc, 0, sizeof(s_dma_desc));
// initialize first descriptor
s_dma_desc[0].first_descriptor = 1;
// save transfer info
s_host_ctx.data_ptr = (uint8_t*) data_ptr;
s_host_ctx.size_remaining = data_size;
s_host_ctx.next_desc = 0;
s_host_ctx.desc_remaining = (data_size + SDMMC_DMA_MAX_BUF_LEN - 1) / SDMMC_DMA_MAX_BUF_LEN;
// prepare descriptors
fill_dma_descriptors(SDMMC_DMA_DESC_CNT);
// Set size of data and DMA descriptor pointer
sdmmc_ll_set_data_transfer_len(s_host_ctx.hal.dev, data_size);
sdmmc_ll_set_block_size(s_host_ctx.hal.dev, block_size);
sdmmc_ll_set_desc_addr(s_host_ctx.hal.dev, (uint32_t)desc);
sdmmc_ll_set_desc_addr(s_host_ctx.hal.dev, (uint32_t)&s_dma_desc[0]);
// Enable everything needed to use DMA
sdmmc_ll_enable_dma(s_host_ctx.hal.dev, true);
@ -892,7 +977,19 @@ static void sdmmc_isr(void *arg)
uint32_t dma_pending = SDMMC.idsts.val;
SDMMC.idsts.val = dma_pending;
event.dma_status = dma_pending & 0x1f;
if (dma_pending & SDMMC_LL_EVENT_DMA_NI) {
// refill DMA descriptors
size_t free_desc = get_free_descriptors_count();
if (free_desc > 0) {
fill_dma_descriptors(free_desc);
sdmmc_host_dma_resume();
}
// NI is the logical OR of TI and RI. It is a sticky bit and must be cleared each time TI or RI is cleared.
dma_pending &= ~(SDMMC_LL_EVENT_DMA_NI | SDMMC_LL_EVENT_DMA_TI | SDMMC_LL_EVENT_DMA_RI);
}
event.dma_status = dma_pending & SDMMC_LL_EVENT_DMA_MASK;
if (pending != 0 || dma_pending != 0) {
xQueueSendFromISR(queue, &event, &higher_priority_task_awoken);

View File

@ -28,7 +28,7 @@ esp_err_t sdmmc_host_start_command(int slot, sdmmc_hw_cmd_t cmd, uint32_t arg);
esp_err_t sdmmc_host_wait_for_event(int tick_count, sdmmc_event_t* out_event);
void sdmmc_host_dma_prepare(sdmmc_desc_t* desc, size_t block_size, size_t data_size);
void sdmmc_host_dma_prepare(void* data_ptr, size_t data_size, size_t block_size);
void sdmmc_host_dma_stop(void);

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -22,12 +22,6 @@
#include "sdmmc_private.h"
#include "soc/soc_caps.h"
/* Number of DMA descriptors used for transfer.
* Increasing this value above 4 doesn't improve performance for the usual case
* of SD memory cards (most data transfers are multiples of 512 bytes).
*/
#define SDMMC_DMA_DESC_CNT 4
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
static const char* TAG = "sdmmc_req";
@ -39,13 +33,6 @@ typedef enum {
SDMMC_BUSY,
} sdmmc_req_state_t;
typedef struct {
uint8_t* ptr;
size_t size_remaining;
size_t next_desc;
size_t desc_remaining;
} sdmmc_transfer_state_t;
const uint32_t SDMMC_DATA_ERR_MASK =
SDMMC_INTMASK_DTO | SDMMC_INTMASK_DCRC |
SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE |
@ -60,8 +47,6 @@ const uint32_t SDMMC_CMD_ERR_MASK =
SDMMC_INTMASK_RCRC |
SDMMC_INTMASK_RESP_ERR;
DRAM_DMA_ALIGNED_ATTR static sdmmc_desc_t s_dma_desc[SDMMC_DMA_DESC_CNT];
static sdmmc_transfer_state_t s_cur_transfer = { 0 };
static QueueHandle_t s_request_mutex;
static bool s_is_app_cmd; // This flag is set if the next command is an APP command
#ifdef CONFIG_PM_ENABLE
@ -75,8 +60,6 @@ static esp_err_t handle_event(sdmmc_command_t* cmd, sdmmc_req_state_t* state,
static esp_err_t process_events(sdmmc_event_t evt, sdmmc_command_t* cmd,
sdmmc_req_state_t* pstate, sdmmc_event_t* unhandled_events);
static void process_command_response(uint32_t status, sdmmc_command_t* cmd);
static void fill_dma_descriptors(size_t num_desc);
static size_t get_free_descriptors_count(void);
static bool wait_for_busy_cleared(uint32_t timeout_ms);
esp_err_t sdmmc_host_transaction_handler_init(void)
@ -152,19 +135,8 @@ esp_err_t sdmmc_host_do_transaction(int slot, sdmmc_command_t* cmdinfo)
goto out;
}
#endif
// this clears "owned by IDMAC" bits
memset(s_dma_desc, 0, sizeof(s_dma_desc));
// initialize first descriptor
s_dma_desc[0].first_descriptor = 1;
// save transfer info
s_cur_transfer.ptr = (uint8_t*) cmdinfo->data;
s_cur_transfer.size_remaining = cmdinfo->datalen;
s_cur_transfer.next_desc = 0;
s_cur_transfer.desc_remaining = (cmdinfo->datalen + SDMMC_DMA_MAX_BUF_LEN - 1) / SDMMC_DMA_MAX_BUF_LEN;
// prepare descriptors
fill_dma_descriptors(SDMMC_DMA_DESC_CNT);
// write transfer info into hardware
sdmmc_host_dma_prepare(&s_dma_desc[0], cmdinfo->blklen, cmdinfo->datalen);
sdmmc_host_dma_prepare(cmdinfo->data, cmdinfo->datalen, cmdinfo->blklen);
}
// write command into hardware, this also sends the command to the card
ret = sdmmc_host_start_command(slot, hw_cmd, cmdinfo->arg);
@ -205,66 +177,6 @@ out:
return ret;
}
/* Count how many DMA descriptors, starting from the one the IDMAC engine
 * will service next, have been released back to software ('owned_by_idmac'
 * cleared) and can therefore be refilled. Stops at the first descriptor
 * still owned by the engine, or at the end of the chain.
 */
static size_t get_free_descriptors_count(void)
{
const size_t next = s_cur_transfer.next_desc;
size_t count = 0;
/* Starting with the current DMA descriptor, count the number of
* descriptors which have 'owned_by_idmac' set to 0. These are the
* descriptors already processed by the DMA engine.
*/
for (size_t i = 0; i < SDMMC_DMA_DESC_CNT; ++i) {
sdmmc_desc_t* desc = &s_dma_desc[(next + i) % SDMMC_DMA_DESC_CNT];
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
/* Sync memory-to-cache first so we observe the 'owned_by_idmac' bit
 * as last written by the DMA engine, not a stale cached copy. */
esp_err_t ret = esp_cache_msync((void *)desc, sizeof(sdmmc_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_M2C);
assert(ret == ESP_OK);
#endif
if (desc->owned_by_idmac) {
break;
}
++count;
if (desc->next_desc_ptr == NULL) {
/* final descriptor in the chain */
break;
}
}
return count;
}
/* Fill up to 'num_desc' DMA descriptors with the next chunks of the current
 * transfer, whose state (data pointer, remaining size, ring index) is kept
 * in s_cur_transfer. Returns early once the whole transfer has been
 * described. Each filled descriptor is handed to the IDMAC by setting
 * 'owned_by_idmac'.
 */
static void fill_dma_descriptors(size_t num_desc)
{
for (size_t i = 0; i < num_desc; ++i) {
if (s_cur_transfer.size_remaining == 0) {
return;
}
const size_t next = s_cur_transfer.next_desc;
sdmmc_desc_t* desc = &s_dma_desc[next];
/* Descriptor must already be released by the DMA engine; the caller is
 * expected to pass a count obtained from get_free_descriptors_count(). */
assert(!desc->owned_by_idmac);
/* Each descriptor covers at most SDMMC_DMA_MAX_BUF_LEN bytes. */
size_t size_to_fill =
(s_cur_transfer.size_remaining < SDMMC_DMA_MAX_BUF_LEN) ?
s_cur_transfer.size_remaining : SDMMC_DMA_MAX_BUF_LEN;
bool last = size_to_fill == s_cur_transfer.size_remaining;
desc->last_descriptor = last;
desc->second_address_chained = 1;
desc->owned_by_idmac = 1;
desc->buffer1_ptr = s_cur_transfer.ptr;
/* Chain to the next ring slot, or terminate the chain on the last chunk. */
desc->next_desc_ptr = (last) ? NULL : &s_dma_desc[(next + 1) % SDMMC_DMA_DESC_CNT];
/* Only the final chunk may be shorter than a word; all other chunks must
 * be word-aligned in size. */
assert(size_to_fill < 4 || size_to_fill % 4 == 0);
/* Round the buffer size up to a multiple of 4 bytes for the IDMAC. */
desc->buffer1_size = (size_to_fill + 3) & (~3);
s_cur_transfer.size_remaining -= size_to_fill;
s_cur_transfer.ptr += size_to_fill;
s_cur_transfer.next_desc = (s_cur_transfer.next_desc + 1) % SDMMC_DMA_DESC_CNT;
ESP_LOGV(TAG, "fill %d desc=%d rem=%d next=%d last=%d sz=%d",
num_desc, next, s_cur_transfer.size_remaining,
s_cur_transfer.next_desc, desc->last_descriptor, desc->buffer1_size);
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
/* Write the updated descriptor back to memory (cache-to-memory sync) so
 * the DMA engine sees it. */
esp_err_t ret = esp_cache_msync((void *)desc, sizeof(sdmmc_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M);
assert(ret == ESP_OK);
#endif
}
}
static esp_err_t handle_idle_state_events(void)
{
/* Handle any events which have happened in between transfers.
@ -467,15 +379,7 @@ static esp_err_t process_events(sdmmc_event_t evt, sdmmc_command_t* cmd,
sdmmc_host_dma_stop();
}
if (mask_check_and_clear(&evt.dma_status, SDMMC_DMA_DONE_MASK)) {
s_cur_transfer.desc_remaining--;
if (s_cur_transfer.size_remaining) {
int desc_to_fill = get_free_descriptors_count();
fill_dma_descriptors(desc_to_fill);
sdmmc_host_dma_resume();
}
if (s_cur_transfer.desc_remaining == 0) {
next_state = SDMMC_BUSY;
}
next_state = SDMMC_BUSY;
}
if (orig_evt.sdmmc_status & (SDMMC_INTMASK_SBE | SDMMC_INTMASK_DATA_OVER)) {
// On start bit error, DATA_DONE interrupt will not be generated

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -26,6 +26,12 @@ extern "C" {
#define SDMMC_LL_GET_HW(id) (((id) == 0) ? (&SDMMC) : NULL)
// DMA interrupts (idsts register)
#define SDMMC_LL_EVENT_DMA_TI SDMMC_IDMAC_INTMASK_TI
#define SDMMC_LL_EVENT_DMA_RI SDMMC_IDMAC_INTMASK_RI
#define SDMMC_LL_EVENT_DMA_NI SDMMC_IDMAC_INTMASK_NI
#define SDMMC_LL_EVENT_DMA_MASK 0x1f //NI and AI will be indicated by TI/RI and FBE/DU respectively
/**
* SDMMC capabilities
*/

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -28,6 +28,12 @@ extern "C" {
#define SDMMC_LL_GET_HW(id) (((id) == 0) ? (&SDMMC) : NULL)
// DMA interrupts (idsts register)
#define SDMMC_LL_EVENT_DMA_TI SDMMC_IDMAC_INTMASK_TI
#define SDMMC_LL_EVENT_DMA_RI SDMMC_IDMAC_INTMASK_RI
#define SDMMC_LL_EVENT_DMA_NI SDMMC_IDMAC_INTMASK_NI
#define SDMMC_LL_EVENT_DMA_MASK 0x1f //NI and AI will be indicated by TI/RI and FBE/DU respectively
/**
* SDMMC capabilities
*/

View File

@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@ -26,6 +26,12 @@ extern "C" {
#define SDMMC_LL_GET_HW(id) (((id) == 0) ? (&SDMMC) : NULL)
// DMA interrupts (idsts register)
#define SDMMC_LL_EVENT_DMA_TI SDMMC_IDMAC_INTMASK_TI
#define SDMMC_LL_EVENT_DMA_RI SDMMC_IDMAC_INTMASK_RI
#define SDMMC_LL_EVENT_DMA_NI SDMMC_IDMAC_INTMASK_NI
#define SDMMC_LL_EVENT_DMA_MASK 0x1f //NI and AI will be indicated by TI/RI and FBE/DU respectively
/**
* SDMMC capabilities
*/