fix(sdmmc): move DMA descriptor refilling into the ISR

Previously, as DMA descriptors were processed, the task performing the
SDMMC transfer would get woken up to refill the descriptors. This
design didn't work correctly when higher-priority tasks occupied the
CPU for too long, causing the SDMMC transfer to time out.

This change moves DMA descriptor refilling into the SDMMC ISR. The
"DMA done" interrupt is now delivered to the task context only once
the entire transfer has completed.
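
In sketch form, the refill now runs entirely in interrupt context
(condensed from the host driver diff below; event-queue plumbing and
error paths are omitted):

    static void sdmmc_isr(void *arg)
    {
        uint32_t dma_pending = sdmmc_ll_get_idsts_interrupt_raw(s_host_ctx.hal.dev);
        sdmmc_ll_clear_idsts_interrupt(s_host_ctx.hal.dev, dma_pending);
        if (dma_pending & SDMMC_LL_EVENT_DMA_NI) {
            /* A descriptor has been consumed: top the descriptor chain
             * back up here, in the ISR, instead of waking the task. */
            size_t free_desc = get_free_descriptors_count();
            if (free_desc > 0) {
                fill_dma_descriptors(free_desc);
                sdmmc_host_dma_resume();
            }
            /* Swallow the intermediate NI/TI/RI bits; only the final
             * "transfer done" or error bits reach the task. */
            dma_pending &= ~(SDMMC_LL_EVENT_DMA_NI | SDMMC_LL_EVENT_DMA_TI | SDMMC_LL_EVENT_DMA_RI);
        }
        /* ...any remaining status bits are posted to the task's event queue... */
    }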

Closes https://github.com/espressif/esp-idf/issues/13934
Author:    Ivan Grokhotkov
Date:      2025-02-21 16:04:01 +01:00
Committed: armando
Parent:    061ba0c99e
Commit:    4bd2322993

6 changed files with 123 additions and 105 deletions


@@ -18,6 +18,7 @@
#include "esp_rom_sys.h"
#include "driver/gpio.h"
#include "driver/sdmmc_host.h"
#include "esp_cache.h"
#include "esp_private/esp_clk_tree_common.h"
#include "esp_private/periph_ctrl.h"
#include "sdmmc_internal.h"
@@ -33,6 +34,12 @@
#define SDMMC_EVENT_QUEUE_LENGTH 32
+/* Number of DMA descriptors used for transfer.
+ * Increasing this value above 4 doesn't improve performance for the usual case
+ * of SD memory cards (most data transfers are multiples of 512 bytes).
+ */
+#define SDMMC_DMA_DESC_CNT 4
#define SDMMC_FREQ_SDR104 208000 /*!< MMC 208MHz speed */
#if !SOC_RCC_IS_INDEPENDENT
@@ -93,6 +100,10 @@ typedef struct host_ctx_t {
    uint8_t num_of_init_slots;
    int8_t active_slot_num;
#endif
+    uint8_t* data_ptr;
+    size_t size_remaining;
+    size_t next_desc;
+    size_t desc_remaining;
} host_ctx_t;
#if SOC_SDMMC_NUM_SLOTS >= 2
@@ -100,6 +111,7 @@ static host_ctx_t s_host_ctx = {.active_slot_num = -1};
#else
static host_ctx_t s_host_ctx = {0};
#endif
+DRAM_DMA_ALIGNED_ATTR static sdmmc_desc_t s_dma_desc[SDMMC_DMA_DESC_CNT];
static void sdmmc_isr(void *arg);
static esp_err_t sdmmc_host_pullup_en_internal(int slot, int width);
@@ -1022,17 +1034,89 @@ void sdmmc_host_enable_clk_cmd11(int slot, bool enable)
    sdmmc_ll_enable_1v8_mode(s_host_ctx.hal.dev, slot, enable);
}
+static size_t get_free_descriptors_count(void)
+{
+    const size_t next = s_host_ctx.next_desc;
+    size_t count = 0;
+    /* Starting with the current DMA descriptor, count the number of
+     * descriptors which have 'owned_by_idmac' set to 0. These are the
+     * descriptors already processed by the DMA engine.
+     */
+    for (size_t i = 0; i < SDMMC_DMA_DESC_CNT; ++i) {
+        sdmmc_desc_t* desc = &s_dma_desc[(next + i) % SDMMC_DMA_DESC_CNT];
+#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
+        esp_err_t ret = esp_cache_msync((void *)desc, sizeof(sdmmc_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_M2C);
+        assert(ret == ESP_OK);
+#endif
+        if (desc->owned_by_idmac) {
+            break;
+        }
+        ++count;
+        if (desc->next_desc_ptr == NULL) {
+            /* final descriptor in the chain */
+            break;
+        }
+    }
+    return count;
+}
+
+static void fill_dma_descriptors(size_t num_desc)
+{
+    for (size_t i = 0; i < num_desc; ++i) {
+        if (s_host_ctx.size_remaining == 0) {
+            return;
+        }
+        const size_t next = s_host_ctx.next_desc;
+        sdmmc_desc_t* desc = &s_dma_desc[next];
+        assert(!desc->owned_by_idmac);
+        size_t size_to_fill =
+            (s_host_ctx.size_remaining < SDMMC_DMA_MAX_BUF_LEN) ?
+            s_host_ctx.size_remaining : SDMMC_DMA_MAX_BUF_LEN;
+        bool last = size_to_fill == s_host_ctx.size_remaining;
+        desc->last_descriptor = last;
+        desc->second_address_chained = 1;
+        desc->owned_by_idmac = 1;
+        desc->buffer1_ptr = s_host_ctx.data_ptr;
+        desc->next_desc_ptr = (last) ? NULL : &s_dma_desc[(next + 1) % SDMMC_DMA_DESC_CNT];
+        assert(size_to_fill < 4 || size_to_fill % 4 == 0);
+        desc->buffer1_size = (size_to_fill + 3) & (~3);
+        s_host_ctx.size_remaining -= size_to_fill;
+        s_host_ctx.data_ptr += size_to_fill;
+        s_host_ctx.next_desc = (s_host_ctx.next_desc + 1) % SDMMC_DMA_DESC_CNT;
+        ESP_EARLY_LOGV(TAG, "fill %d desc=%d rem=%d next=%d last=%d sz=%d",
+                       num_desc, next, s_host_ctx.size_remaining,
+                       s_host_ctx.next_desc, desc->last_descriptor, desc->buffer1_size);
+#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
+        esp_err_t ret = esp_cache_msync((void *)desc, sizeof(sdmmc_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M);
+        assert(ret == ESP_OK);
+#endif
+    }
+}
void sdmmc_host_dma_stop(void)
{
    sdmmc_ll_stop_dma(s_host_ctx.hal.dev);
}

-void sdmmc_host_dma_prepare(sdmmc_desc_t *desc, size_t block_size, size_t data_size)
+void sdmmc_host_dma_prepare(void* data_ptr, size_t data_size, size_t block_size)
{
+    // this clears "owned by IDMAC" bits
+    memset(s_dma_desc, 0, sizeof(s_dma_desc));
+    // initialize first descriptor
+    s_dma_desc[0].first_descriptor = 1;
+    // save transfer info
+    s_host_ctx.data_ptr = (uint8_t*) data_ptr;
+    s_host_ctx.size_remaining = data_size;
+    s_host_ctx.next_desc = 0;
+    s_host_ctx.desc_remaining = (data_size + SDMMC_DMA_MAX_BUF_LEN - 1) / SDMMC_DMA_MAX_BUF_LEN;
+    // prepare descriptors
+    fill_dma_descriptors(SDMMC_DMA_DESC_CNT);
    // Set size of data and DMA descriptor pointer
    sdmmc_ll_set_data_transfer_len(s_host_ctx.hal.dev, data_size);
    sdmmc_ll_set_block_size(s_host_ctx.hal.dev, block_size);
-    sdmmc_ll_set_desc_addr(s_host_ctx.hal.dev, (uint32_t)desc);
+    sdmmc_ll_set_desc_addr(s_host_ctx.hal.dev, (uint32_t)&s_dma_desc[0]);
    // Enable everything needed to use DMA
    sdmmc_ll_enable_dma(s_host_ctx.hal.dev, true);
@@ -1124,7 +1208,19 @@ static void sdmmc_isr(void *arg)
    uint32_t dma_pending = sdmmc_ll_get_idsts_interrupt_raw(s_host_ctx.hal.dev);
    sdmmc_ll_clear_idsts_interrupt(s_host_ctx.hal.dev, dma_pending);
-    event.dma_status = dma_pending & 0x1f;
+    if (dma_pending & SDMMC_LL_EVENT_DMA_NI) {
+        // refill DMA descriptors
+        size_t free_desc = get_free_descriptors_count();
+        if (free_desc > 0) {
+            fill_dma_descriptors(free_desc);
+            sdmmc_host_dma_resume();
+        }
+        //NI, logic OR of TI and RI. This is a sticky bit and must be cleared each time TI or RI is cleared.
+        dma_pending &= ~(SDMMC_LL_EVENT_DMA_NI | SDMMC_LL_EVENT_DMA_TI | SDMMC_LL_EVENT_DMA_RI);
+    }
+    event.dma_status = dma_pending & SDMMC_LL_EVENT_DMA_MASK;
    if (pending != 0 || dma_pending != 0) {
        xQueueSendFromISR(queue, &event, &higher_priority_task_awoken);


@@ -28,7 +28,7 @@ esp_err_t sdmmc_host_start_command(int slot, sdmmc_hw_cmd_t cmd, uint32_t arg);
esp_err_t sdmmc_host_wait_for_event(int tick_count, sdmmc_event_t* out_event);
-void sdmmc_host_dma_prepare(sdmmc_desc_t* desc, size_t block_size, size_t data_size);
+void sdmmc_host_dma_prepare(void* data_ptr, size_t data_size, size_t block_size);
void sdmmc_host_dma_stop(void);


@@ -24,12 +24,6 @@
#include "soc/soc_caps.h"
#include "hal/sdmmc_ll.h"
-/* Number of DMA descriptors used for transfer.
- * Increasing this value above 4 doesn't improve performance for the usual case
- * of SD memory cards (most data transfers are multiples of 512 bytes).
- */
-#define SDMMC_DMA_DESC_CNT 4
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
static const char* TAG = "sdmmc_req";
@@ -43,13 +37,6 @@ typedef enum {
    SDMMC_WAITING_VOLTAGE_SWITCH,
} sdmmc_req_state_t;

-typedef struct {
-    uint8_t* ptr;
-    size_t size_remaining;
-    size_t next_desc;
-    size_t desc_remaining;
-} sdmmc_transfer_state_t;

const uint32_t SDMMC_DATA_ERR_MASK =
    SDMMC_INTMASK_DTO | SDMMC_INTMASK_DCRC |
    SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE |
@@ -64,8 +51,6 @@ const uint32_t SDMMC_CMD_ERR_MASK =
    SDMMC_INTMASK_RCRC |
    SDMMC_INTMASK_RESP_ERR;

-DRAM_DMA_ALIGNED_ATTR static sdmmc_desc_t s_dma_desc[SDMMC_DMA_DESC_CNT];
-static sdmmc_transfer_state_t s_cur_transfer = { 0 };
static QueueHandle_t s_request_mutex;
static bool s_is_app_cmd; // This flag is set if the next command is an APP command
#ifdef CONFIG_PM_ENABLE
@@ -79,8 +64,6 @@ static esp_err_t handle_event(int slot, sdmmc_command_t* cmd, sdmmc_req_state_t*
static esp_err_t process_events(int slot, sdmmc_event_t evt, sdmmc_command_t* cmd,
                                sdmmc_req_state_t* pstate, sdmmc_event_t* unhandled_events);
static void process_command_response(uint32_t status, sdmmc_command_t* cmd);
-static void fill_dma_descriptors(size_t num_desc);
-static size_t get_free_descriptors_count(void);
static bool wait_for_busy_cleared(uint32_t timeout_ms);
static void handle_voltage_switch_stage1(int slot, sdmmc_command_t* cmd);
static void handle_voltage_switch_stage2(int slot, sdmmc_command_t* cmd);
@@ -165,19 +148,8 @@ esp_err_t sdmmc_host_do_transaction(int slot, sdmmc_command_t* cmdinfo)
goto out;
}
#endif
-        // this clears "owned by IDMAC" bits
-        memset(s_dma_desc, 0, sizeof(s_dma_desc));
-        // initialize first descriptor
-        s_dma_desc[0].first_descriptor = 1;
-        // save transfer info
-        s_cur_transfer.ptr = (uint8_t*) cmdinfo->data;
-        s_cur_transfer.size_remaining = cmdinfo->datalen;
-        s_cur_transfer.next_desc = 0;
-        s_cur_transfer.desc_remaining = (cmdinfo->datalen + SDMMC_DMA_MAX_BUF_LEN - 1) / SDMMC_DMA_MAX_BUF_LEN;
-        // prepare descriptors
-        fill_dma_descriptors(SDMMC_DMA_DESC_CNT);
        // write transfer info into hardware
-        sdmmc_host_dma_prepare(&s_dma_desc[0], cmdinfo->blklen, cmdinfo->datalen);
+        sdmmc_host_dma_prepare(cmdinfo->data, cmdinfo->datalen, cmdinfo->blklen);
    }
    // write command into hardware, this also sends the command to the card
    ret = sdmmc_host_start_command(slot, hw_cmd, cmdinfo->arg);
@@ -221,66 +193,6 @@ out:
    return ret;
}
-static size_t get_free_descriptors_count(void)
-{
-    const size_t next = s_cur_transfer.next_desc;
-    size_t count = 0;
-    /* Starting with the current DMA descriptor, count the number of
-     * descriptors which have 'owned_by_idmac' set to 0. These are the
-     * descriptors already processed by the DMA engine.
-     */
-    for (size_t i = 0; i < SDMMC_DMA_DESC_CNT; ++i) {
-        sdmmc_desc_t* desc = &s_dma_desc[(next + i) % SDMMC_DMA_DESC_CNT];
-#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
-        esp_err_t ret = esp_cache_msync((void *)desc, sizeof(sdmmc_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_M2C);
-        assert(ret == ESP_OK);
-#endif
-        if (desc->owned_by_idmac) {
-            break;
-        }
-        ++count;
-        if (desc->next_desc_ptr == NULL) {
-            /* final descriptor in the chain */
-            break;
-        }
-    }
-    return count;
-}
-
-static void fill_dma_descriptors(size_t num_desc)
-{
-    for (size_t i = 0; i < num_desc; ++i) {
-        if (s_cur_transfer.size_remaining == 0) {
-            return;
-        }
-        const size_t next = s_cur_transfer.next_desc;
-        sdmmc_desc_t* desc = &s_dma_desc[next];
-        assert(!desc->owned_by_idmac);
-        size_t size_to_fill =
-            (s_cur_transfer.size_remaining < SDMMC_DMA_MAX_BUF_LEN) ?
-            s_cur_transfer.size_remaining : SDMMC_DMA_MAX_BUF_LEN;
-        bool last = size_to_fill == s_cur_transfer.size_remaining;
-        desc->last_descriptor = last;
-        desc->second_address_chained = 1;
-        desc->owned_by_idmac = 1;
-        desc->buffer1_ptr = s_cur_transfer.ptr;
-        desc->next_desc_ptr = (last) ? NULL : &s_dma_desc[(next + 1) % SDMMC_DMA_DESC_CNT];
-        assert(size_to_fill < 4 || size_to_fill % 4 == 0);
-        desc->buffer1_size = (size_to_fill + 3) & (~3);
-        s_cur_transfer.size_remaining -= size_to_fill;
-        s_cur_transfer.ptr += size_to_fill;
-        s_cur_transfer.next_desc = (s_cur_transfer.next_desc + 1) % SDMMC_DMA_DESC_CNT;
-        ESP_LOGV(TAG, "fill %d desc=%d rem=%d next=%d last=%d sz=%d",
-                 num_desc, next, s_cur_transfer.size_remaining,
-                 s_cur_transfer.next_desc, desc->last_descriptor, desc->buffer1_size);
-#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
-        esp_err_t ret = esp_cache_msync((void *)desc, sizeof(sdmmc_desc_t), ESP_CACHE_MSYNC_FLAG_DIR_C2M);
-        assert(ret == ESP_OK);
-#endif
-    }
-}

static esp_err_t handle_idle_state_events(void)
{
    /* Handle any events which have happened in between transfers.
@@ -514,15 +426,7 @@ static esp_err_t process_events(int slot, sdmmc_event_t evt, sdmmc_command_t* cm
            sdmmc_host_dma_stop();
        }
        if (mask_check_and_clear(&evt.dma_status, SDMMC_DMA_DONE_MASK)) {
-            s_cur_transfer.desc_remaining--;
-            if (s_cur_transfer.size_remaining) {
-                int desc_to_fill = get_free_descriptors_count();
-                fill_dma_descriptors(desc_to_fill);
-                sdmmc_host_dma_resume();
-            }
-            if (s_cur_transfer.desc_remaining == 0) {
-                next_state = SDMMC_BUSY;
-            }
+            next_state = SDMMC_BUSY;
        }
        if (orig_evt.sdmmc_status & (SDMMC_INTMASK_SBE | SDMMC_INTMASK_DATA_OVER)) {
            // On start bit error, DATA_DONE interrupt will not be generated


@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -74,6 +74,12 @@ extern "C" {
SDMMC_LL_EVENT_SBE | SDMMC_LL_EVENT_ACD |\
SDMMC_LL_EVENT_EBE)
+// DMA interrupts (idsts register)
+#define SDMMC_LL_EVENT_DMA_TI SDMMC_IDMAC_INTMASK_TI
+#define SDMMC_LL_EVENT_DMA_RI SDMMC_IDMAC_INTMASK_RI
+#define SDMMC_LL_EVENT_DMA_NI SDMMC_IDMAC_INTMASK_NI
+#define SDMMC_LL_EVENT_DMA_MASK 0x1f //NI and AI will be indicated by TI/RI and FBE/DU respectively
/**
 * SDMMC capabilities
 */


@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -77,6 +77,12 @@ extern "C" {
SDMMC_LL_EVENT_SBE | SDMMC_LL_EVENT_ACD |\
SDMMC_LL_EVENT_EBE)
+// DMA interrupts (idsts register)
+#define SDMMC_LL_EVENT_DMA_TI SDMMC_IDMAC_INTMASK_TI
+#define SDMMC_LL_EVENT_DMA_RI SDMMC_IDMAC_INTMASK_RI
+#define SDMMC_LL_EVENT_DMA_NI SDMMC_IDMAC_INTMASK_NI
+#define SDMMC_LL_EVENT_DMA_MASK 0x1f //NI and AI will be indicated by TI/RI and FBE/DU respectively
/**
 * SDMMC capabilities
 */


@@ -1,5 +1,5 @@
/*
- * SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2023-2025 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
@@ -74,6 +74,12 @@ extern "C" {
SDMMC_LL_EVENT_SBE | SDMMC_LL_EVENT_ACD |\
SDMMC_LL_EVENT_EBE)
+// DMA interrupts (idsts register)
+#define SDMMC_LL_EVENT_DMA_TI SDMMC_IDMAC_INTMASK_TI
+#define SDMMC_LL_EVENT_DMA_RI SDMMC_IDMAC_INTMASK_RI
+#define SDMMC_LL_EVENT_DMA_NI SDMMC_IDMAC_INTMASK_NI
+#define SDMMC_LL_EVENT_DMA_MASK 0x1f //NI and AI will be indicated by TI/RI and FBE/DU respectively
/**
 * SDMMC capabilities
 */