Merge branch 'bugfix/sdmmc_high_prio_timeout_v5.0' into 'release/v5.0'

fix(sdmmc): move DMA descriptor refilling into the ISR (v5.0)

See merge request espressif/esp-idf!37691
morris
2025-03-14 11:48:52 +08:00
3 changed files with 99 additions and 89 deletions
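
The core of the fix: the driver keeps a small ring of IDMAC descriptors, and the refill of that ring now happens inside the interrupt handler rather than in the task-level event loop, where a long-running higher-priority task could delay the refill until the transfer timed out. A minimal, self-contained sketch of that ring-refill pattern follows; the types and names (desc_t, DESC_CNT, MAX_BUF_LEN, on_dma_done_irq) are simplified stand-ins for illustration, not the driver's real definitions.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DESC_CNT    4      /* ring size; stand-in for SDMMC_DMA_DESC_CNT */
#define MAX_BUF_LEN 4096   /* per-descriptor limit; stand-in for SDMMC_DMA_MAX_BUF_LEN */

/* Simplified stand-in for the hardware DMA descriptor. */
typedef struct desc {
    bool owned_by_idmac;   /* set while the DMA engine owns the descriptor */
    bool last;             /* final descriptor of the transfer */
    void* buf;
    size_t size;
    struct desc* next;     /* NULL terminates the chain */
} desc_t;

typedef struct {
    uint8_t* ptr;          /* next byte of the transfer buffer to map */
    size_t remaining;      /* bytes not yet handed to a descriptor */
    size_t next_desc;      /* ring index of the next descriptor to fill */
} xfer_t;

static desc_t ring[DESC_CNT];
static xfer_t xfer;

/* Count descriptors the engine has released, starting at next_desc. */
static size_t free_desc_count(void)
{
    size_t n = 0;
    for (size_t i = 0; i < DESC_CNT; ++i) {
        desc_t* d = &ring[(xfer.next_desc + i) % DESC_CNT];
        if (d->owned_by_idmac) {
            break;             /* engine still owns this one */
        }
        ++n;
        if (d->next == NULL) {
            break;             /* end of chain */
        }
    }
    return n;
}

/* Map up to num_desc chunks of the remaining buffer into the ring. */
static void fill_desc(size_t num_desc)
{
    for (size_t i = 0; i < num_desc && xfer.remaining > 0; ++i) {
        desc_t* d = &ring[xfer.next_desc];
        assert(!d->owned_by_idmac);
        size_t sz = (xfer.remaining < MAX_BUF_LEN) ? xfer.remaining : MAX_BUF_LEN;
        d->last = (sz == xfer.remaining);
        d->buf = xfer.ptr;
        d->size = sz;
        d->next = d->last ? NULL : &ring[(xfer.next_desc + 1) % DESC_CNT];
        d->owned_by_idmac = true;   /* hand ownership to the engine last */
        xfer.remaining -= sz;
        xfer.ptr += sz;
        xfer.next_desc = (xfer.next_desc + 1) % DESC_CNT;
    }
}

/* ISR-side refill: on a "descriptor done" interrupt, reuse freed slots
 * immediately instead of deferring to a task that may be preempted. */
void on_dma_done_irq(void)
{
    size_t n = free_desc_count();
    if (n > 0) {
        fill_desc(n);
        /* ...resume the DMA engine here (hardware-specific)... */
    }
}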

View File

@@ -1,11 +1,12 @@
 /*
- * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
 #include <stdbool.h>
 #include <stddef.h>
+#include <string.h>
 #include <sys/param.h>
 #include "esp_log.h"
 #include "esp_intr_alloc.h"
@@ -25,6 +26,21 @@
 #define SDMMC_EVENT_QUEUE_LENGTH 32

+/* Number of DMA descriptors used for transfer.
+ * Increasing this value above 4 doesn't improve performance for the usual case
+ * of SD memory cards (most data transfers are multiples of 512 bytes).
+ */
+#define SDMMC_DMA_DESC_CNT 4
+
+typedef struct {
+    uint8_t* ptr;
+    size_t size_remaining;
+    size_t next_desc;
+    size_t desc_remaining;
+} sdmmc_transfer_state_t;
+
+static sdmmc_desc_t s_dma_desc[SDMMC_DMA_DESC_CNT];
+static sdmmc_transfer_state_t s_cur_transfer = {};
+
 static void sdmmc_isr(void* arg);
 static void sdmmc_host_dma_init(void);
@@ -621,6 +637,57 @@ static void sdmmc_host_dma_init(void)
     SDMMC.idinten.ti = 1;
 }

+static size_t get_free_descriptors_count(void)
+{
+    const size_t next = s_cur_transfer.next_desc;
+    size_t count = 0;
+    /* Starting with the current DMA descriptor, count the number of
+     * descriptors which have 'owned_by_idmac' set to 0. These are the
+     * descriptors already processed by the DMA engine.
+     */
+    for (size_t i = 0; i < SDMMC_DMA_DESC_CNT; ++i) {
+        sdmmc_desc_t* desc = &s_dma_desc[(next + i) % SDMMC_DMA_DESC_CNT];
+        if (desc->owned_by_idmac) {
+            break;
+        }
+        ++count;
+        if (desc->next_desc_ptr == NULL) {
+            /* final descriptor in the chain */
+            break;
+        }
+    }
+    return count;
+}
+
+static void fill_dma_descriptors(size_t num_desc)
+{
+    for (size_t i = 0; i < num_desc; ++i) {
+        if (s_cur_transfer.size_remaining == 0) {
+            return;
+        }
+        const size_t next = s_cur_transfer.next_desc;
+        sdmmc_desc_t* desc = &s_dma_desc[next];
+        assert(!desc->owned_by_idmac);
+        size_t size_to_fill =
+            (s_cur_transfer.size_remaining < SDMMC_DMA_MAX_BUF_LEN) ?
+            s_cur_transfer.size_remaining : SDMMC_DMA_MAX_BUF_LEN;
+        bool last = size_to_fill == s_cur_transfer.size_remaining;
+        desc->last_descriptor = last;
+        desc->second_address_chained = 1;
+        desc->owned_by_idmac = 1;
+        desc->buffer1_ptr = s_cur_transfer.ptr;
+        desc->next_desc_ptr = (last) ? NULL : &s_dma_desc[(next + 1) % SDMMC_DMA_DESC_CNT];
+        assert(size_to_fill < 4 || size_to_fill % 4 == 0);
+        desc->buffer1_size = (size_to_fill + 3) & (~3);
+        s_cur_transfer.size_remaining -= size_to_fill;
+        s_cur_transfer.ptr += size_to_fill;
+        s_cur_transfer.next_desc = (s_cur_transfer.next_desc + 1) % SDMMC_DMA_DESC_CNT;
+        ESP_LOGV(TAG, "fill %d desc=%d rem=%d next=%d last=%d sz=%d",
+                 num_desc, next, s_cur_transfer.size_remaining,
+                 s_cur_transfer.next_desc, desc->last_descriptor, desc->buffer1_size);
+    }
+}
+
 void sdmmc_host_dma_stop(void)
 {
@@ -630,12 +697,24 @@ void sdmmc_host_dma_stop(void)
     SDMMC.bmod.enable = 0;
 }

-void sdmmc_host_dma_prepare(sdmmc_desc_t* desc, size_t block_size, size_t data_size)
+void sdmmc_host_dma_prepare(void* data_ptr, size_t data_size, size_t block_size)
 {
+    // this clears "owned by IDMAC" bits
+    memset(s_dma_desc, 0, sizeof(s_dma_desc));
+    // initialize first descriptor
+    s_dma_desc[0].first_descriptor = 1;
+    // save transfer info
+    s_cur_transfer.ptr = (uint8_t*) data_ptr;
+    s_cur_transfer.size_remaining = data_size;
+    s_cur_transfer.next_desc = 0;
+    s_cur_transfer.desc_remaining = (data_size + SDMMC_DMA_MAX_BUF_LEN - 1) / SDMMC_DMA_MAX_BUF_LEN;
+    // prepare descriptors
+    fill_dma_descriptors(SDMMC_DMA_DESC_CNT);
+
     // Set size of data and DMA descriptor pointer
     SDMMC.bytcnt = data_size;
     SDMMC.blksiz = block_size;
-    SDMMC.dbaddr = desc;
+    SDMMC.dbaddr = &s_dma_desc[0];

     // Enable everything needed to use DMA
     SDMMC.ctrl.dma_enable = 1;
@@ -716,6 +795,19 @@ static void sdmmc_isr(void* arg) {
     uint32_t dma_pending = SDMMC.idsts.val;
     SDMMC.idsts.val = dma_pending;
+
+    if (dma_pending & SDMMC_IDMAC_INTMASK_NI) {
+        // refill DMA descriptors
+        size_t free_desc = get_free_descriptors_count();
+        if (free_desc > 0) {
+            fill_dma_descriptors(free_desc);
+            sdmmc_host_dma_resume();
+        }
+        // NI is the logical OR of TI and RI; it is a sticky bit and must be
+        // cleared each time TI or RI is cleared.
+        dma_pending &= ~(SDMMC_IDMAC_INTMASK_NI | SDMMC_IDMAC_INTMASK_TI | SDMMC_IDMAC_INTMASK_RI);
+    }
+    // NI and AI will be indicated by TI/RI and FBE/DU respectively
     event.dma_status = dma_pending & 0x1f;

     if (pending != 0 || dma_pending != 0) {

View File

@@ -24,7 +24,7 @@ esp_err_t sdmmc_host_start_command(int slot, sdmmc_hw_cmd_t cmd, uint32_t arg);
 esp_err_t sdmmc_host_wait_for_event(int tick_count, sdmmc_event_t* out_event);
-void sdmmc_host_dma_prepare(sdmmc_desc_t* desc, size_t block_size, size_t data_size);
+void sdmmc_host_dma_prepare(void* data_ptr, size_t data_size, size_t block_size);
 void sdmmc_host_dma_stop(void);
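
Note the reworked prototype also swaps the order of the two size arguments: callers now pass the raw data buffer, then the total size, then the block size, and the driver builds the descriptor chain internally. A hypothetical call site (start_write, buf, and the 512-byte block size are illustrative, not part of the change):

#include <stddef.h>
#include <stdint.h>

/* Prototype as declared above, repeated so the sketch stands alone. */
void sdmmc_host_dma_prepare(void* data_ptr, size_t data_size, size_t block_size);

/* Hypothetical caller: data pointer first, then total size, then block size. */
static void start_write(uint8_t* buf, size_t len)
{
    sdmmc_host_dma_prepare(buf, len, 512);   /* 512-byte blocks, typical for SD */
}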

View File

@@ -1,5 +1,5 @@
 /*
- * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
  *
  * SPDX-License-Identifier: Apache-2.0
  */
@@ -35,13 +35,6 @@ typedef enum {
     SDMMC_BUSY,
 } sdmmc_req_state_t;

-typedef struct {
-    uint8_t* ptr;
-    size_t size_remaining;
-    size_t next_desc;
-    size_t desc_remaining;
-} sdmmc_transfer_state_t;
-
 const uint32_t SDMMC_DATA_ERR_MASK =
     SDMMC_INTMASK_DTO | SDMMC_INTMASK_DCRC |
     SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE |
@@ -56,8 +49,6 @@ const uint32_t SDMMC_CMD_ERR_MASK =
     SDMMC_INTMASK_RCRC |
     SDMMC_INTMASK_RESP_ERR;

-static sdmmc_desc_t s_dma_desc[SDMMC_DMA_DESC_CNT];
-static sdmmc_transfer_state_t s_cur_transfer = { 0 };
 static QueueHandle_t s_request_mutex;
 static bool s_is_app_cmd;   // This flag is set if the next command is an APP command
 #ifdef CONFIG_PM_ENABLE
@@ -71,8 +62,6 @@ static esp_err_t handle_event(sdmmc_command_t* cmd, sdmmc_req_state_t* state,
 static esp_err_t process_events(sdmmc_event_t evt, sdmmc_command_t* cmd,
                                 sdmmc_req_state_t* pstate, sdmmc_event_t* unhandled_events);
 static void process_command_response(uint32_t status, sdmmc_command_t* cmd);
-static void fill_dma_descriptors(size_t num_desc);
-static size_t get_free_descriptors_count(void);
 static bool wait_for_busy_cleared(uint32_t timeout_ms);

 esp_err_t sdmmc_host_transaction_handler_init(void)
@@ -130,19 +119,8 @@ esp_err_t sdmmc_host_do_transaction(int slot, sdmmc_command_t* cmdinfo)
             ret = ESP_ERR_INVALID_ARG;
             goto out;
         }
-        // this clears "owned by IDMAC" bits
-        memset(s_dma_desc, 0, sizeof(s_dma_desc));
-        // initialize first descriptor
-        s_dma_desc[0].first_descriptor = 1;
-        // save transfer info
-        s_cur_transfer.ptr = (uint8_t*) cmdinfo->data;
-        s_cur_transfer.size_remaining = cmdinfo->datalen;
-        s_cur_transfer.next_desc = 0;
-        s_cur_transfer.desc_remaining = (cmdinfo->datalen + SDMMC_DMA_MAX_BUF_LEN - 1) / SDMMC_DMA_MAX_BUF_LEN;
-        // prepare descriptors
-        fill_dma_descriptors(SDMMC_DMA_DESC_CNT);
         // write transfer info into hardware
-        sdmmc_host_dma_prepare(&s_dma_desc[0], cmdinfo->blklen, cmdinfo->datalen);
+        sdmmc_host_dma_prepare(cmdinfo->data, cmdinfo->datalen, cmdinfo->blklen);
     }
     // write command into hardware, this also sends the command to the card
     ret = sdmmc_host_start_command(slot, hw_cmd, cmdinfo->arg);
@@ -174,58 +152,6 @@ out:
     return ret;
 }

-static size_t get_free_descriptors_count(void)
-{
-    const size_t next = s_cur_transfer.next_desc;
-    size_t count = 0;
-    /* Starting with the current DMA descriptor, count the number of
-     * descriptors which have 'owned_by_idmac' set to 0. These are the
-     * descriptors already processed by the DMA engine.
-     */
-    for (size_t i = 0; i < SDMMC_DMA_DESC_CNT; ++i) {
-        sdmmc_desc_t* desc = &s_dma_desc[(next + i) % SDMMC_DMA_DESC_CNT];
-        if (desc->owned_by_idmac) {
-            break;
-        }
-        ++count;
-        if (desc->next_desc_ptr == NULL) {
-            /* final descriptor in the chain */
-            break;
-        }
-    }
-    return count;
-}
-
-static void fill_dma_descriptors(size_t num_desc)
-{
-    for (size_t i = 0; i < num_desc; ++i) {
-        if (s_cur_transfer.size_remaining == 0) {
-            return;
-        }
-        const size_t next = s_cur_transfer.next_desc;
-        sdmmc_desc_t* desc = &s_dma_desc[next];
-        assert(!desc->owned_by_idmac);
-        size_t size_to_fill =
-            (s_cur_transfer.size_remaining < SDMMC_DMA_MAX_BUF_LEN) ?
-            s_cur_transfer.size_remaining : SDMMC_DMA_MAX_BUF_LEN;
-        bool last = size_to_fill == s_cur_transfer.size_remaining;
-        desc->last_descriptor = last;
-        desc->second_address_chained = 1;
-        desc->owned_by_idmac = 1;
-        desc->buffer1_ptr = s_cur_transfer.ptr;
-        desc->next_desc_ptr = (last) ? NULL : &s_dma_desc[(next + 1) % SDMMC_DMA_DESC_CNT];
-        assert(size_to_fill < 4 || size_to_fill % 4 == 0);
-        desc->buffer1_size = (size_to_fill + 3) & (~3);
-        s_cur_transfer.size_remaining -= size_to_fill;
-        s_cur_transfer.ptr += size_to_fill;
-        s_cur_transfer.next_desc = (s_cur_transfer.next_desc + 1) % SDMMC_DMA_DESC_CNT;
-        ESP_LOGV(TAG, "fill %d desc=%d rem=%d next=%d last=%d sz=%d",
-                 num_desc, next, s_cur_transfer.size_remaining,
-                 s_cur_transfer.next_desc, desc->last_descriptor, desc->buffer1_size);
-    }
-}
-
 static esp_err_t handle_idle_state_events(void)
 {
     /* Handle any events which have happened in between transfers.
@@ -429,15 +355,7 @@ static esp_err_t process_events(sdmmc_event_t evt, sdmmc_command_t* cmd,
             sdmmc_host_dma_stop();
         }
         if (mask_check_and_clear(&evt.dma_status, SDMMC_DMA_DONE_MASK)) {
-            s_cur_transfer.desc_remaining--;
-            if (s_cur_transfer.size_remaining) {
-                int desc_to_fill = get_free_descriptors_count();
-                fill_dma_descriptors(desc_to_fill);
-                sdmmc_host_dma_resume();
-            }
-            if (s_cur_transfer.desc_remaining == 0) {
-                next_state = SDMMC_BUSY;
-            }
+            next_state = SDMMC_BUSY;
         }
         if (orig_evt.sdmmc_status & (SDMMC_INTMASK_SBE | SDMMC_INTMASK_DATA_OVER)) {
             // On start bit error, DATA_DONE interrupt will not be generated