refactor(gdma): move buffer alignment to buffer mount config

This commit is contained in:
Chen Jichang
2025-08-25 14:39:34 +08:00
committed by Chen Ji Chang
parent f2f62b590d
commit ddef1d3d52
16 changed files with 92 additions and 56 deletions

View File

@@ -108,7 +108,6 @@ esp_err_t bitscrambler_loopback_create(bitscrambler_handle_t *handle, int attach
// create DMA link list for TX and RX
gdma_link_list_config_t dma_link_cfg = {
.buffer_alignment = 4,
.item_alignment = align,
.num_items = desc_ct,
};
@@ -229,6 +228,7 @@ esp_err_t bitscrambler_loopback_run(bitscrambler_handle_t bs, void *buffer_in, s
// mount in and out buffer to the DMA link list
gdma_buffer_mount_config_t in_buf_mount_config = {
.buffer = buffer_in,
.buffer_alignment = 4,
.length = length_bytes_in,
.flags = {
.mark_eof = true,
@@ -238,6 +238,7 @@ esp_err_t bitscrambler_loopback_run(bitscrambler_handle_t bs, void *buffer_in, s
gdma_link_mount_buffers(bsl->tx_link_list, 0, &in_buf_mount_config, 1, NULL);
gdma_buffer_mount_config_t out_buf_mount_config = {
.buffer = buffer_out,
.buffer_alignment = 4,
.length = length_bytes_out,
.flags = {
.mark_eof = false,

View File

@@ -306,10 +306,9 @@ static esp_err_t i3c_master_init_dma(i3c_master_bus_t *i3c_master_handle, const
// create DMA link list
size_t int_mem_align = 0;
gdma_get_alignment_constraints(i3c_master_handle->dma_tx_chan, &int_mem_align, NULL);
size_t buffer_alignment = I3C_ALIGN_UP(int_mem_align, I3C_MASTER_DMA_INTERFACE_ALIGNMENT);
size_t num_dma_nodes = esp_dma_calculate_node_count(dma_config->max_transfer_size, buffer_alignment, DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
i3c_master_handle->dma_buffer_alignment = I3C_ALIGN_UP(int_mem_align, I3C_MASTER_DMA_INTERFACE_ALIGNMENT);
size_t num_dma_nodes = esp_dma_calculate_node_count(dma_config->max_transfer_size, i3c_master_handle->dma_buffer_alignment, DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
gdma_link_list_config_t dma_link_config = {
.buffer_alignment = buffer_alignment, // no special buffer alignment for i3c master buffer
.item_alignment = 4, // 4 bytes alignment for AHB-DMA
.num_items = num_dma_nodes, // only one item in the link list so far
};
@@ -462,6 +461,7 @@ static esp_err_t do_dma_transaction_handler(i3c_master_bus_handle_t bus_handle,
size_t dma_aligned_size = I3C_ALIGN_UP(write_size, I3C_MASTER_DMA_INTERFACE_ALIGNMENT);
gdma_buffer_mount_config_t mount_config = {
.buffer = trans->write_buffer,
.buffer_alignment = bus_handle->dma_buffer_alignment,
.length = dma_aligned_size,
.flags = {
.mark_eof = true,
@@ -487,6 +487,7 @@ static esp_err_t do_dma_transaction_handler(i3c_master_bus_handle_t bus_handle,
size_t dma_aligned_size = I3C_ALIGN_UP(read_size, I3C_MASTER_DMA_INTERFACE_ALIGNMENT);
gdma_buffer_mount_config_t mount_config = {
.buffer = trans->read_buffer,
.buffer_alignment = bus_handle->dma_buffer_alignment,
.length = dma_aligned_size,
.flags = {
.mark_eof = true,

View File

@@ -161,6 +161,7 @@ struct i3c_master_bus_t {
gdma_channel_handle_t dma_rx_chan; /**< DMA channel handle for RX. */
gdma_link_list_handle_t tx_dma_link; /**< Linked list for TX DMA. */
gdma_link_list_handle_t rx_dma_link; /**< Linked list for RX DMA. */
size_t dma_buffer_alignment; /**< Alignment of the DMA buffer. */
i3c_transaction_handler_t transaction_handler; /**< Function pointer for transaction handling (FIFO or DMA) */
};

View File

@@ -164,7 +164,6 @@ static esp_err_t parlio_tx_unit_init_dma(parlio_tx_unit_t *tx_unit, const parlio
size_t buffer_alignment = MAX(tx_unit->int_mem_align, tx_unit->ext_mem_align);
size_t num_dma_nodes = esp_dma_calculate_node_count(config->max_transfer_size, buffer_alignment, PARLIO_DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
gdma_link_list_config_t dma_link_config = {
.buffer_alignment = buffer_alignment,
.item_alignment = PARLIO_DMA_DESC_ALIGNMENT,
.num_items = num_dma_nodes,
};
@@ -452,9 +451,11 @@ esp_err_t parlio_tx_unit_register_event_callbacks(parlio_tx_unit_handle_t tx_uni
static void parlio_mount_buffer(parlio_tx_unit_t *tx_unit, parlio_tx_trans_desc_t *t)
{
size_t buffer_alignment = esp_ptr_internal(t->payload) ? tx_unit->int_mem_align : tx_unit->ext_mem_align;
// DMA transfer data based on bytes not bits, so convert the bit length to bytes, round up
gdma_buffer_mount_config_t mount_config = {
.buffer = (void *)t->payload,
.buffer_alignment = buffer_alignment,
.length = (t->payload_bits + 7) / 8,
.flags = {
// if transmission is loop, we don't need to generate the EOF for 1-bit data width, DIG-559

View File

@@ -22,7 +22,7 @@ static void rmt_rx_default_isr(void *args);
static bool rmt_dma_rx_one_block_cb(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data);
__attribute__((always_inline))
static inline void rmt_rx_mount_dma_buffer(rmt_rx_channel_t *rx_chan, const void *buffer, size_t buffer_size, size_t per_block_size, size_t last_block_size)
static inline void rmt_rx_mount_dma_buffer(rmt_rx_channel_t *rx_chan, const void *buffer, size_t buffer_size, size_t mem_alignment, size_t per_block_size, size_t last_block_size)
{
uint8_t *data = (uint8_t *)buffer;
gdma_buffer_mount_config_t mount_configs[rx_chan->num_dma_nodes];
@@ -31,6 +31,7 @@ static inline void rmt_rx_mount_dma_buffer(rmt_rx_channel_t *rx_chan, const void
mount_configs[i] = (gdma_buffer_mount_config_t) {
.buffer = data + i * per_block_size,
.length = per_block_size,
.buffer_alignment = mem_alignment,
.flags = {
.mark_final = false,
}
@@ -70,7 +71,6 @@ static esp_err_t rmt_rx_init_dma_link(rmt_rx_channel_t *rx_channel, const rmt_rx
// create DMA link list
gdma_link_list_config_t dma_link_config = {
.buffer_alignment = rx_channel->dma_int_mem_alignment,
.item_alignment = RMT_DMA_DESC_ALIGN,
.num_items = rx_channel->num_dma_nodes,
.flags = {
@@ -419,7 +419,7 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_
size_t per_dma_block_size = buffer_size / rx_chan->num_dma_nodes;
per_dma_block_size = ALIGN_DOWN(per_dma_block_size, mem_alignment);
size_t last_dma_block_size = buffer_size - per_dma_block_size * (rx_chan->num_dma_nodes - 1);
rmt_rx_mount_dma_buffer(rx_chan, buffer, buffer_size, per_dma_block_size, last_dma_block_size);
rmt_rx_mount_dma_buffer(rx_chan, buffer, buffer_size, mem_alignment, per_dma_block_size, last_dma_block_size);
gdma_reset(channel->dma_chan);
gdma_start(channel->dma_chan, gdma_link_get_head_addr(rx_chan->dma_link)); // note, we must use the cached descriptor address to start the DMA
}

View File

@@ -86,7 +86,6 @@ static esp_err_t rmt_tx_init_dma_link(rmt_tx_channel_t *tx_channel, const rmt_tx
// create DMA link list
gdma_link_list_config_t dma_link_config = {
.buffer_alignment = int_alignment,
.item_alignment = RMT_DMA_DESC_ALIGN,
.num_items = RMT_DMA_NODES_PING_PONG,
.flags = {
@@ -100,6 +99,7 @@ static esp_err_t rmt_tx_init_dma_link(rmt_tx_channel_t *tx_channel, const rmt_tx
// each descriptor shares half of the DMA buffer
mount_configs[i] = (gdma_buffer_mount_config_t) {
.buffer = tx_channel->dma_mem_base + tx_channel->ping_pong_symbols * i,
.buffer_alignment = int_alignment,
.length = tx_channel->ping_pong_symbols * sizeof(rmt_symbol_word_t),
.flags = {
// each node can generate the DMA eof interrupt, and the driver will do a ping-pong trick in the eof callback

View File

@@ -456,18 +456,23 @@ static bool test_rmt_rx_unaligned_buffer_done_callback(rmt_channel_handle_t chan
return high_task_wakeup == pdTRUE;
}
static void test_rmt_unaligned_receive(size_t mem_block_symbols, int test_symbols_num, bool with_dma, bool en_partial_rx, rmt_clock_source_t clk_src)
static void test_rmt_unaligned_receive(size_t mem_block_symbols, int test_symbols_num, bool with_dma, bool en_partial_rx, bool rx_buffer_use_psram)
{
uint32_t const test_rx_buffer_symbols = 128;
rmt_symbol_word_t *receive_user_buf = heap_caps_aligned_calloc(64, test_rx_buffer_symbols, sizeof(rmt_symbol_word_t),
MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA);
uint32_t caps = MALLOC_CAP_8BIT | MALLOC_CAP_DMA;
if (rx_buffer_use_psram) {
caps |= MALLOC_CAP_SPIRAM;
} else {
caps |= MALLOC_CAP_INTERNAL;
}
rmt_symbol_word_t *receive_user_buf = heap_caps_aligned_calloc(64, test_rx_buffer_symbols, sizeof(rmt_symbol_word_t), caps);
TEST_ASSERT_NOT_NULL(receive_user_buf);
rmt_symbol_word_t *receive_user_buf_unaligned = (rmt_symbol_word_t *)((uint8_t *)receive_user_buf + 1);
size_t receive_user_buf_unaligned_size = test_rx_buffer_symbols * sizeof(rmt_symbol_word_t) - 1;
// use TX channel to simulate the input signal
rmt_tx_channel_config_t tx_channel_cfg = {
.clk_src = clk_src,
.clk_src = RMT_CLK_SRC_DEFAULT,
.resolution_hz = 1000000, // 1MHz, 1 tick = 1us
.mem_block_symbols = SOC_RMT_MEM_WORDS_PER_CHANNEL,
.trans_queue_depth = 4,
@@ -485,7 +490,7 @@ static void test_rmt_unaligned_receive(size_t mem_block_symbols, int test_symbol
};
rmt_rx_channel_config_t rx_channel_cfg = {
.clk_src = clk_src,
.clk_src = RMT_CLK_SRC_DEFAULT,
.resolution_hz = 1000000, // 1MHz, 1 tick = 1us
.mem_block_symbols = mem_block_symbols,
.gpio_num = TEST_RMT_GPIO_NUM_A,
@@ -553,14 +558,20 @@ static void test_rmt_unaligned_receive(size_t mem_block_symbols, int test_symbol
TEST_CASE("rmt rx unaligned buffer", "[rmt]")
{
test_rmt_unaligned_receive(SOC_RMT_MEM_WORDS_PER_CHANNEL, SOC_RMT_MEM_WORDS_PER_CHANNEL, false, false, RMT_CLK_SRC_DEFAULT);
test_rmt_unaligned_receive(SOC_RMT_MEM_WORDS_PER_CHANNEL, SOC_RMT_MEM_WORDS_PER_CHANNEL, false, false, false);
#if SOC_RMT_SUPPORT_RX_PINGPONG
test_rmt_unaligned_receive(SOC_RMT_MEM_WORDS_PER_CHANNEL, 10000, false, true, RMT_CLK_SRC_DEFAULT);
test_rmt_unaligned_receive(SOC_RMT_MEM_WORDS_PER_CHANNEL, 10000, false, true, false);
#endif
#if SOC_RMT_SUPPORT_DMA
test_rmt_unaligned_receive(256, SOC_RMT_MEM_WORDS_PER_CHANNEL, true, false, RMT_CLK_SRC_DEFAULT);
test_rmt_unaligned_receive(256, SOC_RMT_MEM_WORDS_PER_CHANNEL, true, false, false);
#if SOC_PSRAM_DMA_CAPABLE
test_rmt_unaligned_receive(256, SOC_RMT_MEM_WORDS_PER_CHANNEL, true, false, true);
#endif
#endif
#if SOC_RMT_SUPPORT_RX_PINGPONG && SOC_RMT_SUPPORT_DMA
test_rmt_unaligned_receive(256, 10000, true, true, RMT_CLK_SRC_DEFAULT);
test_rmt_unaligned_receive(256, 10000, true, true, false);
#if SOC_PSRAM_DMA_CAPABLE
test_rmt_unaligned_receive(256, 10000, true, true, true);
#endif
#endif
}

View File

@@ -199,7 +199,6 @@ static esp_err_t uhci_gdma_initialize(uhci_controller_handle_t uhci_ctrl, const
size_t buffer_alignment = UHCI_MAX(uhci_ctrl->tx_dir.int_mem_align, uhci_ctrl->tx_dir.ext_mem_align);
size_t num_dma_nodes = esp_dma_calculate_node_count(config->max_transmit_size, buffer_alignment, DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
gdma_link_list_config_t dma_link_config = {
.buffer_alignment = buffer_alignment,
.item_alignment = 4,
.num_items = num_dma_nodes,
};
@@ -220,7 +219,6 @@ static esp_err_t uhci_gdma_initialize(uhci_controller_handle_t uhci_ctrl, const
gdma_get_alignment_constraints(uhci_ctrl->rx_dir.dma_chan, &uhci_ctrl->rx_dir.int_mem_align, &uhci_ctrl->rx_dir.ext_mem_align);
buffer_alignment = UHCI_MAX(uhci_ctrl->rx_dir.int_mem_align, uhci_ctrl->rx_dir.ext_mem_align);
uhci_ctrl->rx_dir.rx_num_dma_nodes = esp_dma_calculate_node_count(config->max_receive_internal_mem, buffer_alignment, DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
dma_link_config.buffer_alignment = buffer_alignment;
dma_link_config.num_items = uhci_ctrl->rx_dir.rx_num_dma_nodes;
ESP_RETURN_ON_ERROR(gdma_new_link_list(&dma_link_config, &uhci_ctrl->rx_dir.dma_link), TAG, "DMA rx link list alloc failed");
ESP_LOGD(TAG, "rx_dma node number is %d", uhci_ctrl->rx_dir.rx_num_dma_nodes);
@@ -268,8 +266,10 @@ static esp_err_t uhci_gdma_deinitialize(uhci_controller_handle_t uhci_ctrl)
static void uhci_do_transmit(uhci_controller_handle_t uhci_ctrl, uhci_transaction_desc_t *trans)
{
uhci_ctrl->tx_dir.cur_trans = trans;
size_t buffer_alignment = esp_ptr_internal(trans->buffer) ? uhci_ctrl->tx_dir.int_mem_align : uhci_ctrl->tx_dir.ext_mem_align;
gdma_buffer_mount_config_t mount_config = {
.buffer = trans->buffer,
.buffer_alignment = buffer_alignment,
.length = trans->buffer_size,
.flags = {
.mark_eof = true,
@@ -326,7 +326,7 @@ esp_err_t uhci_receive(uhci_controller_handle_t uhci_ctrl, uint8_t *read_buffer,
for (size_t i = 0; i < node_count; i++) {
uhci_ctrl->rx_dir.buffer_size_per_desc_node[i] = base_size;
uhci_ctrl->rx_dir.buffer_pointers[i] = read_buffer;
size_t buffer_alignment = esp_ptr_internal(read_buffer) ? uhci_ctrl->rx_dir.int_mem_align : uhci_ctrl->rx_dir.ext_mem_align;
// Distribute the remaining size to the first few nodes
if (remaining_size >= max_alignment_needed) {
uhci_ctrl->rx_dir.buffer_size_per_desc_node[i] += max_alignment_needed;
@@ -335,6 +335,7 @@ esp_err_t uhci_receive(uhci_controller_handle_t uhci_ctrl, uint8_t *read_buffer,
mount_configs[i] = (gdma_buffer_mount_config_t) {
.buffer = read_buffer,
.buffer_alignment = buffer_alignment,
.length = uhci_ctrl->rx_dir.buffer_size_per_desc_node[i],
.flags = {
.mark_final = false,

View File

@@ -217,7 +217,6 @@ static esp_err_t mcp_cpdma_memcpy(async_memcpy_context_t *ctx, void *dst, void *
// allocate gdma TX link
gdma_link_list_config_t tx_link_cfg = {
.buffer_alignment = 1, // CP_DMA doesn't have alignment requirement for internal memory
.item_alignment = 4, // CP_DMA requires 4 bytes alignment for each descriptor
.num_items = num_dma_nodes,
.flags = {
@@ -230,6 +229,7 @@ static esp_err_t mcp_cpdma_memcpy(async_memcpy_context_t *ctx, void *dst, void *
gdma_buffer_mount_config_t tx_buf_mount_config[1] = {
[0] = {
.buffer = src,
.buffer_alignment = 1, // CP_DMA doesn't have alignment requirement for internal memory
.length = n,
.flags = {
.mark_eof = true, // mark the last item as EOF, so the RX channel can also received an EOF list item
@@ -241,7 +241,6 @@ static esp_err_t mcp_cpdma_memcpy(async_memcpy_context_t *ctx, void *dst, void *
// allocate gdma RX link
gdma_link_list_config_t rx_link_cfg = {
.buffer_alignment = 1, // CP_DMA doesn't have alignment requirement for internal memory
.item_alignment = 4, // CP_DMA requires 4 bytes alignment for each descriptor
.num_items = num_dma_nodes,
.flags = {
@@ -254,6 +253,7 @@ static esp_err_t mcp_cpdma_memcpy(async_memcpy_context_t *ctx, void *dst, void *
gdma_buffer_mount_config_t rx_buf_mount_config[1] = {
[0] = {
.buffer = dst,
.buffer_alignment = 1, // CP_DMA doesn't have alignment requirement for internal memory
.length = n,
.flags = {
.mark_eof = false, // EOF is set by TX side

View File

@@ -356,7 +356,6 @@ static esp_err_t mcp_gdma_memcpy(async_memcpy_context_t *ctx, void *dst, void *s
buffer_alignment = esp_ptr_internal(src) ? mcp_gdma->tx_int_mem_alignment : mcp_gdma->tx_ext_mem_alignment;
num_dma_nodes = esp_dma_calculate_node_count(n, buffer_alignment, MCP_DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
gdma_link_list_config_t tx_link_cfg = {
.buffer_alignment = buffer_alignment,
.item_alignment = dma_link_item_alignment,
.num_items = num_dma_nodes,
.flags = {
@@ -369,6 +368,7 @@ static esp_err_t mcp_gdma_memcpy(async_memcpy_context_t *ctx, void *dst, void *s
gdma_buffer_mount_config_t tx_buf_mount_config[1] = {
[0] = {
.buffer = src,
.buffer_alignment = buffer_alignment,
.length = n,
.flags = {
.mark_eof = true, // mark the last item as EOF, so the RX channel can also received an EOF list item
@@ -389,7 +389,6 @@ static esp_err_t mcp_gdma_memcpy(async_memcpy_context_t *ctx, void *dst, void *s
buffer_alignment = esp_ptr_internal(dst) ? mcp_gdma->rx_int_mem_alignment : mcp_gdma->rx_ext_mem_alignment;
num_dma_nodes = esp_dma_calculate_node_count(n, buffer_alignment, MCP_DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
gdma_link_list_config_t rx_link_cfg = {
.buffer_alignment = buffer_alignment,
.item_alignment = dma_link_item_alignment,
.num_items = num_dma_nodes + 3, // add 3 extra items for the cache aligned buffers
.flags = {
@@ -406,6 +405,7 @@ static esp_err_t mcp_gdma_memcpy(async_memcpy_context_t *ctx, void *dst, void *s
gdma_buffer_mount_config_t rx_buf_mount_config[3] = {0};
for (int i = 0; i < 3; i++) {
rx_buf_mount_config[i].buffer = trans->rx_buf_array.aligned_buffer[i].aligned_buffer;
rx_buf_mount_config[i].buffer_alignment = buffer_alignment;
rx_buf_mount_config[i].length = trans->rx_buf_array.aligned_buffer[i].length;
}
gdma_link_mount_buffers(trans->rx_link_list, 0, rx_buf_mount_config, 3, NULL);

View File

@@ -51,7 +51,6 @@ struct gdma_link_list_item_t {
typedef struct gdma_link_list_t {
uint32_t num_items; // number of items in the link list
size_t item_size; // size of each item
size_t buffer_alignment; // Alignment of each buffer
uint8_t *items; // pointer to the link list items
uint8_t *items_nc; // pointer to the link list items, non-cached
struct {
@@ -66,11 +65,6 @@ esp_err_t gdma_new_link_list(const gdma_link_list_config_t *config, gdma_link_li
gdma_link_list_t *list = NULL;
ESP_RETURN_ON_FALSE(config && ret_list, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
ESP_RETURN_ON_FALSE(config->num_items, ESP_ERR_INVALID_ARG, TAG, "invalid number of items");
size_t buffer_alignment = config->buffer_alignment;
if (buffer_alignment == 0) {
buffer_alignment = 1;
}
ESP_RETURN_ON_FALSE((buffer_alignment & (buffer_alignment - 1)) == 0, ESP_ERR_INVALID_ARG, TAG, "invalid buffer alignment: %zu", buffer_alignment);
// the link list container is allocated from internal memory
list = heap_caps_calloc(1, sizeof(gdma_link_list_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
@@ -109,7 +103,6 @@ esp_err_t gdma_new_link_list(const gdma_link_list_config_t *config, gdma_link_li
list->items = items;
// calculate the non-cached address
list->items_nc = GDMA_CACHE_ADDR_TO_NON_CACHE_ADDR(items);
list->buffer_alignment = buffer_alignment;
list->flags.check_owner = config->flags.check_owner;
ESP_LOGD(TAG, "new link list @%p, items @%p", list, items);
@@ -137,13 +130,13 @@ esp_err_t gdma_del_link_list(gdma_link_list_handle_t list)
esp_err_t gdma_link_mount_buffers(gdma_link_list_handle_t list, int start_item_index, const gdma_buffer_mount_config_t *buf_config_array, size_t num_buf, int *end_item_index)
{
ESP_RETURN_ON_FALSE(list && buf_config_array && num_buf, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
size_t buffer_alignment = list->buffer_alignment;
if(!list || !buf_config_array || !num_buf) {
return ESP_ERR_INVALID_ARG;
}
size_t item_size = list->item_size;
uint32_t list_item_capacity = list->num_items;
// ensure the start_item_index is between 0 and `list_item_capacity - 1`
start_item_index = (start_item_index % list_item_capacity + list_item_capacity) % list_item_capacity;
size_t max_buffer_mount_length = ALIGN_DOWN(GDMA_MAX_BUFFER_SIZE_PER_LINK_ITEM, buffer_alignment);
uint32_t begin_item_idx = start_item_index;
gdma_link_list_item_t *lli_nc = NULL;
@@ -165,13 +158,19 @@ esp_err_t gdma_link_mount_buffers(gdma_link_list_handle_t list, int start_item_i
const gdma_buffer_mount_config_t *config = &buf_config_array[bi];
uint8_t *buf = (uint8_t *)config->buffer;
size_t len = config->length;
size_t buffer_alignment = config->buffer_alignment;
if (buffer_alignment == 0) {
buffer_alignment = 1;
}
// check the buffer alignment
ESP_RETURN_ON_FALSE_ISR((buffer_alignment & (buffer_alignment - 1)) == 0, ESP_ERR_INVALID_ARG, TAG, "invalid buffer alignment: %"PRIu32"", buffer_alignment);
size_t max_buffer_mount_length = ALIGN_DOWN(GDMA_MAX_BUFFER_SIZE_PER_LINK_ITEM, buffer_alignment);
if (!config->flags.bypass_buffer_align_check) {
ESP_RETURN_ON_FALSE(((uintptr_t)buf & (buffer_alignment - 1)) == 0, ESP_ERR_INVALID_ARG, TAG, "buffer not aligned to %zu", buffer_alignment);
ESP_RETURN_ON_FALSE_ISR(((uintptr_t)buf & (buffer_alignment - 1)) == 0, ESP_ERR_INVALID_ARG, TAG, "buffer not aligned to %"PRIu32"", buffer_alignment);
}
uint32_t num_items_need = (len + max_buffer_mount_length - 1) / max_buffer_mount_length;
// check if there are enough link list items
ESP_RETURN_ON_FALSE((begin_item_idx + num_items_need) <= (start_item_index + num_items_avail), ESP_ERR_INVALID_ARG, TAG, "no more space for buffer mounting");
ESP_RETURN_ON_FALSE_ISR((begin_item_idx + num_items_need) <= (start_item_index + num_items_avail), ESP_ERR_INVALID_ARG, TAG, "no more space for buffer mounting");
begin_item_idx += num_items_need;
}
@@ -184,6 +183,11 @@ esp_err_t gdma_link_mount_buffers(gdma_link_list_handle_t list, int start_item_i
const gdma_buffer_mount_config_t *config = &buf_config_array[bi];
uint8_t *buf = (uint8_t *)config->buffer;
size_t len = config->length;
size_t buffer_alignment = config->buffer_alignment;
if (buffer_alignment == 0) {
buffer_alignment = 1;
}
size_t max_buffer_mount_length = ALIGN_DOWN(GDMA_MAX_BUFFER_SIZE_PER_LINK_ITEM, buffer_alignment);
// skip zero-length buffer
if (len == 0 || buf == NULL) {
continue;
@@ -221,13 +225,17 @@ esp_err_t gdma_link_mount_buffers(gdma_link_list_handle_t list, int start_item_i
uintptr_t gdma_link_get_head_addr(gdma_link_list_handle_t list)
{
ESP_RETURN_ON_FALSE(list, 0, TAG, "invalid argument");
if (!list) {
return 0;
}
return (uintptr_t)(list->items);
}
esp_err_t gdma_link_concat(gdma_link_list_handle_t first_link, int first_link_item_index, gdma_link_list_handle_t second_link, int second_link_item_index)
{
ESP_RETURN_ON_FALSE(first_link, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
if(!first_link) {
return ESP_ERR_INVALID_ARG;
}
gdma_link_list_item_t *lli_nc = NULL;
// ensure the first_link_item_index is between 0 and `num_items - 1`
int num_items = first_link->num_items;
@@ -247,7 +255,9 @@ esp_err_t gdma_link_concat(gdma_link_list_handle_t first_link, int first_link_it
esp_err_t gdma_link_set_owner(gdma_link_list_handle_t list, int item_index, gdma_lli_owner_t owner)
{
ESP_RETURN_ON_FALSE_ISR(list, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
if (!list) {
return ESP_ERR_INVALID_ARG;
}
int num_items = list->num_items;
// ensure the item_index is between 0 and `num_items - 1`
item_index = (item_index % num_items + num_items) % num_items;
@@ -258,7 +268,9 @@ esp_err_t gdma_link_set_owner(gdma_link_list_handle_t list, int item_index, gdma
esp_err_t gdma_link_get_owner(gdma_link_list_handle_t list, int item_index, gdma_lli_owner_t *owner)
{
ESP_RETURN_ON_FALSE_ISR(list && owner, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
if(!list || !owner) {
return ESP_ERR_INVALID_ARG;
}
int num_items = list->num_items;
// ensure the item_index is between 0 and `num_items - 1`
item_index = (item_index % num_items + num_items) % num_items;
@@ -314,7 +326,9 @@ size_t gdma_link_get_length(gdma_link_list_handle_t list, int item_index)
esp_err_t gdma_link_set_length(gdma_link_list_handle_t list, int item_index, size_t length)
{
ESP_RETURN_ON_FALSE_ISR(list, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
if(!list) {
return ESP_ERR_INVALID_ARG;
}
int num_items = list->num_items;
// ensure the item_index is between 0 and `num_items - 1`
item_index = (item_index % num_items + num_items) % num_items;

View File

@@ -24,7 +24,6 @@ typedef struct gdma_link_list_t *gdma_link_list_handle_t;
typedef struct {
uint32_t num_items; //!< Number of nodes in the link list
size_t item_alignment; //!< Alignment of each list item required by the DMA. By default, it's 4 bytes alignment.
size_t buffer_alignment; //!< Alignment of each buffer required by the DMA. By default, it's 1 byte alignment.
struct gdma_link_list_flags {
uint32_t items_in_ext_mem: 1; //!< Whether the link list items are allocated from external memory
uint32_t check_owner: 1; //!< Whether the link list is responsible for checking the ownership when mount data buffers
@@ -62,6 +61,7 @@ esp_err_t gdma_del_link_list(gdma_link_list_handle_t list);
*/
typedef struct {
void *buffer; //!< Buffer to be mounted to the DMA link list
size_t buffer_alignment; //!< Alignment of the buffer. By default, it's 1 byte alignment.
size_t length; //!< Number of bytes that are expected to be transferred
struct gdma_buffer_mount_flags {
uint32_t mark_eof: 1; /*!< Whether to mark the list item as the "EOF" item.

View File

@@ -173,7 +173,7 @@ TEST_CASE("GDMA channel allocation", "[GDMA]")
}
static void test_gdma_config_link_list(gdma_channel_handle_t tx_chan, gdma_channel_handle_t rx_chan,
gdma_link_list_handle_t *tx_link_list, gdma_link_list_handle_t *rx_link_list, size_t sram_alignment, bool dma_link_in_ext_mem)
gdma_link_list_handle_t *tx_link_list, gdma_link_list_handle_t *rx_link_list, bool dma_link_in_ext_mem)
{
gdma_strategy_config_t strategy = {
@@ -193,7 +193,6 @@ static void test_gdma_config_link_list(gdma_channel_handle_t tx_chan, gdma_chann
// create DMA link list for TX channel (a singly link with 3 nodes)
gdma_link_list_config_t tx_link_list_config = {
.buffer_alignment = 1,
.item_alignment = 8, // 8-byte alignment required by the AXI-GDMA
.num_items = 3,
.flags = {
@@ -204,7 +203,6 @@ static void test_gdma_config_link_list(gdma_channel_handle_t tx_chan, gdma_chann
TEST_ESP_OK(gdma_new_link_list(&tx_link_list_config, tx_link_list));
// create DMA link list for RX channel
gdma_link_list_config_t rx_link_list_config = {
.buffer_alignment = sram_alignment, // RX buffer should be aligned to the cache line size, because we will do cache invalidate later
.item_alignment = 8, // 8-byte alignment required by the AXI-GDMA
.num_items = 5,
.flags = {
@@ -235,7 +233,7 @@ static void test_gdma_m2m_transaction(gdma_channel_handle_t tx_chan, gdma_channe
gdma_link_list_handle_t tx_link_list = NULL;
gdma_link_list_handle_t rx_link_list = NULL;
test_gdma_config_link_list(tx_chan, rx_chan, &tx_link_list, &rx_link_list, sram_alignment, dma_link_in_ext_mem);
test_gdma_config_link_list(tx_chan, rx_chan, &tx_link_list, &rx_link_list, dma_link_in_ext_mem);
// allocate the source buffer from SRAM
uint8_t *src_data = heap_caps_calloc(1, 128, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
@@ -270,10 +268,12 @@ static void test_gdma_m2m_transaction(gdma_channel_handle_t tx_chan, gdma_channe
gdma_buffer_mount_config_t tx_buf_mount_config[] = {
[0] = {
.buffer = src_data,
.buffer_alignment = 1,
.length = 64,
},
[1] = {
.buffer = src_data + 64,
.buffer_alignment = 1,
.length = 64,
#if !SOC_DMA_CAN_ACCESS_FLASH
.flags = {
@@ -285,6 +285,7 @@ static void test_gdma_m2m_transaction(gdma_channel_handle_t tx_chan, gdma_channe
#if SOC_DMA_CAN_ACCESS_FLASH
[2] = {
.buffer = (void *)src_string,
.buffer_alignment = 1,
.length = src_string_len,
.flags = {
.mark_eof = true,
@@ -297,6 +298,7 @@ static void test_gdma_m2m_transaction(gdma_channel_handle_t tx_chan, gdma_channe
gdma_buffer_mount_config_t rx_buf_mount_config = {
.buffer = dst_data,
.buffer_alignment = sram_alignment, // RX buffer should be aligned to the cache line size, because we will do cache invalidate later
.length = 256,
};
TEST_ESP_OK(gdma_link_mount_buffers(rx_link_list, 0, &rx_buf_mount_config, 1, NULL));
@@ -428,7 +430,7 @@ static void test_gdma_m2m_unaligned_buffer_test(uint8_t *dst_data, uint8_t *src_
gdma_link_list_handle_t tx_link_list = NULL;
gdma_link_list_handle_t rx_link_list = NULL;
test_gdma_config_link_list(tx_chan, rx_chan, &tx_link_list, &rx_link_list, sram_alignment, false);
test_gdma_config_link_list(tx_chan, rx_chan, &tx_link_list, &rx_link_list, false);
// prepare the source data
for (int i = 0; i < data_length; i++) {
@@ -442,6 +444,7 @@ static void test_gdma_m2m_unaligned_buffer_test(uint8_t *dst_data, uint8_t *src_
gdma_buffer_mount_config_t tx_buf_mount_config[] = {
[0] = {
.buffer = src_data,
.buffer_alignment = 1,
.length = data_length,
.flags = {
.mark_eof = true,
@@ -457,6 +460,7 @@ static void test_gdma_m2m_unaligned_buffer_test(uint8_t *dst_data, uint8_t *src_
TEST_ESP_OK(esp_dma_split_rx_buffer_to_cache_aligned(dst_data + offset_len, data_length, &align_array, &stash_buffer));
for (int i = 0; i < 3; i++) {
rx_aligned_buf_mount_config[i].buffer = align_array.aligned_buffer[i].aligned_buffer;
rx_aligned_buf_mount_config[i].buffer_alignment = sram_alignment;
rx_aligned_buf_mount_config[i].length = align_array.aligned_buffer[i].length;
}
TEST_ESP_OK(gdma_link_mount_buffers(rx_link_list, 0, rx_aligned_buf_mount_config, 3, NULL));
@@ -559,7 +563,6 @@ TEST_CASE("GDMA M2M Unaligned RX Buffer Test", "[GDMA][M2M]")
gdma_link_list_handle_t rx_link_list = NULL;
// create DMA link list for TX channel
gdma_link_list_config_t tx_link_list_config = {
.buffer_alignment = 32,
.item_alignment = 8, // 8-byte alignment required by the AXI-GDMA
.num_items = 20,
.flags = {
@@ -569,7 +572,6 @@ TEST_CASE("GDMA M2M Unaligned RX Buffer Test", "[GDMA][M2M]")
TEST_ESP_OK(gdma_new_link_list(&tx_link_list_config, &tx_link_list));
// create DMA link list for RX channel
gdma_link_list_config_t rx_link_list_config = {
.buffer_alignment = 32,
.item_alignment = 8, // 8-byte alignment required by the AXI-GDMA
.num_items = 20,
.flags = {
@@ -603,6 +605,7 @@ TEST_CASE("GDMA M2M Unaligned RX Buffer Test", "[GDMA][M2M]")
gdma_buffer_mount_config_t tx_buf_mount_config = {
.buffer = src_data,
.buffer_alignment = 32,
.length = COPY_SIZE,
.flags = {
.mark_eof = true,
@@ -613,6 +616,7 @@ TEST_CASE("GDMA M2M Unaligned RX Buffer Test", "[GDMA][M2M]")
gdma_buffer_mount_config_t rx_buf_mount_config = {
.buffer = dst_data,
.buffer_alignment = 32,
.length = COPY_SIZE,
.flags = {
.mark_final = true, // using singly list, so terminate the link here

View File

@@ -111,7 +111,6 @@ esp_err_t esp_lcd_new_i80_bus(const esp_lcd_i80_bus_config_t *bus_config, esp_lc
size_t num_dma_nodes = esp_dma_calculate_node_count(max_transfer_bytes, 1, LCD_DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
// create DMA link list
gdma_link_list_config_t dma_link_config = {
.buffer_alignment = 1, // no special buffer alignment for LCD TX buffer
.item_alignment = 4, // 4 bytes alignment for each DMA descriptor
.num_items = num_dma_nodes,
.flags = {
@@ -704,6 +703,7 @@ static void i2s_lcd_trigger_quick_trans_done_event(esp_lcd_i80_bus_handle_t bus)
static uint32_t fake_trigger = 0;
gdma_buffer_mount_config_t mount_config = {
.buffer = &fake_trigger,
.buffer_alignment = 1, // no special buffer alignment for fake trigger
.length = 4,
.flags = {
.mark_eof = true, // mark the "EOF" flag to trigger I2S EOF interrupt
@@ -792,6 +792,7 @@ static IRAM_ATTR void i2s_lcd_default_isr_handler(void *args)
// mount data to DMA links
gdma_buffer_mount_config_t mount_config = {
.buffer = (void *)trans_desc->data,
.buffer_alignment = 1, // no special buffer alignment for LCD TX buffer
.length = trans_desc->data_length,
.flags = {
.mark_eof = true,

View File

@@ -464,9 +464,11 @@ static esp_err_t panel_io_i80_tx_param(esp_lcd_panel_io_t *io, int lcd_cmd, cons
trans_desc->data = (param && param_len) ? bus->format_buffer : NULL;
trans_desc->data_length = trans_desc->data ? param_len : 0;
trans_desc->trans_done_cb = NULL; // no callback for parameter transaction
size_t buffer_alignment = esp_ptr_internal(trans_desc->data) ? bus->int_mem_align : bus->ext_mem_align;
// mount data to DMA links
gdma_buffer_mount_config_t mount_config = {
.buffer = (void *)trans_desc->data,
.buffer_alignment = buffer_alignment,
.length = trans_desc->data_length,
.flags = {
.mark_eof = true,
@@ -599,7 +601,6 @@ static esp_err_t lcd_i80_init_dma_link(esp_lcd_i80_bus_handle_t bus, const esp_l
size_t num_dma_nodes = esp_dma_calculate_node_count(bus->max_transfer_bytes, buffer_alignment, LCD_DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
// create DMA link list
gdma_link_list_config_t dma_link_config = {
.buffer_alignment = buffer_alignment,
.item_alignment = LCD_GDMA_DESCRIPTOR_ALIGN,
.num_items = num_dma_nodes,
.flags = {

View File

@@ -999,7 +999,6 @@ static esp_err_t lcd_rgb_panel_init_trans_link(esp_rgb_panel_t *rgb_panel)
size_t buffer_alignment = rgb_panel->int_mem_align;
size_t num_dma_nodes_per_bounce_buffer = esp_dma_calculate_node_count(rgb_panel->bb_size, buffer_alignment, LCD_DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
gdma_link_list_config_t link_cfg = {
.buffer_alignment = buffer_alignment,
.item_alignment = LCD_GDMA_DESCRIPTOR_ALIGN,
.num_items = num_dma_nodes_per_bounce_buffer * RGB_LCD_PANEL_BOUNCE_BUF_NUM,
.flags = {
@@ -1011,6 +1010,7 @@ static esp_err_t lcd_rgb_panel_init_trans_link(esp_rgb_panel_t *rgb_panel)
gdma_buffer_mount_config_t mount_cfgs[RGB_LCD_PANEL_BOUNCE_BUF_NUM] = {0};
for (int i = 0; i < RGB_LCD_PANEL_BOUNCE_BUF_NUM; i++) {
mount_cfgs[i].buffer = rgb_panel->bounce_buffer[i];
mount_cfgs[i].buffer_alignment = buffer_alignment;
mount_cfgs[i].length = rgb_panel->bb_size;
mount_cfgs[i].flags.mark_eof = true; // we use the DMA EOF interrupt to copy the frame buffer (partially) to the bounce buffer
}
@@ -1019,7 +1019,6 @@ static esp_err_t lcd_rgb_panel_init_trans_link(esp_rgb_panel_t *rgb_panel)
#if RGB_LCD_NEEDS_SEPARATE_RESTART_LINK
// create restart link
gdma_link_list_config_t restart_link_cfg = {
.buffer_alignment = buffer_alignment,
.item_alignment = LCD_GDMA_DESCRIPTOR_ALIGN,
.num_items = 1, // the restart link only contains one node
.flags = {
@@ -1029,6 +1028,7 @@ static esp_err_t lcd_rgb_panel_init_trans_link(esp_rgb_panel_t *rgb_panel)
ESP_RETURN_ON_ERROR(gdma_new_link_list(&restart_link_cfg, &rgb_panel->dma_restart_link), TAG, "create DMA restart link list failed");
gdma_buffer_mount_config_t restart_buffer_mount_cfg = {
.buffer = rgb_panel->bounce_buffer[0] + restart_skip_bytes,
.buffer_alignment = buffer_alignment,
.length = MIN(LCD_DMA_DESCRIPTOR_BUFFER_MAX_SIZE, rgb_panel->bb_size) - restart_skip_bytes,
};
ESP_RETURN_ON_ERROR(gdma_link_mount_buffers(rgb_panel->dma_restart_link, 0, &restart_buffer_mount_cfg, 1, NULL),
@@ -1042,7 +1042,6 @@ static esp_err_t lcd_rgb_panel_init_trans_link(esp_rgb_panel_t *rgb_panel)
size_t buffer_alignment = rgb_panel->flags.fb_in_psram ? rgb_panel->ext_mem_align : rgb_panel->int_mem_align;
uint32_t num_dma_nodes = esp_dma_calculate_node_count(rgb_panel->fb_size, buffer_alignment, LCD_DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
gdma_link_list_config_t link_cfg = {
.buffer_alignment = buffer_alignment,
.item_alignment = LCD_GDMA_DESCRIPTOR_ALIGN,
.num_items = num_dma_nodes,
.flags = {
@@ -1060,13 +1059,13 @@ static esp_err_t lcd_rgb_panel_init_trans_link(esp_rgb_panel_t *rgb_panel)
ESP_RETURN_ON_ERROR(gdma_new_link_list(&link_cfg, &rgb_panel->dma_fb_links[i]), TAG, "create frame buffer DMA link failed");
// mount bounce buffers to the DMA link list
mount_cfg.buffer = rgb_panel->fbs[i];
mount_cfg.buffer_alignment = buffer_alignment;
ESP_RETURN_ON_ERROR(gdma_link_mount_buffers(rgb_panel->dma_fb_links[i], 0, &mount_cfg, 1, NULL),
TAG, "mount DMA frame buffer failed");
}
#if RGB_LCD_NEEDS_SEPARATE_RESTART_LINK
// create restart link
gdma_link_list_config_t restart_link_cfg = {
.buffer_alignment = buffer_alignment,
.item_alignment = LCD_GDMA_DESCRIPTOR_ALIGN,
.num_items = 1, // the restart link only contains one node
.flags = {
@@ -1076,6 +1075,7 @@ static esp_err_t lcd_rgb_panel_init_trans_link(esp_rgb_panel_t *rgb_panel)
ESP_RETURN_ON_ERROR(gdma_new_link_list(&restart_link_cfg, &rgb_panel->dma_restart_link), TAG, "create DMA restart link list failed");
gdma_buffer_mount_config_t restart_buffer_mount_cfg = {
.buffer = rgb_panel->fbs[0] + restart_skip_bytes,
.buffer_alignment = buffer_alignment,
.length = MIN(LCD_DMA_DESCRIPTOR_BUFFER_MAX_SIZE, rgb_panel->fb_size) - restart_skip_bytes,
.flags.bypass_buffer_align_check = true, // the restart buffer may doesn't match the buffer alignment but it doesn't really matter in this case
};