feat(rmt): support receive buffer in PSRAM

Author:       Chen Jichang
Date:         2025-08-25 14:41:48 +08:00
Committed by: Chen Ji Chang
Parent:       ddef1d3d52
Commit:       51b253e945
3 changed files with 19 additions and 10 deletions

File 1 of 3

@@ -225,6 +225,7 @@ struct rmt_rx_channel_t {
     rmt_rx_trans_desc_t trans_desc;   // transaction description
     size_t num_dma_nodes;             // number of DMA nodes, determined by the size of the memory block that the user configures
     size_t dma_int_mem_alignment;     // DMA buffer alignment (both in size and address) for internal RX memory
+    size_t dma_ext_mem_alignment;     // DMA buffer alignment (both in size and address) for external RX memory
     gdma_link_list_handle_t dma_link; // DMA link list handle
 };
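The struct now tracks two alignment requirements because a DMA-backed RX channel may receive into either internal SRAM or PSRAM, and the two memories come with different DMA and cache constraints. For context, a minimal caller-side sketch of allocating a PSRAM receive buffer that satisfies such a constraint; the 64-byte alignment and the buffer size are assumptions for illustration, not values taken from this commit:

    #include "esp_heap_caps.h"

    #define EXAMPLE_RX_BUF_ALIGN 64   // assumed; the real constraint comes from the DMA and the cache line size
    #define EXAMPLE_RX_BUF_SIZE  4096 // kept a multiple of the alignment

    static void *example_alloc_psram_rx_buffer(void)
    {
        // zero-initialized, aligned allocation from external RAM (PSRAM)
        return heap_caps_aligned_calloc(EXAMPLE_RX_BUF_ALIGN, 1, EXAMPLE_RX_BUF_SIZE,
                                        MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT);
    }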

File 2 of 3

@@ -51,12 +51,12 @@ static esp_err_t rmt_rx_init_dma_link(rmt_rx_channel_t *rx_channel, const rmt_rx
     };
     ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_chan_config, &rx_channel->base.dma_chan), TAG, "allocate RX DMA channel failed");
     gdma_transfer_config_t transfer_cfg = {
-        .access_ext_mem = false, // [IDF-8997]: PSRAM is not supported yet
+        .access_ext_mem = true, // allow the receive buffer to be located in PSRAM
         .max_data_burst_size = 32,
     };
     ESP_RETURN_ON_ERROR(gdma_config_transfer(rx_channel->base.dma_chan, &transfer_cfg), TAG, "config DMA transfer failed");
     // get the alignment requirements from the DMA
-    gdma_get_alignment_constraints(rx_channel->base.dma_chan, &rx_channel->dma_int_mem_alignment, NULL);
+    gdma_get_alignment_constraints(rx_channel->base.dma_chan, &rx_channel->dma_int_mem_alignment, &rx_channel->dma_ext_mem_alignment);
     // register event callbacks
     gdma_rx_event_callbacks_t cbs = {
@@ -65,8 +65,9 @@ static esp_err_t rmt_rx_init_dma_link(rmt_rx_channel_t *rx_channel, const rmt_rx
     // registering the DMA callbacks may fail if the interrupt service cannot be installed successfully
     ESP_RETURN_ON_ERROR(gdma_register_rx_event_callbacks(rx_channel->base.dma_chan, &cbs, rx_channel), TAG, "register DMA callbacks failed");
+    size_t buffer_alignment = MAX(rx_channel->dma_int_mem_alignment, rx_channel->dma_ext_mem_alignment);
     rx_channel->num_dma_nodes = esp_dma_calculate_node_count(config->mem_block_symbols * sizeof(rmt_symbol_word_t),
-                                                             rx_channel->dma_int_mem_alignment, DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
+                                                             buffer_alignment, DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
     rx_channel->num_dma_nodes = MAX(2, rx_channel->num_dma_nodes); // at least 2 DMA nodes for ping-pong
     // create DMA link list
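esp_dma_calculate_node_count() is a private IDF helper; roughly, it answers how many DMA descriptors are needed when each node can carry at most DMA_DESCRIPTOR_BUFFER_MAX_SIZE bytes rounded down to the buffer alignment. A sketch of that arithmetic, re-derived here under the assumption of a power-of-two alignment (not the helper's actual source):

    static size_t example_node_count(size_t buffer_size, size_t align, size_t max_node_size)
    {
        // largest payload one descriptor may carry while staying aligned
        size_t per_node = max_node_size & ~(align - 1);
        // ceiling division: how many such nodes cover the whole buffer
        return (buffer_size + per_node - 1) / per_node;
    }

The result is then clamped to at least 2 so reception can ping-pong: the DMA fills one node while the CPU drains the other.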
@@ -355,12 +356,14 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_
 #if SOC_RMT_SUPPORT_DMA
     uint32_t int_mem_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
+    uint32_t ext_mem_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_DATA);
     if (channel->dma_chan) {
-        // [IDF-8997]: Currently we assume the user buffer is allocated from internal RAM, PSRAM is not supported yet.
-        ESP_RETURN_ON_FALSE_ISR(esp_ptr_internal(buffer), ESP_ERR_INVALID_ARG, TAG, "user buffer not in the internal RAM");
         // combine the alignment requirements from the DMA and the cache line size
+        if (esp_ptr_external_ram(buffer)) {
+            mem_alignment = MAX(MAX(mem_alignment, rx_chan->dma_ext_mem_alignment), ext_mem_cache_line_size);
+        } else {
+            mem_alignment = MAX(MAX(mem_alignment, rx_chan->dma_int_mem_alignment), int_mem_cache_line_size);
+        }
     }
 #endif // SOC_RMT_SUPPORT_DMA
     // Align the buffer address to mem_alignment
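With the esp_ptr_internal() check removed, receiving into PSRAM requires no new API; the driver simply derives the stricter alignment when the buffer lives in external RAM. A minimal call-site sketch, assuming rx_channel was created with DMA enabled and rx_buf was allocated in PSRAM as sketched above (the timing values are placeholders):

    #include "driver/rmt_rx.h"

    static void example_start_receive(rmt_channel_handle_t rx_channel, void *rx_buf)
    {
        rmt_receive_config_t rx_cfg = {
            .signal_range_min_ns = 1250,     // placeholder: shorter pulses are treated as glitches
            .signal_range_max_ns = 12000000, // placeholder: longer pulses mark the end of a frame
        };
        ESP_ERROR_CHECK(rmt_receive(rx_channel, rx_buf, EXAMPLE_RX_BUF_SIZE, &rx_cfg));
    }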
@@ -411,8 +414,9 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_
 #if SOC_RMT_SUPPORT_DMA
     if (channel->dma_chan) {
         // invalidate the user buffer, in case a cache auto write-back happens and corrupts the data just written by the DMA
-        if (int_mem_cache_line_size) {
-            // this function will also check the alignment of the buffer and size, against the cache line size
+        uint32_t data_cache_line_size = esp_cache_get_line_size_by_addr(buffer);
+        if (data_cache_line_size) {
+            // invalidate the user buffer, so that the data modified by the DMA can be seen by the CPU
             ESP_RETURN_ON_ERROR_ISR(esp_cache_msync(buffer, buffer_size, ESP_CACHE_MSYNC_FLAG_DIR_M2C), TAG, "cache sync failed");
         }
         // we will mount the buffer to multiple DMA nodes, in a balanced way
// we will mount the buffer to multiple DMA nodes, in a balanced way
@@ -805,7 +809,7 @@ static bool rmt_dma_rx_one_block_cb(gdma_channel_handle_t dma_chan, gdma_event_d
     rmt_rx_trans_desc_t *trans_desc = &rx_chan->trans_desc;
     uint32_t channel_id = channel->channel_id;
-    uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
+    uint32_t data_cache_line_size = esp_cache_get_line_size_by_addr(trans_desc->buffer);
     if (data_cache_line_size) {
         // invalidate the user buffer, so that the data modified by the DMA can be seen by the CPU
         esp_cache_msync(trans_desc->buffer, trans_desc->buffer_size, ESP_CACHE_MSYNC_FLAG_DIR_M2C);

File 3 of 3

@@ -0,0 +1,4 @@
+CONFIG_SPIRAM=y
+CONFIG_SPIRAM_MODE_HEX=y
+CONFIG_SPIRAM_SPEED_200M=y
+CONFIG_IDF_EXPERIMENTAL_FEATURES=y
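This new four-line fragment is a test configuration: CONFIG_SPIRAM enables external PSRAM support, CONFIG_SPIRAM_MODE_HEX selects hex-mode (16-data-line) PSRAM, and running the PSRAM clock at 200 MHz is currently gated behind CONFIG_IDF_EXPERIMENTAL_FEATURES, hence the last option. Presumably it drives a CI build that exercises RMT reception into a PSRAM buffer.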