Merge branch 'refactor/use_gdma_link_in_rmt' into 'master'

refactor(rmt): use gdma link list driver to mount buffer

Closes IDFGH-15289 and IDFGH-15421

See merge request espressif/esp-idf!39350
Author: Chen Ji Chang
Committed: 2025-08-14 10:49:19 +08:00
31 changed files with 414 additions and 208 deletions

View File

@@ -302,7 +302,7 @@ esp_err_t rmt_tx_stop(rmt_channel_t channel)
{
ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
RMT_ENTER_CRITICAL();
-#if SOC_RMT_SUPPORT_TX_ASYNC_STOP
+#if SOC_RMT_SUPPORT_ASYNC_STOP
rmt_ll_tx_stop(rmt_contex.hal.regs, channel);
#else
// write ending marker to stop the TX channel

View File

@@ -44,6 +44,7 @@ typedef struct {
uint32_t with_dma: 1; /*!< If set, the driver will allocate an RMT channel with DMA capability */
uint32_t allow_pd: 1; /*!< If set, driver allows the power domain to be powered off when system enters sleep mode.
This can save power, but at the expense of more RAM being consumed to save register context. */
+        uint32_t init_level: 1; /*!< Set the initial level of the RMT channel signal */
} flags; /*!< TX channel config flags */
} rmt_tx_channel_config_t;
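For reference, the new flag plugs straight into channel creation: `rmt_new_tx_channel()` forwards it to `rmt_ll_tx_fix_idle_level()` (see the rmt_tx.c hunk further down), so the line can idle high from the moment the channel exists. A minimal usage sketch (pin and sizing values are illustrative):

    #include "esp_err.h"
    #include "driver/rmt_tx.h"

    rmt_tx_channel_config_t tx_cfg = {
        .gpio_num = 18,                   // illustrative pin
        .clk_src = RMT_CLK_SRC_DEFAULT,
        .resolution_hz = 1000000,         // 1 MHz, 1 tick = 1 us
        .mem_block_symbols = 64,
        .trans_queue_depth = 4,
        .flags.init_level = 1,            // drive the line high while idle
    };
    rmt_channel_handle_t tx_chan = NULL;
    ESP_ERROR_CHECK(rmt_new_tx_channel(&tx_cfg, &tx_chan));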

View File

@@ -30,3 +30,21 @@ entries:
if RMT_RECV_FUNC_IN_IRAM = y:
rmt_rx: rmt_receive (noflash)
+[mapping:rmt_driver_gdma_link]
+archive: libesp_hw_support.a
+entries:
+    if RMT_TX_ISR_HANDLER_IN_IRAM = y && SOC_RMT_SUPPORT_DMA = y:
+        gdma_link: gdma_link_mount_buffers (noflash)
+        gdma_link: gdma_link_set_owner (noflash)
+        gdma_link: gdma_link_get_head_addr (noflash)
+        gdma_link: gdma_link_set_length (noflash)
+        gdma_link: gdma_link_concat (noflash)
+        gdma_link: gdma_link_check_end (noflash)
+    if RMT_RX_ISR_HANDLER_IN_IRAM = y && SOC_RMT_SUPPORT_DMA = y:
+        gdma_link: gdma_link_mount_buffers (noflash)
+        gdma_link: gdma_link_count_buffer_size_till_eof (noflash)
+        gdma_link: gdma_link_get_head_addr (noflash)
+        gdma_link: gdma_link_get_length (noflash)
+        gdma_link: gdma_link_get_buffer (noflash)
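These placements matter because, with CONFIG_RMT_TX_ISR_HANDLER_IN_IRAM (or its RX counterpart) enabled, the RMT interrupt keeps running while the flash cache is disabled, so every gdma_link helper it calls must live in IRAM rather than flash. The same rule applies to user callbacks fired from that ISR; a sketch (the callback body is illustrative):

    #include "driver/rmt_tx.h"
    #include "esp_attr.h"

    // invoked from the IRAM-resident ISR path, so it must not touch flash-resident code or data
    static IRAM_ATTR bool example_on_trans_done(rmt_channel_handle_t chan,
                                                const rmt_tx_done_event_data_t *edata,
                                                void *user_ctx)
    {
        return false; // no high-priority task was woken
    }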

View File

@@ -32,8 +32,8 @@ static size_t rmt_encode_bs(rmt_encoder_t *encoder, rmt_channel_handle_t channel
rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
uint8_t *input_bytes = (uint8_t *)input_raw;
rmt_encode_state_t state = RMT_ENCODING_RESET;
-    rmt_dma_descriptor_t *desc0 = NULL;
-    rmt_dma_descriptor_t *desc1 = NULL;
+    uint8_t dma_lli0_index = 0;
+    uint8_t dma_lli1_index = 0;
// bitscrambler encoder must be used with a TX channel with DMA enabled
assert(tx_chan->base.dma_chan != NULL);
@@ -52,9 +52,9 @@ static size_t rmt_encode_bs(rmt_encoder_t *encoder, rmt_channel_handle_t channel
// mark the start descriptor
if (tx_chan->mem_off_bytes < tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t)) {
-        desc0 = &tx_chan->dma_nodes_nc[0];
+        dma_lli0_index = 0;
} else {
-        desc0 = &tx_chan->dma_nodes_nc[1];
+        dma_lli0_index = 1;
}
size_t len = copy_len;
@@ -65,15 +65,14 @@ static size_t rmt_encode_bs(rmt_encoder_t *encoder, rmt_channel_handle_t channel
// mark the end descriptor
if (tx_chan->mem_off_bytes < tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t)) {
-        desc1 = &tx_chan->dma_nodes_nc[0];
+        dma_lli1_index = 0;
} else {
-        desc1 = &tx_chan->dma_nodes_nc[1];
+        dma_lli1_index = 1;
}
    // crossing the ping-pong boundary means the first descriptor has been filled with sufficient data
-    if (desc0 != desc1) {
-        desc0->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
-        desc0->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+    if (dma_lli0_index != dma_lli1_index) {
+        gdma_link_set_owner(tx_chan->dma_link, dma_lli0_index, GDMA_LLI_OWNER_DMA);
}
if (encoding_truncated) {
@@ -94,8 +93,7 @@ static size_t rmt_encode_bs(rmt_encoder_t *encoder, rmt_channel_handle_t channel
// reset offset pointer when exceeds maximum range
if (tx_chan->mem_off_bytes >= tx_chan->ping_pong_symbols * 2 * sizeof(rmt_symbol_word_t)) {
-        desc1->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
-        desc1->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+        gdma_link_set_owner(tx_chan->dma_link, dma_lli1_index, GDMA_LLI_OWNER_DMA);
tx_chan->mem_off_bytes = 0;
}
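This same rewrite repeats in every encoder below: the two link-list nodes each own half of the ping-pong buffer, the node is picked from the current write offset, and a node is handed over with `gdma_link_set_owner()` only after the offset has crossed into the other half. A condensed sketch of the selection logic (the helper name is illustrative, not driver API):

    #include <stddef.h>
    #include "driver/rmt_types.h"

    // node 0 covers the first half of the ping-pong buffer, node 1 the second half
    static inline int example_pick_node(size_t mem_off_bytes, size_t ping_pong_symbols)
    {
        return (mem_off_bytes < ping_pong_symbols * sizeof(rmt_symbol_word_t)) ? 0 : 1;
    }
    // after an encoding pass: if the end node differs from the start node, the start
    // node is full and its ownership can be flipped to the DMA engine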

View File

@@ -36,8 +36,6 @@ static size_t rmt_encode_bytes(rmt_encoder_t *encoder, rmt_channel_handle_t chan
rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
const uint8_t *raw_data = (const uint8_t *)primary_data;
rmt_encode_state_t state = RMT_ENCODING_RESET;
-    rmt_dma_descriptor_t *desc0 = NULL;
-    rmt_dma_descriptor_t *desc1 = NULL;
size_t byte_index = bytes_encoder->last_byte_index;
size_t bit_index = bytes_encoder->last_bit_index;
@@ -58,14 +56,18 @@ static size_t rmt_encode_bytes(rmt_encoder_t *encoder, rmt_channel_handle_t chan
bool encoding_truncated = mem_have < mem_want;
bool encoding_space_free = mem_have > mem_want;
#if SOC_RMT_SUPPORT_DMA
+    uint8_t dma_lli0_index = 0;
+    uint8_t dma_lli1_index = 0;
if (channel->dma_chan) {
// mark the start descriptor
if (symbol_off < tx_chan->ping_pong_symbols) {
-            desc0 = &tx_chan->dma_nodes_nc[0];
+            dma_lli0_index = 0;
} else {
-            desc0 = &tx_chan->dma_nodes_nc[1];
+            dma_lli0_index = 1;
}
}
#endif // SOC_RMT_SUPPORT_DMA
size_t len = encode_len;
while (len > 0) {
@@ -90,20 +92,21 @@ static size_t rmt_encode_bytes(rmt_encoder_t *encoder, rmt_channel_handle_t chan
}
}
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
// mark the end descriptor
if (symbol_off < tx_chan->ping_pong_symbols) {
-            desc1 = &tx_chan->dma_nodes_nc[0];
+            dma_lli1_index = 0;
} else {
-            desc1 = &tx_chan->dma_nodes_nc[1];
+            dma_lli1_index = 1;
}
        // crossing the ping-pong boundary means the first descriptor has been filled with sufficient data
-        if (desc0 != desc1) {
-            desc0->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
-            desc0->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+        if (dma_lli0_index != dma_lli1_index) {
+            gdma_link_set_owner(tx_chan->dma_link, dma_lli0_index, GDMA_LLI_OWNER_DMA);
}
}
#endif // SOC_RMT_SUPPORT_DMA
if (encoding_truncated) {
// this encoding has not finished yet, save the truncated position
@@ -123,10 +126,11 @@ static size_t rmt_encode_bytes(rmt_encoder_t *encoder, rmt_channel_handle_t chan
// reset offset pointer when exceeds maximum range
if (symbol_off >= tx_chan->ping_pong_symbols * 2) {
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
-            desc1->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
-            desc1->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+            gdma_link_set_owner(tx_chan->dma_link, dma_lli1_index, GDMA_LLI_OWNER_DMA);
}
#endif // SOC_RMT_SUPPORT_DMA
tx_chan->mem_off_bytes = 0;
} else {
tx_chan->mem_off_bytes = symbol_off * sizeof(rmt_symbol_word_t);

View File

@@ -28,8 +28,6 @@ static size_t rmt_encode_copy(rmt_encoder_t *encoder, rmt_channel_handle_t chann
rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
rmt_symbol_word_t *symbols = (rmt_symbol_word_t *)input_symbols;
rmt_encode_state_t state = RMT_ENCODING_RESET;
-    rmt_dma_descriptor_t *desc0 = NULL;
-    rmt_dma_descriptor_t *desc1 = NULL;
size_t symbol_index = copy_encoder->last_symbol_index;
// how many symbols will be copied by the encoder
@@ -49,14 +47,18 @@ static size_t rmt_encode_copy(rmt_encoder_t *encoder, rmt_channel_handle_t chann
bool encoding_truncated = mem_have < mem_want;
bool encoding_space_free = mem_have > mem_want;
#if SOC_RMT_SUPPORT_DMA
+    uint8_t dma_lli0_index = 0;
+    uint8_t dma_lli1_index = 0;
if (channel->dma_chan) {
// mark the start descriptor
if (symbol_off < tx_chan->ping_pong_symbols) {
-            desc0 = &tx_chan->dma_nodes_nc[0];
+            dma_lli0_index = 0;
} else {
-            desc0 = &tx_chan->dma_nodes_nc[1];
+            dma_lli0_index = 1;
}
}
#endif // SOC_RMT_SUPPORT_DMA
size_t len = encode_len;
while (len > 0) {
@@ -64,20 +66,21 @@ static size_t rmt_encode_copy(rmt_encoder_t *encoder, rmt_channel_handle_t chann
len--;
}
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
// mark the end descriptor
if (symbol_off < tx_chan->ping_pong_symbols) {
-            desc1 = &tx_chan->dma_nodes_nc[0];
+            dma_lli1_index = 0;
} else {
-            desc1 = &tx_chan->dma_nodes_nc[1];
+            dma_lli1_index = 1;
}
        // crossing the ping-pong boundary means the first descriptor has been filled with sufficient data
-        if (desc0 != desc1) {
-            desc0->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
-            desc0->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+        if (dma_lli0_index != dma_lli1_index) {
+            gdma_link_set_owner(tx_chan->dma_link, dma_lli0_index, GDMA_LLI_OWNER_DMA);
}
}
#endif // SOC_RMT_SUPPORT_DMA
if (encoding_truncated) {
// this encoding has not finished yet, save the truncated position
@@ -95,10 +98,11 @@ static size_t rmt_encode_copy(rmt_encoder_t *encoder, rmt_channel_handle_t chann
// reset offset pointer when exceeds maximum range
if (symbol_off >= tx_chan->ping_pong_symbols * 2) {
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
-            desc1->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
-            desc1->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+            gdma_link_set_owner(tx_chan->dma_link, dma_lli1_index, GDMA_LLI_OWNER_DMA);
}
#endif // SOC_RMT_SUPPORT_DMA
tx_chan->mem_off_bytes = 0;
} else {
tx_chan->mem_off_bytes = symbol_off * sizeof(rmt_symbol_word_t);

View File

@@ -37,8 +37,6 @@ static size_t rmt_encode_simple(rmt_encoder_t *encoder, rmt_channel_handle_t cha
rmt_simple_encoder_t *simple_encoder = __containerof(encoder, rmt_simple_encoder_t, base);
rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
rmt_encode_state_t state = RMT_ENCODING_RESET;
-    rmt_dma_descriptor_t *desc0 = NULL;
-    rmt_dma_descriptor_t *desc1 = NULL;
size_t symbol_off = tx_chan->mem_off_bytes / sizeof(rmt_symbol_word_t);
// where to put the encoded symbols? DMA buffer or RMT HW memory
@@ -49,14 +47,18 @@ static size_t rmt_encode_simple(rmt_encoder_t *encoder, rmt_channel_handle_t cha
mem_to_nc = channel->hw_mem_base;
}
#if SOC_RMT_SUPPORT_DMA
+    uint8_t dma_lli0_index = 0;
+    uint8_t dma_lli1_index = 0;
if (channel->dma_chan) {
// mark the start descriptor
if (symbol_off < tx_chan->ping_pong_symbols) {
-            desc0 = &tx_chan->dma_nodes_nc[0];
+            dma_lli0_index = 0;
} else {
-            desc0 = &tx_chan->dma_nodes_nc[1];
+            dma_lli0_index = 1;
}
}
#endif // SOC_RMT_SUPPORT_DMA
// While we're not done, we need to use the callback to fill the RMT memory until it is
// exactly entirely full. We cannot do that if the RMT memory still has N free spaces
@@ -131,20 +133,21 @@ static size_t rmt_encode_simple(rmt_encoder_t *encoder, rmt_channel_handle_t cha
}
}
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
// mark the end descriptor
if (symbol_off < tx_chan->ping_pong_symbols) {
-            desc1 = &tx_chan->dma_nodes_nc[0];
+            dma_lli1_index = 0;
} else {
-            desc1 = &tx_chan->dma_nodes_nc[1];
+            dma_lli1_index = 1;
}
        // crossing the ping-pong boundary means the first descriptor has been filled with sufficient data
-        if (desc0 != desc1) {
-            desc0->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
-            desc0->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+        if (dma_lli0_index != dma_lli1_index) {
+            gdma_link_set_owner(tx_chan->dma_link, dma_lli0_index, GDMA_LLI_OWNER_DMA);
}
}
#endif // SOC_RMT_SUPPORT_DMA
if (is_done) {
// reset internal index if encoding session has finished
@@ -158,10 +161,11 @@ static size_t rmt_encode_simple(rmt_encoder_t *encoder, rmt_channel_handle_t cha
// reset offset pointer when exceeds maximum range
if (symbol_off >= tx_chan->ping_pong_symbols * 2) {
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
-            desc1->dw0.length = tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
-            desc1->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
+            gdma_link_set_owner(tx_chan->dma_link, dma_lli1_index, GDMA_LLI_OWNER_DMA);
}
#endif // SOC_RMT_SUPPORT_DMA
tx_chan->mem_off_bytes = 0;
} else {
tx_chan->mem_off_bytes = symbol_off * sizeof(rmt_symbol_word_t);

View File

@@ -41,11 +41,13 @@
#include "esp_pm.h"
#include "esp_attr.h"
#include "esp_private/gdma.h"
#include "esp_private/gdma_link.h"
#include "esp_private/esp_gpio_reserve.h"
#include "esp_private/gpio.h"
#include "esp_private/sleep_retention.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/esp_clk_tree_common.h"
#include "esp_private/esp_dma_utils.h"
#include "driver/rmt_types.h"
#ifdef __cplusplus
@@ -83,7 +85,6 @@ extern "C" {
// RMT is a slow peripheral, it only supports AHB-GDMA
#define RMT_DMA_DESC_ALIGN 4
-typedef dma_descriptor_align4_t rmt_dma_descriptor_t;
#ifdef CACHE_LL_L2MEM_NON_CACHE_ADDR
#define RMT_GET_NON_CACHE_ADDR(addr) (CACHE_LL_L2MEM_NON_CACHE_ADDR(addr))
@@ -199,8 +200,7 @@ struct rmt_tx_channel_t {
rmt_tx_trans_desc_t *cur_trans; // points to current transaction
void *user_data; // user context
rmt_tx_done_callback_t on_trans_done; // callback, invoked on trans done
-    rmt_dma_descriptor_t *dma_nodes; // DMA descriptor nodes
-    rmt_dma_descriptor_t *dma_nodes_nc; // DMA descriptor nodes accessed in non-cached way
+    gdma_link_list_handle_t dma_link; // DMA link list handle
rmt_tx_trans_desc_t trans_desc_pool[]; // transfer descriptor pool
};
@@ -225,8 +225,7 @@ struct rmt_rx_channel_t {
rmt_rx_trans_desc_t trans_desc; // transaction description
size_t num_dma_nodes; // number of DMA nodes, determined by how big the memory block that user configures
size_t dma_int_mem_alignment; // DMA buffer alignment (both in size and address) for internal RX memory
-    rmt_dma_descriptor_t *dma_nodes; // DMA link nodes
-    rmt_dma_descriptor_t *dma_nodes_nc; // DMA descriptor nodes accessed in non-cached way
+    gdma_link_list_handle_t dma_link; // DMA link list handle
};
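Both channel structs now hold a single opaque `gdma_link_list_handle_t` instead of the cached/non-cached descriptor array pair, which moves the cache-sync bookkeeping into the gdma_link driver. Creating such a handle is one call; a minimal sketch with illustrative sizing (the config fields match the ones used in the rmt_rx.c and rmt_tx.c hunks below):

    #include "esp_private/gdma_link.h"

    static esp_err_t example_create_link(gdma_link_list_handle_t *ret_link)
    {
        gdma_link_list_config_t cfg = {
            .buffer_alignment = 4,  // alignment of the buffers to be mounted later
            .item_alignment = 4,    // AHB-GDMA descriptors are 4-byte aligned
            .num_items = 2,         // two nodes suffice for a ping-pong scheme
            .flags.check_owner = true,
        };
        return gdma_new_link_list(&cfg, ret_link);
    }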
/**

View File

@@ -25,15 +25,19 @@ __attribute__((always_inline))
static inline void rmt_rx_mount_dma_buffer(rmt_rx_channel_t *rx_chan, const void *buffer, size_t buffer_size, size_t per_block_size, size_t last_block_size)
{
uint8_t *data = (uint8_t *)buffer;
+    gdma_buffer_mount_config_t mount_configs[rx_chan->num_dma_nodes];
+    memset(mount_configs, 0, sizeof(mount_configs));
    for (int i = 0; i < rx_chan->num_dma_nodes; i++) {
-        rmt_dma_descriptor_t *desc_nc = &rx_chan->dma_nodes_nc[i];
-        desc_nc->buffer = data + i * per_block_size;
-        desc_nc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
-        desc_nc->dw0.suc_eof = 0;
-        desc_nc->dw0.length = 0;
-        desc_nc->dw0.size = per_block_size;
+        mount_configs[i] = (gdma_buffer_mount_config_t) {
+            .buffer = data + i * per_block_size,
+            .length = per_block_size,
+            .flags = {
+                .mark_final = false,
+            }
+        };
    }
-    rx_chan->dma_nodes_nc[rx_chan->num_dma_nodes - 1].dw0.size = last_block_size;
+    mount_configs[rx_chan->num_dma_nodes - 1].length = last_block_size;
+    gdma_link_mount_buffers(rx_chan->dma_link, 0, mount_configs, rx_chan->num_dma_nodes, NULL);
}
static esp_err_t rmt_rx_init_dma_link(rmt_rx_channel_t *rx_channel, const rmt_rx_channel_config_t *config)
@@ -60,11 +64,21 @@ static esp_err_t rmt_rx_init_dma_link(rmt_rx_channel_t *rx_channel, const rmt_rx
// register the DMA callbacks may fail if the interrupt service can not be installed successfully
ESP_RETURN_ON_ERROR(gdma_register_rx_event_callbacks(rx_channel->base.dma_chan, &cbs, rx_channel), TAG, "register DMA callbacks failed");
-    // circular DMA descriptor
-    for (int i = 0; i < rx_channel->num_dma_nodes - 1; i++) {
-        rx_channel->dma_nodes_nc[i].next = &rx_channel->dma_nodes[i + 1];
-    }
-    rx_channel->dma_nodes_nc[rx_channel->num_dma_nodes - 1].next = &rx_channel->dma_nodes[0];
+    rx_channel->num_dma_nodes = esp_dma_calculate_node_count(config->mem_block_symbols * sizeof(rmt_symbol_word_t),
+                                                             rx_channel->dma_int_mem_alignment, DMA_DESCRIPTOR_BUFFER_MAX_SIZE);
+    rx_channel->num_dma_nodes = MAX(2, rx_channel->num_dma_nodes); // at least 2 DMA nodes for ping-pong
+    // create DMA link list
+    gdma_link_list_config_t dma_link_config = {
+        .buffer_alignment = rx_channel->dma_int_mem_alignment,
+        .item_alignment = RMT_DMA_DESC_ALIGN,
+        .num_items = rx_channel->num_dma_nodes,
+        .flags = {
+            // the reception may be interrupted by rmt_rx_disable(), DMA may not have accessed the descriptor yet
+            .check_owner = false,
+        },
+    };
+    ESP_RETURN_ON_ERROR(gdma_new_link_list(&dma_link_config, &rx_channel->dma_link), TAG, "create DMA link list failed");
return ESP_OK;
}
#endif // SOC_RMT_SUPPORT_DMA
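Sizing now goes through `esp_dma_calculate_node_count()` rather than the hand-rolled `size / DMA_DESCRIPTOR_BUFFER_MAX_SIZE + 1` in the deleted block further down. A worked example (values illustrative; assumes the usual 4095-byte AHB-GDMA node limit):

    #include <stddef.h>
    #include "esp_private/esp_dma_utils.h"

    void example_node_count(void)
    {
        // 512 symbols * 4 bytes = 2048 bytes; with 4-byte alignment this fits in
        // a single node of up to 4095 bytes...
        size_t num_nodes = esp_dma_calculate_node_count(512 * 4, 4, 4095);
        // ...so the driver then raises the count to keep ping-pong working
        if (num_nodes < 2) {
            num_nodes = 2; // at least 2 DMA nodes for ping-pong
        }
        (void)num_nodes;
    }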
@@ -156,14 +170,14 @@ static esp_err_t rmt_rx_destroy(rmt_rx_channel_t *rx_channel)
if (rx_channel->base.dma_chan) {
ESP_RETURN_ON_ERROR(gdma_del_channel(rx_channel->base.dma_chan), TAG, "delete dma channel failed");
}
+    if (rx_channel->dma_link) {
+        ESP_RETURN_ON_ERROR(gdma_del_link_list(rx_channel->dma_link), TAG, "delete dma link list failed");
+    }
#endif // SOC_RMT_SUPPORT_DMA
if (rx_channel->base.group) {
// de-register channel from RMT group
rmt_rx_unregister_from_group(&rx_channel->base, rx_channel->base.group);
}
-    if (rx_channel->dma_nodes) {
-        free(rx_channel->dma_nodes);
-    }
free(rx_channel);
return ESP_OK;
}
@@ -202,31 +216,6 @@ esp_err_t rmt_new_rx_channel(const rmt_rx_channel_config_t *config, rmt_channel_
// gpio is not configured yet
rx_channel->base.gpio_num = -1;
#if SOC_RMT_SUPPORT_DMA
-    // create DMA descriptor
-    size_t num_dma_nodes = 0;
-    if (config->flags.with_dma) {
-        // DMA descriptors must be placed in internal SRAM
-        mem_caps |= MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA;
-        num_dma_nodes = config->mem_block_symbols * sizeof(rmt_symbol_word_t) / DMA_DESCRIPTOR_BUFFER_MAX_SIZE + 1;
-        num_dma_nodes = MAX(2, num_dma_nodes); // at least 2 DMA nodes for ping-pong
-        rmt_dma_descriptor_t *dma_nodes = heap_caps_aligned_calloc(RMT_DMA_DESC_ALIGN, num_dma_nodes, sizeof(rmt_dma_descriptor_t), mem_caps);
-        ESP_GOTO_ON_FALSE(dma_nodes, ESP_ERR_NO_MEM, err, TAG, "no mem for rx channel DMA nodes");
-        rx_channel->dma_nodes = dma_nodes;
-        // do memory sync only when the data cache exists
-        uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
-        if (data_cache_line_size) {
-            // write back and then invalidate the cached dma_nodes, because later the DMA nodes are accessed by non-cacheable address
-            ESP_GOTO_ON_ERROR(esp_cache_msync(dma_nodes, num_dma_nodes * sizeof(rmt_dma_descriptor_t),
-                                              ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE | ESP_CACHE_MSYNC_FLAG_UNALIGNED),
-                              err, TAG, "cache sync failed");
-        }
-        // we will use the non-cached address to manipulate the DMA descriptor, for simplicity
-        rx_channel->dma_nodes_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(dma_nodes);
-    }
-    rx_channel->num_dma_nodes = num_dma_nodes;
#endif // SOC_RMT_SUPPORT_DMA
// register the channel to group
ESP_GOTO_ON_ERROR(rmt_rx_register_to_group(rx_channel, config), err, TAG, "register channel failed");
rmt_group_t *group = rx_channel->base.group;
@@ -366,21 +355,35 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_
size_t mem_alignment = sizeof(rmt_symbol_word_t);
#if SOC_RMT_SUPPORT_DMA
+    uint32_t int_mem_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
if (channel->dma_chan) {
-        // append the alignment requirement from the DMA
-        mem_alignment = MAX(mem_alignment, rx_chan->dma_int_mem_alignment);
// [IDF-8997]: Currently we assume the user buffer is allocated from internal RAM, PSRAM is not supported yet.
ESP_RETURN_ON_FALSE_ISR(esp_ptr_internal(buffer), ESP_ERR_INVALID_ARG, TAG, "user buffer not in the internal RAM");
-        size_t max_buf_sz_per_dma_node = ALIGN_DOWN(DMA_DESCRIPTOR_BUFFER_MAX_SIZE, mem_alignment);
-        ESP_RETURN_ON_FALSE_ISR(buffer_size <= rx_chan->num_dma_nodes * max_buf_sz_per_dma_node,
-                                ESP_ERR_INVALID_ARG, TAG, "buffer size exceeds DMA capacity: %zu", rx_chan->num_dma_nodes * max_buf_sz_per_dma_node);
+        // append the alignment requirement from the DMA and cache line size
+        mem_alignment = MAX(MAX(mem_alignment, rx_chan->dma_int_mem_alignment), int_mem_cache_line_size);
}
#endif // SOC_RMT_SUPPORT_DMA
-    // check buffer alignment
-    uint32_t align_check_mask = mem_alignment - 1;
-    ESP_RETURN_ON_FALSE_ISR(((((uintptr_t)buffer) & align_check_mask) == 0) && (((buffer_size) & align_check_mask) == 0), ESP_ERR_INVALID_ARG,
-                            TAG, "buffer address or size are not %"PRIu32" bytes aligned", mem_alignment);
+    // Align the buffer address up to mem_alignment
+    if ((((uintptr_t)buffer) & (mem_alignment - 1)) != 0) {
+        uintptr_t aligned_address = ALIGN_UP((uintptr_t)buffer, mem_alignment);
+        size_t offset = aligned_address - (uintptr_t)buffer;
+        ESP_RETURN_ON_FALSE_ISR(buffer_size > offset, ESP_ERR_INVALID_ARG, TAG, "buffer size is not aligned and is too small, please increase the buffer size");
+        ESP_EARLY_LOGD(TAG, "origin buffer %p does not satisfy alignment %zu, aligned to %p", buffer, mem_alignment, aligned_address);
+        buffer = (uint8_t *)aligned_address;
+        buffer_size -= offset;
+    }
+    // Align the buffer size down to mem_alignment
+    buffer_size = ALIGN_DOWN(buffer_size, mem_alignment);
+    ESP_RETURN_ON_FALSE_ISR(buffer_size > 0, ESP_ERR_INVALID_ARG, TAG, "buffer size is less than alignment: %"PRIu32", please increase the buffer size", mem_alignment);
+#if SOC_RMT_SUPPORT_DMA
+    if (channel->dma_chan) {
+        size_t max_buf_sz_per_dma_node = ALIGN_DOWN(DMA_DESCRIPTOR_BUFFER_MAX_SIZE, mem_alignment);
+        ESP_RETURN_ON_FALSE_ISR(buffer_size <= rx_chan->num_dma_nodes * max_buf_sz_per_dma_node,
+                                ESP_ERR_INVALID_ARG, TAG, "buffer size exceeds DMA capacity: %"PRIu32", please increase the mem_block_symbols", rx_chan->num_dma_nodes * max_buf_sz_per_dma_node);
+    }
+#endif // SOC_RMT_SUPPORT_DMA
rmt_group_t *group = channel->group;
rmt_hal_context_t *hal = &group->hal;
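Where the old code rejected any buffer whose address or size was not already aligned, the new code trims instead: round the address up, subtract the skipped bytes, then round the remaining size down. A worked example under an assumed 64-byte alignment (the macros mirror IDF's ALIGN_UP/ALIGN_DOWN):

    #include <stdint.h>
    #include <stddef.h>

    #define EXAMPLE_ALIGN_UP(num, align)   (((num) + ((align) - 1)) & ~((uintptr_t)(align) - 1))
    #define EXAMPLE_ALIGN_DOWN(num, align) ((num) & ~((size_t)(align) - 1))

    // e.g. addr = 0x3fc90001, size = 513, alignment = 64:
    // aligned address 0x3fc90040 skips 63 bytes, 450 remain, rounded down to 448
    size_t example_usable_size(uintptr_t addr, size_t size, size_t alignment)
    {
        uintptr_t aligned = EXAMPLE_ALIGN_UP(addr, alignment);
        size_t offset = aligned - addr;   // bytes lost at the front
        if (size <= offset) {
            return 0;                     // buffer too small once aligned
        }
        return EXAMPLE_ALIGN_DOWN(size - offset, alignment);
    }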
@@ -409,7 +412,6 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
// invalidate the user buffer, in case cache auto-write back happens and breaks the data just written by the DMA
-        uint32_t int_mem_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
if (int_mem_cache_line_size) {
// this function will also check the alignment of the buffer and size, against the cache line size
ESP_RETURN_ON_ERROR_ISR(esp_cache_msync(buffer, buffer_size, ESP_CACHE_MSYNC_FLAG_DIR_M2C), TAG, "cache sync failed");
@@ -420,7 +422,7 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_
size_t last_dma_block_size = buffer_size - per_dma_block_size * (rx_chan->num_dma_nodes - 1);
rmt_rx_mount_dma_buffer(rx_chan, buffer, buffer_size, per_dma_block_size, last_dma_block_size);
gdma_reset(channel->dma_chan);
-        gdma_start(channel->dma_chan, (intptr_t)rx_chan->dma_nodes); // note, we must use the cached descriptor address to start the DMA
+        gdma_start(channel->dma_chan, gdma_link_get_head_addr(rx_chan->dma_link));
}
#endif
@@ -586,6 +588,16 @@ bool rmt_isr_handle_rx_done(rmt_rx_channel_t *rx_chan)
rmt_ll_rx_enable(hal->regs, channel_id, false);
portEXIT_CRITICAL_ISR(&channel->spinlock);
+#if !SOC_RMT_SUPPORT_ASYNC_STOP
+    // This is a workaround for the ESP32.
+    // The RX engine cannot be disabled once it has been enabled on the ESP32.
+    // If the state isn't RMT_FSM_RUN, it means the RX engine was disabled
+    // and we shouldn't process the data.
+    if (atomic_load(&channel->fsm) != RMT_FSM_RUN) {
+        return false;
+    }
+#endif
uint32_t offset = rmt_ll_rx_get_memory_writer_offset(hal->regs, channel_id);
    // Starting from the ESP32-C6, the reported pulse count is N - 1 for N input pulses.
@@ -771,15 +783,7 @@ static void rmt_rx_default_isr(void *args)
__attribute__((always_inline))
static inline size_t rmt_rx_count_symbols_until_eof(rmt_rx_channel_t *rx_chan, int start_index)
{
-    size_t received_bytes = 0;
-    for (int i = 0; i < rx_chan->num_dma_nodes; i++) {
-        received_bytes += rx_chan->dma_nodes_nc[start_index].dw0.length;
-        if (rx_chan->dma_nodes_nc[start_index].dw0.suc_eof) {
-            break;
-        }
-        start_index++;
-        start_index %= rx_chan->num_dma_nodes;
-    }
+    size_t received_bytes = gdma_link_count_buffer_size_till_eof(rx_chan->dma_link, start_index);
received_bytes = ALIGN_UP(received_bytes, sizeof(rmt_symbol_word_t));
return received_bytes / sizeof(rmt_symbol_word_t);
}
@@ -787,7 +791,7 @@ static inline size_t rmt_rx_count_symbols_until_eof(rmt_rx_channel_t *rx_chan, i
__attribute__((always_inline))
static inline size_t rmt_rx_count_symbols_for_single_block(rmt_rx_channel_t *rx_chan, int desc_index)
{
-    size_t received_bytes = rx_chan->dma_nodes_nc[desc_index].dw0.length;
+    size_t received_bytes = gdma_link_get_length(rx_chan->dma_link, desc_index);
received_bytes = ALIGN_UP(received_bytes, sizeof(rmt_symbol_word_t));
return received_bytes / sizeof(rmt_symbol_word_t);
}
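Both counting helpers round the raw byte count up to whole 4-byte `rmt_symbol_word_t` units before dividing; e.g. 13 received bytes become 16 bytes, i.e. 4 symbols. A one-liner mirroring that arithmetic:

    #include <stddef.h>

    static inline size_t example_bytes_to_symbols(size_t received_bytes)
    {
        received_bytes = (received_bytes + 3) & ~(size_t)3; // ALIGN_UP to the 4-byte symbol size
        return received_bytes / 4;                          // 13 bytes -> 16 -> 4 symbols
    }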
@@ -821,7 +825,7 @@ static bool rmt_dma_rx_one_block_cb(gdma_channel_handle_t dma_chan, gdma_event_d
if (rx_chan->on_recv_done) {
int recycle_start_index = trans_desc->dma_desc_index;
rmt_rx_done_event_data_t edata = {
-            .received_symbols = rx_chan->dma_nodes_nc[recycle_start_index].buffer,
+            .received_symbols = gdma_link_get_buffer(rx_chan->dma_link, recycle_start_index),
.num_symbols = rmt_rx_count_symbols_until_eof(rx_chan, recycle_start_index),
.flags.is_last = true,
};
@@ -835,7 +839,7 @@ static bool rmt_dma_rx_one_block_cb(gdma_channel_handle_t dma_chan, gdma_event_d
if (rx_chan->on_recv_done) {
size_t dma_desc_index = trans_desc->dma_desc_index;
rmt_rx_done_event_data_t edata = {
-            .received_symbols = rx_chan->dma_nodes_nc[dma_desc_index].buffer,
+            .received_symbols = gdma_link_get_buffer(rx_chan->dma_link, dma_desc_index),
.num_symbols = rmt_rx_count_symbols_for_single_block(rx_chan, dma_desc_index),
.flags.is_last = false,
};

View File

@@ -83,15 +83,33 @@ static esp_err_t rmt_tx_init_dma_link(rmt_tx_channel_t *tx_channel, const rmt_tx
TAG, "mem_block_symbols can't exceed %zu", DMA_DESCRIPTOR_BUFFER_MAX_SIZE * RMT_DMA_NODES_PING_PONG / sizeof(rmt_symbol_word_t));
tx_channel->ping_pong_symbols = mount_size_per_node / sizeof(rmt_symbol_word_t);
+    // create DMA link list
+    gdma_link_list_config_t dma_link_config = {
+        .buffer_alignment = int_alignment,
+        .item_alignment = RMT_DMA_DESC_ALIGN,
+        .num_items = RMT_DMA_NODES_PING_PONG,
+        .flags = {
+            .check_owner = true,
+        },
+    };
+    ESP_RETURN_ON_ERROR(gdma_new_link_list(&dma_link_config, &tx_channel->dma_link), TAG, "create DMA link list failed");
+    gdma_buffer_mount_config_t mount_configs[RMT_DMA_NODES_PING_PONG];
    for (int i = 0; i < RMT_DMA_NODES_PING_PONG; i++) {
        // each descriptor shares half of the DMA buffer
-        tx_channel->dma_nodes_nc[i].buffer = dma_mem_base + tx_channel->ping_pong_symbols * i;
-        tx_channel->dma_nodes_nc[i].dw0.size = tx_channel->ping_pong_symbols * sizeof(rmt_symbol_word_t);
-        // the ownership will be switched to DMA in `rmt_tx_do_transaction()`
-        tx_channel->dma_nodes_nc[i].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
-        tx_channel->dma_nodes_nc[i].dw0.suc_eof = 1;
+        mount_configs[i] = (gdma_buffer_mount_config_t) {
+            .buffer = tx_channel->dma_mem_base + tx_channel->ping_pong_symbols * i,
+            .length = tx_channel->ping_pong_symbols * sizeof(rmt_symbol_word_t),
+            .flags = {
+                // each node can generate the DMA eof interrupt, and the driver will do a ping-pong trick in the eof callback
+                .mark_eof = true,
+                // chain the descriptors into a ring, and will break it in `rmt_encode_eof()`
+                .mark_final = false,
+            }
+        };
    }
+    ESP_RETURN_ON_ERROR(gdma_link_mount_buffers(tx_channel->dma_link, 0, mount_configs, RMT_DMA_NODES_PING_PONG, NULL), TAG, "mount DMA buffers failed");
return ESP_OK;
}
@@ -211,6 +229,9 @@ static esp_err_t rmt_tx_destroy(rmt_tx_channel_t *tx_channel)
if (tx_channel->base.dma_chan) {
ESP_RETURN_ON_ERROR(gdma_del_channel(tx_channel->base.dma_chan), TAG, "delete dma channel failed");
}
+    if (tx_channel->dma_link) {
+        ESP_RETURN_ON_ERROR(gdma_del_link_list(tx_channel->dma_link), TAG, "delete dma link list failed");
+    }
#endif // SOC_RMT_SUPPORT_DMA
for (int i = 0; i < RMT_TX_QUEUE_MAX; i++) {
if (tx_channel->trans_queues[i]) {
@@ -224,9 +245,6 @@ static esp_err_t rmt_tx_destroy(rmt_tx_channel_t *tx_channel)
// de-register channel from RMT group
rmt_tx_unregister_from_group(&tx_channel->base, tx_channel->base.group);
}
-    if (tx_channel->dma_nodes) {
-        free(tx_channel->dma_nodes);
-    }
free(tx_channel);
return ESP_OK;
}
@@ -264,23 +282,6 @@ esp_err_t rmt_new_tx_channel(const rmt_tx_channel_config_t *config, rmt_channel_
ESP_GOTO_ON_FALSE(tx_channel, ESP_ERR_NO_MEM, err, TAG, "no mem for tx channel");
// GPIO configuration is not done yet
tx_channel->base.gpio_num = -1;
-    // create DMA descriptors
-    if (config->flags.with_dma) {
-        // DMA descriptors must be placed in internal SRAM
-        mem_caps |= MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA;
-        rmt_dma_descriptor_t *dma_nodes = heap_caps_aligned_calloc(RMT_DMA_DESC_ALIGN, RMT_DMA_NODES_PING_PONG, sizeof(rmt_dma_descriptor_t), mem_caps);
-        ESP_GOTO_ON_FALSE(dma_nodes, ESP_ERR_NO_MEM, err, TAG, "no mem for tx DMA nodes");
-        tx_channel->dma_nodes = dma_nodes;
-        // write back and then invalidate the cached dma_nodes, because later the DMA nodes are accessed by non-cacheable address
-        uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
-        if (data_cache_line_size) {
-            ESP_GOTO_ON_ERROR(esp_cache_msync(dma_nodes, RMT_DMA_NODES_PING_PONG * sizeof(rmt_dma_descriptor_t),
-                                              ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE | ESP_CACHE_MSYNC_FLAG_UNALIGNED),
-                              err, TAG, "cache sync failed");
-        }
-        // we will use the non-cached address to manipulate the DMA descriptor, for simplicity
-        tx_channel->dma_nodes_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(dma_nodes);
-    }
// create transaction queues
ESP_GOTO_ON_ERROR(rmt_tx_create_trans_queue(tx_channel, config), err, TAG, "install trans queues failed");
// register the channel to group
@@ -330,7 +331,7 @@ esp_err_t rmt_new_tx_channel(const rmt_tx_channel_config_t *config, rmt_channel_
// disable carrier modulation by default, can re-enable by `rmt_apply_carrier()`
rmt_ll_tx_enable_carrier_modulation(hal->regs, channel_id, false);
// idle level is determined by register value
-    rmt_ll_tx_fix_idle_level(hal->regs, channel_id, 0, true);
+    rmt_ll_tx_fix_idle_level(hal->regs, channel_id, config->flags.init_level, true);
    // always enable tx wrap; both DMA mode and ping-pong mode rely on this feature
rmt_ll_tx_enable_wrap(hal->regs, channel_id, true);
@@ -358,9 +359,9 @@ esp_err_t rmt_new_tx_channel(const rmt_tx_channel_config_t *config, rmt_channel_
tx_channel->base.disable = rmt_tx_disable;
// return general channel handle
*ret_chan = &tx_channel->base;
ESP_LOGD(TAG, "new tx channel(%d,%d) at %p, gpio=%d, res=%"PRIu32"Hz, hw_mem_base=%p, dma_mem_base=%p, dma_nodes=%p, ping_pong_size=%zu, queue_depth=%zu",
ESP_LOGD(TAG, "new tx channel(%d,%d) at %p, gpio=%d, res=%"PRIu32"Hz, hw_mem_base=%p, dma_mem_base=%p, ping_pong_size=%zu, queue_depth=%zu",
group_id, channel_id, tx_channel, config->gpio_num, tx_channel->base.resolution_hz,
tx_channel->base.hw_mem_base, tx_channel->dma_mem_base, tx_channel->dma_nodes, tx_channel->ping_pong_symbols, tx_channel->queue_size);
tx_channel->base.hw_mem_base, tx_channel->dma_mem_base, tx_channel->ping_pong_symbols, tx_channel->queue_size);
return ESP_OK;
err:
@@ -591,13 +592,12 @@ esp_err_t rmt_tx_wait_all_done(rmt_channel_handle_t channel, int timeout_ms)
return ESP_OK;
}
-static size_t rmt_tx_mark_eof(rmt_tx_channel_t *tx_chan, bool need_eof_marker)
+size_t rmt_tx_mark_eof(rmt_tx_channel_t *tx_chan, bool need_eof_marker)
{
rmt_channel_t *channel = &tx_chan->base;
rmt_group_t *group = channel->group;
int channel_id = channel->channel_id;
rmt_tx_trans_desc_t *cur_trans = tx_chan->cur_trans;
-    rmt_dma_descriptor_t *desc_nc = NULL;
if (need_eof_marker) {
rmt_symbol_word_t *mem_to_nc = NULL;
@@ -617,25 +617,28 @@ static size_t rmt_tx_mark_eof(rmt_tx_channel_t *tx_chan, bool need_eof_marker)
tx_chan->mem_off_bytes += sizeof(rmt_symbol_word_t);
}
-    size_t off = 0;
-    if (channel->dma_chan) {
-        if (tx_chan->mem_off_bytes <= tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t)) {
-            desc_nc = &tx_chan->dma_nodes_nc[0];
-            off = tx_chan->mem_off_bytes;
-        } else {
-            desc_nc = &tx_chan->dma_nodes_nc[1];
-            off = tx_chan->mem_off_bytes - tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
-        }
-        desc_nc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
-        desc_nc->dw0.length = off;
-        // break down the DMA descriptor link
-        desc_nc->next = NULL;
-    } else {
+    if (!channel->dma_chan) {
portENTER_CRITICAL_ISR(&group->spinlock);
// This is the end of a sequence of encoding sessions, disable the threshold interrupt as no more data will be put into RMT memory block
rmt_ll_enable_interrupt(group->hal.regs, RMT_LL_EVENT_TX_THRES(channel_id), false);
portEXIT_CRITICAL_ISR(&group->spinlock);
}
+#if SOC_RMT_SUPPORT_DMA
+    else {
+        int dma_lli_index = 0;
+        size_t off = 0;
+        if (tx_chan->mem_off_bytes <= tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t)) {
+            dma_lli_index = 0;
+            off = tx_chan->mem_off_bytes;
+        } else {
+            dma_lli_index = 1;
+            off = tx_chan->mem_off_bytes - tx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
+        }
+        gdma_link_set_length(tx_chan->dma_link, dma_lli_index, off);
+        gdma_link_set_owner(tx_chan->dma_link, dma_lli_index, GDMA_LLI_OWNER_DMA);
+        gdma_link_concat(tx_chan->dma_link, dma_lli_index, NULL, 0);
+    }
+#endif // SOC_RMT_SUPPORT_DMA
return need_eof_marker ? 1 : 0;
}
@@ -684,10 +687,10 @@ static void rmt_tx_do_transaction(rmt_tx_channel_t *tx_chan, rmt_tx_trans_desc_t
gdma_reset(channel->dma_chan);
// chain the descriptors into a ring, and will break it in `rmt_encode_eof()`
for (int i = 0; i < RMT_DMA_NODES_PING_PONG; i++) {
-        tx_chan->dma_nodes_nc[i].next = &tx_chan->dma_nodes[i + 1]; // note, we must use the cache address for the next pointer
-        tx_chan->dma_nodes_nc[i].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_CPU;
+        // we will set the owner to DMA in the encoding session
+        gdma_link_set_owner(tx_chan->dma_link, i, GDMA_LLI_OWNER_CPU);
+        gdma_link_concat(tx_chan->dma_link, i, tx_chan->dma_link, i + 1);
    }
-    tx_chan->dma_nodes_nc[RMT_DMA_NODES_PING_PONG - 1].next = &tx_chan->dma_nodes[0];
}
#endif // SOC_RMT_SUPPORT_DMA
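Together with `rmt_tx_mark_eof()` above and the DMA EOF callback below, the descriptor ring now has a three-step lifecycle expressed purely through gdma_link calls; a condensed sketch (error handling omitted, assuming the 2-node list configured in `rmt_tx_init_dma_link()`):

    #include <stdbool.h>
    #include "esp_private/gdma_link.h"

    void example_ring_lifecycle(gdma_link_list_handle_t link)
    {
        // 1. before each transaction: reclaim ownership and re-chain 0 -> 1 -> 0
        for (int i = 0; i < 2; i++) {
            gdma_link_set_owner(link, i, GDMA_LLI_OWNER_CPU);
            gdma_link_concat(link, i, link, i + 1); // i + 1 wraps modulo 2, closing the ring
        }
        // 2. when the encoder emits the EOF marker: terminate the ring at the final node
        gdma_link_concat(link, 1, NULL, 0);
        // 3. in the EOF callback: a terminated node means the transaction has ended
        bool ended = gdma_link_check_end(link, 1);
        (void)ended;
    }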
@@ -739,7 +742,7 @@ static void rmt_tx_do_transaction(rmt_tx_channel_t *tx_chan, rmt_tx_trans_desc_t
#if SOC_RMT_SUPPORT_DMA
if (channel->dma_chan) {
-        gdma_start(channel->dma_chan, (intptr_t)tx_chan->dma_nodes); // note, we must use the cached descriptor address to start the DMA
+        gdma_start(channel->dma_chan, gdma_link_get_head_addr(tx_chan->dma_link));
// delay a while, wait for DMA data going to RMT memory block
esp_rom_delay_us(1);
}
@@ -818,14 +821,14 @@ static esp_err_t rmt_tx_disable(rmt_channel_handle_t channel)
// disable the hardware
portENTER_CRITICAL(&channel->spinlock);
rmt_ll_tx_enable_loop(hal->regs, channel->channel_id, false);
-#if SOC_RMT_SUPPORT_TX_ASYNC_STOP
+#if SOC_RMT_SUPPORT_ASYNC_STOP
rmt_ll_tx_stop(hal->regs, channel->channel_id);
#endif
portEXIT_CRITICAL(&channel->spinlock);
portENTER_CRITICAL(&group->spinlock);
rmt_ll_enable_interrupt(hal->regs, RMT_LL_EVENT_TX_MASK(channel_id), false);
-#if !SOC_RMT_SUPPORT_TX_ASYNC_STOP
+#if !SOC_RMT_SUPPORT_ASYNC_STOP
// we do a trick to stop the undergoing transmission
// stop interrupt, insert EOF marker to the RMT memory, polling the trans_done event
channel->hw_mem_base[0].val = 0;
@@ -1099,15 +1102,11 @@ static void rmt_tx_default_isr(void *args)
static bool rmt_dma_tx_eof_cb(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
{
rmt_tx_channel_t *tx_chan = (rmt_tx_channel_t *)user_data;
-    // tx_eof_desc_addr must be non-zero, guaranteed by the hardware
-    rmt_dma_descriptor_t *eof_desc_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(event_data->tx_eof_desc_addr);
-    if (!eof_desc_nc->next) {
-        return false;
-    }
-    // next points to a cache address, convert it to a non-cached one
-    rmt_dma_descriptor_t *n = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(eof_desc_nc->next);
-    if (!n->next) {
-        return false;
-    }
+    // Due to concurrent software and DMA, check each node to ensure that the ring has been broken
+    for (int i = 0; i < RMT_DMA_NODES_PING_PONG; i++) {
+        if (gdma_link_check_end(tx_chan->dma_link, i)) {
+            return false;
+        }
+    }
    // if the DMA descriptor link is still a ring (i.e. hasn't been broken by `rmt_tx_mark_eof()`), then we treat it as a valid ping-pong event
// continue ping-pong transmission

View File

@@ -25,6 +25,8 @@
typedef struct {
TaskHandle_t task_to_notify;
size_t received_symbol_num;
+    rmt_symbol_word_t *received_symbols;
+    bool is_first_event;
} test_rx_user_data_t;
TEST_RMT_CALLBACK_ATTR
@@ -45,7 +47,7 @@ static bool test_rmt_rx_done_callback(rmt_channel_handle_t channel, const rmt_rx
static void test_rmt_rx_nec_carrier(size_t mem_block_symbols, bool with_dma, rmt_clock_source_t clk_src)
{
uint32_t const test_rx_buffer_symbols = 128;
-    rmt_symbol_word_t *remote_codes = heap_caps_aligned_calloc(64, test_rx_buffer_symbols, sizeof(rmt_symbol_word_t),
+    rmt_symbol_word_t *remote_codes = heap_caps_calloc(test_rx_buffer_symbols, sizeof(rmt_symbol_word_t),
MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA);
TEST_ASSERT_NOT_NULL(remote_codes);
@@ -180,9 +182,6 @@ static void test_rmt_rx_nec_carrier(size_t mem_block_symbols, bool with_dma, rmt
TEST_ESP_OK(rmt_tx_wait_all_done(tx_channel, -1));
-    // test rmt receive with unaligned buffer
-    TEST_ESP_ERR(ESP_ERR_INVALID_ARG, rmt_receive(rx_channel, remote_codes, 13, &receive_config));
printf("disable tx and rx channels\r\n");
TEST_ESP_OK(rmt_disable(tx_channel));
TEST_ESP_OK(rmt_disable(rx_channel));
@@ -234,7 +233,7 @@ static bool test_rmt_partial_receive_done(rmt_channel_handle_t channel, const rm
static void test_rmt_partial_receive(size_t mem_block_symbols, int test_symbols_num, bool with_dma, rmt_clock_source_t clk_src)
{
uint32_t const test_rx_buffer_symbols = 128; // the user buffer is small, it can't hold all the received symbols
-    rmt_symbol_word_t *receive_user_buf = heap_caps_aligned_calloc(64, test_rx_buffer_symbols, sizeof(rmt_symbol_word_t),
+    rmt_symbol_word_t *receive_user_buf = heap_caps_calloc(test_rx_buffer_symbols, sizeof(rmt_symbol_word_t),
MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA);
TEST_ASSERT_NOT_NULL(receive_user_buf);
@@ -334,7 +333,7 @@ static bool test_rmt_received_done(rmt_channel_handle_t channel, const rmt_rx_do
static void test_rmt_receive_filter(rmt_clock_source_t clk_src)
{
uint32_t const test_rx_buffer_symbols = 32;
-    rmt_symbol_word_t *receive_user_buf = heap_caps_aligned_calloc(64, test_rx_buffer_symbols, sizeof(rmt_symbol_word_t),
+    rmt_symbol_word_t *receive_user_buf = heap_caps_calloc(test_rx_buffer_symbols, sizeof(rmt_symbol_word_t),
MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA);
TEST_ASSERT_NOT_NULL(receive_user_buf);
@@ -440,3 +439,128 @@ TEST_CASE("rmt rx filter functionality", "[rmt]")
test_rmt_receive_filter(clk_srcs[i]);
}
}
+TEST_RMT_CALLBACK_ATTR
+static bool test_rmt_rx_unaligned_buffer_done_callback(rmt_channel_handle_t channel, const rmt_rx_done_event_data_t *edata, void *user_data)
+{
+    BaseType_t high_task_wakeup = pdFALSE;
+    test_rx_user_data_t *test_user_data = (test_rx_user_data_t *)user_data;
+    if (test_user_data->is_first_event) {
+        test_user_data->received_symbols = edata->received_symbols;
+        test_user_data->is_first_event = false;
+    }
+    test_user_data->received_symbol_num += edata->num_symbols;
+    if (edata->flags.is_last) {
+        vTaskNotifyGiveFromISR(test_user_data->task_to_notify, &high_task_wakeup);
+    }
+    return high_task_wakeup == pdTRUE;
+}
+
+static void test_rmt_unaligned_receive(size_t mem_block_symbols, int test_symbols_num, bool with_dma, bool en_partial_rx, rmt_clock_source_t clk_src)
+{
+    uint32_t const test_rx_buffer_symbols = 128;
+    rmt_symbol_word_t *receive_user_buf = heap_caps_aligned_calloc(64, test_rx_buffer_symbols, sizeof(rmt_symbol_word_t),
+                                                                   MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA);
+    TEST_ASSERT_NOT_NULL(receive_user_buf);
+    // shift the buffer by one byte so that neither its address nor its size is aligned
+    rmt_symbol_word_t *receive_user_buf_unaligned = (rmt_symbol_word_t *)((uint8_t *)receive_user_buf + 1);
+    size_t receive_user_buf_unaligned_size = test_rx_buffer_symbols * sizeof(rmt_symbol_word_t) - 1;
+    // use TX channel to simulate the input signal
+    rmt_tx_channel_config_t tx_channel_cfg = {
+        .clk_src = clk_src,
+        .resolution_hz = 1000000, // 1MHz, 1 tick = 1us
+        .mem_block_symbols = SOC_RMT_MEM_WORDS_PER_CHANNEL,
+        .trans_queue_depth = 4,
+        .gpio_num = TEST_RMT_GPIO_NUM_A,
+    };
+    rmt_channel_handle_t tx_channel = NULL;
+    TEST_ESP_OK(rmt_new_tx_channel(&tx_channel_cfg, &tx_channel));
+    rmt_encoder_handle_t copy_encoder = NULL;
+    rmt_copy_encoder_config_t encoder_cfg = {};
+    TEST_ESP_OK(rmt_new_copy_encoder(&encoder_cfg, &copy_encoder));
+    rmt_transmit_config_t transmit_config = {
+        .loop_count = 0,
+    };
+    rmt_rx_channel_config_t rx_channel_cfg = {
+        .clk_src = clk_src,
+        .resolution_hz = 1000000, // 1MHz, 1 tick = 1us
+        .mem_block_symbols = mem_block_symbols,
+        .gpio_num = TEST_RMT_GPIO_NUM_A,
+        .flags.with_dma = with_dma,
+    };
+    rmt_channel_handle_t rx_channel = NULL;
+    TEST_ESP_OK(rmt_new_rx_channel(&rx_channel_cfg, &rx_channel));
+    rmt_rx_event_callbacks_t cbs = {
+        .on_recv_done = test_rmt_rx_unaligned_buffer_done_callback,
+    };
+    test_rx_user_data_t test_user_data = {
+        .task_to_notify = xTaskGetCurrentTaskHandle(),
+        .received_symbol_num = 0,
+        .is_first_event = true,
+    };
+    TEST_ESP_OK(rmt_rx_register_event_callbacks(rx_channel, &cbs, &test_user_data));
+    TEST_ESP_OK(rmt_enable(tx_channel));
+    TEST_ESP_OK(rmt_enable(rx_channel));
+    rmt_receive_config_t rx_config = {
+        .signal_range_min_ns = 1250,
+        .signal_range_max_ns = 12000000,
+        .flags.en_partial_rx = en_partial_rx,
+    };
+    // ready to receive
+    TEST_ESP_OK(rmt_receive(rx_channel, receive_user_buf_unaligned, receive_user_buf_unaligned_size, &rx_config));
+    rmt_symbol_word_t *transmit_buf = heap_caps_calloc(test_symbols_num, sizeof(rmt_symbol_word_t),
+                                                       MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA);
+    TEST_ASSERT_NOT_NULL(transmit_buf);
+    for (int i = 0; i < test_symbols_num; i++) {
+        transmit_buf[i] = (rmt_symbol_word_t) {
+            .level0 = 1,
+            .duration0 = 75,
+            .level1 = 0,
+            .duration1 = 25,
+        };
+    }
+    TEST_ESP_OK(rmt_transmit(tx_channel, copy_encoder, transmit_buf, test_symbols_num * sizeof(rmt_symbol_word_t), &transmit_config));
+    TEST_ASSERT_NOT_EQUAL(0, ulTaskNotifyTake(pdFALSE, pdMS_TO_TICKS(2000)));
+    printf("received %zu symbols\r\n", test_user_data.received_symbol_num);
+    // Some chips do not support auto stop in loop mode, so the received symbol number may be slightly more than expected
+    TEST_ASSERT_INT_WITHIN(15, test_symbols_num, test_user_data.received_symbol_num);
+    // verify the received data
+    for (int i = 0; i < 10; i++) {
+        printf("{%d:%d},{%d:%d}\r\n", test_user_data.received_symbols[i].level0, test_user_data.received_symbols[i].duration0, test_user_data.received_symbols[i].level1, test_user_data.received_symbols[i].duration1);
+        TEST_ASSERT_EQUAL(1, test_user_data.received_symbols[i].level0);
+        TEST_ASSERT_INT_WITHIN(20, 75, test_user_data.received_symbols[i].duration0);
+        TEST_ASSERT_EQUAL(0, test_user_data.received_symbols[i].level1);
+        TEST_ASSERT_INT_WITHIN(20, 25, test_user_data.received_symbols[i].duration1);
+    }
+    TEST_ESP_OK(rmt_disable(tx_channel));
+    TEST_ESP_OK(rmt_disable(rx_channel));
+    TEST_ESP_OK(rmt_del_channel(rx_channel));
+    TEST_ESP_OK(rmt_del_channel(tx_channel));
+    TEST_ESP_OK(rmt_del_encoder(copy_encoder));
+    free(receive_user_buf);
+    free(transmit_buf);
+}
+
+TEST_CASE("rmt rx unaligned buffer", "[rmt]")
+{
+    test_rmt_unaligned_receive(SOC_RMT_MEM_WORDS_PER_CHANNEL, SOC_RMT_MEM_WORDS_PER_CHANNEL, false, false, RMT_CLK_SRC_DEFAULT);
+#if SOC_RMT_SUPPORT_RX_PINGPONG
+    test_rmt_unaligned_receive(SOC_RMT_MEM_WORDS_PER_CHANNEL, 10000, false, true, RMT_CLK_SRC_DEFAULT);
+#endif
+#if SOC_RMT_SUPPORT_DMA
+    test_rmt_unaligned_receive(256, SOC_RMT_MEM_WORDS_PER_CHANNEL, true, false, RMT_CLK_SRC_DEFAULT);
+#endif
+#if SOC_RMT_SUPPORT_RX_PINGPONG && SOC_RMT_SUPPORT_DMA
+    test_rmt_unaligned_receive(256, 10000, true, true, RMT_CLK_SRC_DEFAULT);
+#endif
+}

View File

@@ -227,17 +227,21 @@ uintptr_t gdma_link_get_head_addr(gdma_link_list_handle_t list)
esp_err_t gdma_link_concat(gdma_link_list_handle_t first_link, int first_link_item_index, gdma_link_list_handle_t second_link, int second_link_item_index)
{
-    ESP_RETURN_ON_FALSE(first_link && second_link, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
+    ESP_RETURN_ON_FALSE(first_link, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
gdma_link_list_item_t *lli_nc = NULL;
// ensure the first_link_item_index is between 0 and `num_items - 1`
int num_items = first_link->num_items;
first_link_item_index = (first_link_item_index % num_items + num_items) % num_items;
lli_nc = (gdma_link_list_item_t *)(first_link->items_nc + first_link_item_index * first_link->item_size);
+    if (second_link == NULL) {
+        lli_nc->next = NULL;
+    } else {
// ensure the second_link_item_index is between 0 and `num_items - 1`
num_items = second_link->num_items;
second_link_item_index = (second_link_item_index % num_items + num_items) % num_items;
// concatenate the two link lists
lli_nc->next = (gdma_link_list_item_t *)(second_link->items + second_link_item_index * second_link->item_size);
+    }
return ESP_OK;
}
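The `(index % num_items + num_items) % num_items` dance normalizes any integer index, negative ones included, into [0, num_items - 1]; that is what lets callers pass -1 for "the last item" and lets the RMT ring loop pass `i + 1` without special-casing the wrap-around. A quick check of the arithmetic:

    #include <stdio.h>

    static int normalize(int index, int num_items)
    {
        return (index % num_items + num_items) % num_items;
    }

    int main(void)
    {
        printf("%d\n", normalize(-1, 4)); // 3: -1 selects the last item
        printf("%d\n", normalize(4, 4));  // 0: one past the end wraps to the head
        printf("%d\n", normalize(2, 4));  // 2: in-range indexes are unchanged
        return 0;
    }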
@@ -307,3 +311,26 @@ size_t gdma_link_get_length(gdma_link_list_handle_t list, int item_index)
gdma_link_list_item_t *lli = (gdma_link_list_item_t *)(list->items_nc + item_index * list->item_size);
return lli->dw0.length;
}
+esp_err_t gdma_link_set_length(gdma_link_list_handle_t list, int item_index, size_t length)
+{
+    ESP_RETURN_ON_FALSE_ISR(list, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
+    int num_items = list->num_items;
+    // ensure the item_index is between 0 and `num_items - 1`
+    item_index = (item_index % num_items + num_items) % num_items;
+    gdma_link_list_item_t *lli = (gdma_link_list_item_t *)(list->items_nc + item_index * list->item_size);
+    lli->dw0.length = length;
+    return ESP_OK;
+}
+
+bool gdma_link_check_end(gdma_link_list_handle_t list, int item_index)
+{
+    if (!list) {
+        return false;
+    }
+    int num_items = list->num_items;
+    // ensure the item_index is between 0 and `num_items - 1`
+    item_index = (item_index % num_items + num_items) % num_items;
+    gdma_link_list_item_t *lli = (gdma_link_list_item_t *)(list->items_nc + item_index * list->item_size);
+    return lli->next == NULL;
+}

View File

@@ -121,6 +121,8 @@ uintptr_t gdma_link_get_head_addr(gdma_link_list_handle_t list);
* Link A: A1 --> B3 --> B4
* Link B: B1 --> B2 --> B3 --> B4
*
+ * @note If the second link is NULL, the next item of the first_link_item will be set to NULL, and the second_link_item_index is ignored.
+ *
* @param[in] first_link First link list handle, allocated by `gdma_new_link_list`
* @param[in] first_link_item_index Index of the item in the first link list (-1 means the last item)
* @param[in] second_link Second link list handle, allocated by `gdma_new_link_list`
@@ -194,6 +196,28 @@ void* gdma_link_get_buffer(gdma_link_list_handle_t list, int item_index);
*/
size_t gdma_link_get_length(gdma_link_list_handle_t list, int item_index);
+/**
+ * @brief Set the length of the buffer of a DMA link list item
+ *
+ * @param[in] list Link list handle, allocated by `gdma_new_link_list`
+ * @param[in] item_index Index of the link list item
+ * @param[in] length Length of the buffer of the link list item
+ * @return
+ *      - ESP_OK: Set the length successfully
+ *      - ESP_ERR_INVALID_ARG: Set the length failed because of invalid argument
+ *      - ESP_FAIL: Set the length failed because of other error
+ */
+esp_err_t gdma_link_set_length(gdma_link_list_handle_t list, int item_index, size_t length);
+
+/**
+ * @brief Check if a DMA link list item is the last item (has no next item)
+ *
+ * @param[in] list Link list handle, allocated by `gdma_new_link_list`
+ * @param[in] item_index Index of the link list item
+ * @return True if the link list item is the last item, false otherwise
+ */
+bool gdma_link_check_end(gdma_link_list_handle_t list, int item_index);
#ifdef __cplusplus
}
#endif
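A short usage sketch tying the two new helpers together (hypothetical `list` with at least three items):

    #include <assert.h>
    #include "esp_private/gdma_link.h"

    void example_terminate_list(gdma_link_list_handle_t list)
    {
        gdma_link_set_length(list, 2, 64);  // item 2 now reports a 64-byte payload
        gdma_link_concat(list, 2, NULL, 0); // cut the chain after item 2
        assert(gdma_link_check_end(list, 2));
        assert(gdma_link_get_length(list, 2) == 64);
    }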

View File

@@ -619,7 +619,7 @@ config SOC_RMT_SUPPORT_RX_DEMODULATION
bool
default y
-config SOC_RMT_SUPPORT_TX_ASYNC_STOP
+config SOC_RMT_SUPPORT_ASYNC_STOP
bool
default y

View File

@@ -258,7 +258,7 @@
#define SOC_RMT_MEM_WORDS_PER_CHANNEL 48 /*!< Each channel owns 48 words memory (1 word = 4 Bytes) */
#define SOC_RMT_SUPPORT_RX_PINGPONG 1 /*!< Support Ping-Pong mode on RX path */
#define SOC_RMT_SUPPORT_RX_DEMODULATION 1 /*!< Support signal demodulation on RX path (i.e. remove carrier) */
-#define SOC_RMT_SUPPORT_TX_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
+#define SOC_RMT_SUPPORT_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
#define SOC_RMT_SUPPORT_TX_LOOP_COUNT 1 /*!< Support transmit specified number of cycles in loop mode */
#define SOC_RMT_SUPPORT_TX_SYNCHRO 1 /*!< Support coordinate a group of TX channels to start simultaneously */
#define SOC_RMT_SUPPORT_TX_CARRIER_DATA_ONLY 1 /*!< TX carrier can be modulated to data phase only */

View File

@@ -903,7 +903,7 @@ config SOC_RMT_SUPPORT_RX_DEMODULATION
bool
default y
-config SOC_RMT_SUPPORT_TX_ASYNC_STOP
+config SOC_RMT_SUPPORT_ASYNC_STOP
bool
default y

View File

@@ -359,7 +359,7 @@
#define SOC_RMT_MEM_WORDS_PER_CHANNEL 48 /*!< Each channel owns 48 words memory (1 word = 4 Bytes) */
#define SOC_RMT_SUPPORT_RX_PINGPONG 1 /*!< Support Ping-Pong mode on RX path */
#define SOC_RMT_SUPPORT_RX_DEMODULATION 1 /*!< Support signal demodulation on RX path (i.e. remove carrier) */
-#define SOC_RMT_SUPPORT_TX_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
+#define SOC_RMT_SUPPORT_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
#define SOC_RMT_SUPPORT_TX_LOOP_COUNT 1 /*!< Support transmit specified number of cycles in loop mode */
#define SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP 1 /*!< Hardware support of auto-stop in loop mode */
#define SOC_RMT_SUPPORT_TX_SYNCHRO 1 /*!< Support coordinate a group of TX channels to start simultaneously */

View File

@@ -835,7 +835,7 @@ config SOC_RMT_SUPPORT_RX_DEMODULATION
bool
default y
-config SOC_RMT_SUPPORT_TX_ASYNC_STOP
+config SOC_RMT_SUPPORT_ASYNC_STOP
bool
default y

View File

@@ -331,7 +331,7 @@
#define SOC_RMT_MEM_WORDS_PER_CHANNEL 48 /*!< Each channel owns 48 words memory (1 word = 4 Bytes) */
#define SOC_RMT_SUPPORT_RX_PINGPONG 1 /*!< Support Ping-Pong mode on RX path */
#define SOC_RMT_SUPPORT_RX_DEMODULATION 1 /*!< Support signal demodulation on RX path (i.e. remove carrier) */
-#define SOC_RMT_SUPPORT_TX_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
+#define SOC_RMT_SUPPORT_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
#define SOC_RMT_SUPPORT_TX_LOOP_COUNT 1 /*!< Support transmit specified number of cycles in loop mode */
#define SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP 1 /*!< Hardware support of auto-stop in loop mode */
#define SOC_RMT_SUPPORT_TX_SYNCHRO 1 /*!< Support coordinate a group of TX channels to start simultaneously */

View File

@@ -831,7 +831,7 @@ config SOC_RMT_SUPPORT_RX_DEMODULATION
bool
default y
-config SOC_RMT_SUPPORT_TX_ASYNC_STOP
+config SOC_RMT_SUPPORT_ASYNC_STOP
bool
default y

View File

@@ -345,7 +345,7 @@
#define SOC_RMT_MEM_WORDS_PER_CHANNEL 48 /*!< Each channel owns 48 words memory (1 word = 4 Bytes) */
#define SOC_RMT_SUPPORT_RX_PINGPONG 1 /*!< Support Ping-Pong mode on RX path */
#define SOC_RMT_SUPPORT_RX_DEMODULATION 1 /*!< Support signal demodulation on RX path (i.e. remove carrier) */
-#define SOC_RMT_SUPPORT_TX_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
+#define SOC_RMT_SUPPORT_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
#define SOC_RMT_SUPPORT_TX_LOOP_COUNT 1 /*!< Support transmit specified number of cycles in loop mode */
#define SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP 1 /*!< Hardware support of auto-stop in loop mode */
#define SOC_RMT_SUPPORT_TX_SYNCHRO 1 /*!< Support coordinate a group of TX channels to start simultaneously */

View File

@@ -527,7 +527,7 @@ config SOC_RMT_SUPPORT_RX_DEMODULATION
bool
default y
-config SOC_RMT_SUPPORT_TX_ASYNC_STOP
+config SOC_RMT_SUPPORT_ASYNC_STOP
bool
default y

View File

@@ -319,7 +319,7 @@
#define SOC_RMT_MEM_WORDS_PER_CHANNEL 48 /*!< Each channel owns 48 words memory (1 word = 4 Bytes) */
#define SOC_RMT_SUPPORT_RX_PINGPONG 1 /*!< Support Ping-Pong mode on RX path */
#define SOC_RMT_SUPPORT_RX_DEMODULATION 1 /*!< Support signal demodulation on RX path (i.e. remove carrier) */
-#define SOC_RMT_SUPPORT_TX_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
+#define SOC_RMT_SUPPORT_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
#define SOC_RMT_SUPPORT_TX_LOOP_COUNT 1 /*!< Support transmit specified number of cycles in loop mode */
#define SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP 1 /*!< Hardware support of auto-stop in loop mode */
#define SOC_RMT_SUPPORT_TX_SYNCHRO 1 /*!< Support coordinate a group of TX channels to start simultaneously */

View File

@@ -527,7 +527,7 @@ config SOC_RMT_SUPPORT_RX_DEMODULATION
bool
default y
-config SOC_RMT_SUPPORT_TX_ASYNC_STOP
+config SOC_RMT_SUPPORT_ASYNC_STOP
bool
default y

View File

@@ -337,7 +337,7 @@
#define SOC_RMT_MEM_WORDS_PER_CHANNEL 48 /*!< Each channel owns 48 words memory (1 word = 4 Bytes) */
#define SOC_RMT_SUPPORT_RX_PINGPONG 1 /*!< Support Ping-Pong mode on RX path */
#define SOC_RMT_SUPPORT_RX_DEMODULATION 1 /*!< Support signal demodulation on RX path (i.e. remove carrier) */
-#define SOC_RMT_SUPPORT_TX_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
+#define SOC_RMT_SUPPORT_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
#define SOC_RMT_SUPPORT_TX_LOOP_COUNT 1 /*!< Support transmit specified number of cycles in loop mode */
#define SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP 1 /*!< Hardware support of auto-stop in loop mode */
#define SOC_RMT_SUPPORT_TX_SYNCHRO 1 /*!< Support coordinate a group of TX channels to start simultaneously */

View File

@@ -1207,7 +1207,7 @@ config SOC_RMT_SUPPORT_RX_DEMODULATION
bool
default y
-config SOC_RMT_SUPPORT_TX_ASYNC_STOP
+config SOC_RMT_SUPPORT_ASYNC_STOP
bool
default y

View File

@@ -438,7 +438,7 @@
#define SOC_RMT_MEM_WORDS_PER_CHANNEL 48 /*!< Each channel owns 48 words memory (1 word = 4 Bytes) */
#define SOC_RMT_SUPPORT_RX_PINGPONG 1 /*!< Support Ping-Pong mode on RX path */
#define SOC_RMT_SUPPORT_RX_DEMODULATION 1 /*!< Support signal demodulation on RX path (i.e. remove carrier) */
-#define SOC_RMT_SUPPORT_TX_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
+#define SOC_RMT_SUPPORT_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
#define SOC_RMT_SUPPORT_TX_LOOP_COUNT 1 /*!< Support transmit specified number of cycles in loop mode */
#define SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP 1 /*!< Hardware support of auto-stop in loop mode */
#define SOC_RMT_SUPPORT_TX_SYNCHRO 1 /*!< Support coordinate a group of TX channels to start simultaneously */

View File

@@ -623,7 +623,7 @@ config SOC_RMT_SUPPORT_RX_DEMODULATION
bool
default y
-config SOC_RMT_SUPPORT_TX_ASYNC_STOP
+config SOC_RMT_SUPPORT_ASYNC_STOP
bool
default y

View File

@@ -270,7 +270,7 @@
#define SOC_RMT_CHANNELS_PER_GROUP 4 /*!< Total 4 channels */
#define SOC_RMT_MEM_WORDS_PER_CHANNEL 64 /*!< Each channel owns 64 words memory (1 word = 4 Bytes) */
#define SOC_RMT_SUPPORT_RX_DEMODULATION 1 /*!< Support signal demodulation on RX path (i.e. remove carrier) */
-#define SOC_RMT_SUPPORT_TX_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
+#define SOC_RMT_SUPPORT_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
#define SOC_RMT_SUPPORT_TX_LOOP_COUNT 1 /*!< Support transmitting specified number of cycles in loop mode */
#define SOC_RMT_SUPPORT_TX_SYNCHRO 1 /*!< Support coordinate a group of TX channels to start simultaneously */
#define SOC_RMT_SUPPORT_TX_CARRIER_DATA_ONLY 1 /*!< TX carrier can be modulated to data phase only */

View File

@@ -747,7 +747,7 @@ config SOC_RMT_SUPPORT_RX_DEMODULATION
bool
default y
-config SOC_RMT_SUPPORT_TX_ASYNC_STOP
+config SOC_RMT_SUPPORT_ASYNC_STOP
bool
default y

View File

@@ -294,7 +294,7 @@
#define SOC_RMT_MEM_WORDS_PER_CHANNEL 48 /*!< Each channel owns 48 words memory (1 word = 4 Bytes) */
#define SOC_RMT_SUPPORT_RX_PINGPONG 1 /*!< Support Ping-Pong mode on RX path */
#define SOC_RMT_SUPPORT_RX_DEMODULATION 1 /*!< Support signal demodulation on RX path (i.e. remove carrier) */
-#define SOC_RMT_SUPPORT_TX_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
+#define SOC_RMT_SUPPORT_ASYNC_STOP 1 /*!< Support stop transmission asynchronously */
#define SOC_RMT_SUPPORT_TX_LOOP_COUNT 1 /*!< Support transmit specified number of cycles in loop mode */
#define SOC_RMT_SUPPORT_TX_LOOP_AUTO_STOP 1 /*!< Hardware support of auto-stop in loop mode */
#define SOC_RMT_SUPPORT_TX_SYNCHRO 1 /*!< Support coordinate a group of TX channels to start simultaneously */