Merge branch 'feat/rmt_function_placement_distinguish_tx_rx' into 'master'

refactor(rmt): function placement by tx and rx separately

See merge request espressif/esp-idf!38035
morris
2025-03-27 11:00:15 +08:00
20 changed files with 168 additions and 70 deletions

@ -40,7 +40,7 @@
#define PARLIO_INTR_ALLOC_FLAG_SHARED 0
#endif
#if CONFIG_PARLIO_TX_CACHE_SAFE
#if CONFIG_PARLIO_TX_ISR_CACHE_SAFE
#define PARLIO_TX_INTR_ALLOC_FLAG (ESP_INTR_FLAG_LOWMED | PARLIO_INTR_ALLOC_FLAG_SHARED | ESP_INTR_FLAG_IRAM)
#else
#define PARLIO_TX_INTR_ALLOC_FLAG (ESP_INTR_FLAG_LOWMED | PARLIO_INTR_ALLOC_FLAG_SHARED)

@ -1,30 +1,46 @@
menu "ESP-Driver:RMT Configurations"
depends on SOC_RMT_SUPPORTED
config RMT_ISR_HANDLER_IN_IRAM
bool "Place RMT ISR handler into IRAM"
select GDMA_CTRL_FUNC_IN_IRAM if SOC_RMT_SUPPORT_DMA
select RMT_OBJ_CACHE_SAFE
config RMT_TX_ISR_HANDLER_IN_IRAM
bool "Place RMT TX ISR handler in IRAM to reduce latency"
default y
select RMT_OBJ_CACHE_SAFE
help
Place RMT ISR handler into IRAM for better performance and fewer cache misses.
Place the RMT TX ISR handler in IRAM to reduce the latency caused by cache misses.
config RMT_RX_ISR_HANDLER_IN_IRAM
bool "Place RMT RX ISR handler in IRAM to reduce latency"
default y
select RMT_OBJ_CACHE_SAFE
help
Place the RMT RX ISR handler in IRAM to reduce the latency caused by cache misses.
config RMT_RECV_FUNC_IN_IRAM
bool "Place RMT receive function into IRAM"
bool "Place RMT receive function in IRAM"
default n
select GDMA_CTRL_FUNC_IN_IRAM if SOC_RMT_SUPPORT_DMA
select RMT_OBJ_CACHE_SAFE
help
Place RMT receive function into IRAM for better performance and fewer cache misses.
config RMT_ISR_CACHE_SAFE
bool "RMT ISR Cache-Safe"
config RMT_TX_ISR_CACHE_SAFE
bool "Allow RMT TX ISR to execute when cache is disabled"
select RMT_TX_ISR_HANDLER_IN_IRAM
select GDMA_ISR_IRAM_SAFE if SOC_RMT_SUPPORT_DMA
select RMT_ISR_HANDLER_IN_IRAM
default n
help
Ensure the RMT interrupt is Cache-Safe by allowing the interrupt handler to be
executable when the cache is disabled (e.g. SPI Flash write).
Enable this option to allow the RMT TX Interrupt Service Routine (ISR)
to execute even when the cache is disabled. This can be useful in scenarios where the cache
might be turned off, but the RMT TX functionality is still required to operate correctly.
config RMT_RX_ISR_CACHE_SAFE
bool "Allow RMT RX ISR to execute when cache is disabled"
select RMT_RX_ISR_HANDLER_IN_IRAM
select GDMA_ISR_IRAM_SAFE if SOC_RMT_SUPPORT_DMA
default n
help
Enable this option to allow the RMT RX Interrupt Service Routine (ISR)
to execute even when the cache is disabled. This can be useful in scenarios where the cache
might be turned off, but the RMT RX functionality is still required to operate correctly.
config RMT_OBJ_CACHE_SAFE
bool
@ -43,4 +59,13 @@ menu "ESP-Driver:RMT Configurations"
3. set the runtime log level to VERBOSE
Please enable this option with caution, as it will increase the binary size.
endmenu
config RMT_ISR_IRAM_SAFE
bool "RMT ISR IRAM-Safe (Deprecated)"
select RMT_TX_ISR_CACHE_SAFE
select RMT_RX_ISR_CACHE_SAFE
default n
help
Ensure the RMT interrupt is IRAM-Safe by allowing the interrupt handler to be
executable when the cache is disabled (e.g. SPI Flash write).
endmenu # RMT Configurations
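
As a usage sketch (not part of this diff), a project's sdkconfig.defaults could migrate from the deprecated umbrella option to the split options introduced above, enabling the cache-safe ISR only for the direction it actually uses; the file name is just the conventional location:

# Deprecated umbrella option -- still accepted, now selects both new options:
# CONFIG_RMT_ISR_IRAM_SAFE=y
# Preferred: enable cache-safe ISR handling per direction
CONFIG_RMT_TX_ISR_CACHE_SAFE=y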

@ -18,7 +18,7 @@ extern "C" {
/**
* @brief Group of RMT RX callbacks
* @note The callbacks are all executed in an ISR context
* @note When CONFIG_RMT_ISR_CACHE_SAFE is enabled, the callback itself and functions called by it should be placed in IRAM.
* @note When CONFIG_RMT_RX_ISR_CACHE_SAFE is enabled, the callback itself and functions called by it should be placed in IRAM.
* The variables used in the function should be in the SRAM as well.
*/
typedef struct {
@ -100,7 +100,7 @@ esp_err_t rmt_receive(rmt_channel_handle_t rx_channel, void *buffer, size_t buff
* @brief Set callbacks for RMT RX channel
*
* @note User can deregister a previously registered callback by calling this function and setting the callback member in the `cbs` structure to NULL.
* @note When CONFIG_RMT_ISR_CACHE_SAFE is enabled, the callback itself and functions called by it should be placed in IRAM.
* @note When CONFIG_RMT_RX_ISR_CACHE_SAFE is enabled, the callback itself and functions called by it should be placed in IRAM.
* The variables used in the function should be in the SRAM as well. The `user_data` should also reside in SRAM.
*
* @param[in] rx_channel RMT generic channel that was created by `rmt_new_rx_channel()`
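
As an illustration of the note above (not part of this diff), a minimal sketch of an RX done callback that stays usable when CONFIG_RMT_RX_ISR_CACHE_SAFE is enabled: the callback carries IRAM_ATTR and only touches data in internal RAM. The names example_rx_done_cb and rx_queue are hypothetical.

#include "driver/rmt_rx.h"
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "esp_attr.h"

// Runs in ISR context; with CONFIG_RMT_RX_ISR_CACHE_SAFE it must be IRAM-resident
static IRAM_ATTR bool example_rx_done_cb(rmt_channel_handle_t channel,
                                         const rmt_rx_done_event_data_t *edata,
                                         void *user_ctx)
{
    BaseType_t task_woken = pdFALSE;
    QueueHandle_t rx_queue = (QueueHandle_t)user_ctx;  // queue handle lives in internal RAM
    xQueueSendFromISR(rx_queue, edata, &task_woken);   // copy the event data out of the ISR
    return task_woken == pdTRUE;
}

// Registration -- `user_data` (the queue handle) must also reside in SRAM:
//   rmt_rx_event_callbacks_t cbs = { .on_recv_done = example_rx_done_cb };
//   ESP_ERROR_CHECK(rmt_rx_register_event_callbacks(rx_channel, &cbs, rx_queue));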

@ -19,7 +19,7 @@ extern "C" {
/**
* @brief Group of RMT TX callbacks
* @note The callbacks are all executed in an ISR context
* @note When CONFIG_RMT_ISR_CACHE_SAFE is enabled, the callback itself and functions called by it should be placed in IRAM.
* @note When CONFIG_RMT_TX_ISR_CACHE_SAFE is enabled, the callback itself and functions called by it should be placed in IRAM.
* The variables used in the function should be in the SRAM as well.
*/
typedef struct {
@ -128,7 +128,7 @@ esp_err_t rmt_tx_wait_all_done(rmt_channel_handle_t tx_channel, int timeout_ms);
* @brief Set event callbacks for RMT TX channel
*
* @note User can deregister a previously registered callback by calling this function and setting the callback member in the `cbs` structure to NULL.
* @note When CONFIG_RMT_ISR_CACHE_SAFE is enabled, the callback itself and functions called by it should be placed in IRAM.
* @note When CONFIG_RMT_TX_ISR_CACHE_SAFE is enabled, the callback itself and functions called by it should be placed in IRAM.
* The variables used in the function should be in the SRAM as well. The `user_data` should also reside in SRAM.
*
* @param[in] tx_channel RMT generic channel that was created by `rmt_new_tx_channel()`
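
As an illustration of the note above (not part of this diff), the TX-side counterpart: a minimal sketch of a transmit-done callback that is safe to use when CONFIG_RMT_TX_ISR_CACHE_SAFE is enabled. The names example_tx_done_cb and tx_done_sem are hypothetical.

#include "driver/rmt_tx.h"
#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"
#include "esp_attr.h"

// Can fire while the flash cache is disabled, so it must live in IRAM
static IRAM_ATTR bool example_tx_done_cb(rmt_channel_handle_t channel,
                                         const rmt_tx_done_event_data_t *edata,
                                         void *user_ctx)
{
    BaseType_t task_woken = pdFALSE;
    SemaphoreHandle_t done_sem = (SemaphoreHandle_t)user_ctx;  // handle stored in SRAM
    xSemaphoreGiveFromISR(done_sem, &task_woken);              // signal the waiting task
    return task_woken == pdTRUE;
}

// Registration:
//   rmt_tx_event_callbacks_t cbs = { .on_trans_done = example_tx_done_cb };
//   ESP_ERROR_CHECK(rmt_tx_register_event_callbacks(tx_channel, &cbs, tx_done_sem));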

@ -1,13 +1,80 @@
[mapping:rmt_driver]
archive: libesp_driver_rmt.a
entries:
if RMT_ISR_HANDLER_IN_IRAM = y:
if RMT_TX_ISR_HANDLER_IN_IRAM = y:
rmt_tx: rmt_tx_default_isr (noflash)
rmt_rx: rmt_rx_default_isr (noflash)
rmt_tx: rmt_isr_handle_tx_done (noflash)
rmt_tx: rmt_isr_handle_tx_threshold (noflash)
rmt_tx: rmt_tx_do_transaction (noflash)
rmt_tx: rmt_encode_check_result (noflash)
rmt_tx: rmt_tx_mark_eof (noflash)
rmt_encoder_bytes: rmt_encode_bytes (noflash)
rmt_encoder_copy: rmt_encode_copy (noflash)
rmt_encoder_simple: rmt_encode_simple (noflash)
if SOC_RMT_SUPPORT_TX_LOOP_COUNT = y:
rmt_tx: rmt_isr_handle_tx_loop_end (noflash)
if SOC_RMT_SUPPORT_DMA = y:
rmt_tx: rmt_dma_tx_eof_cb (noflash)
if RMT_RX_ISR_HANDLER_IN_IRAM = y:
rmt_rx: rmt_rx_default_isr (noflash)
rmt_rx: rmt_isr_handle_rx_done (noflash)
if SOC_RMT_SUPPORT_RX_PINGPONG = y:
rmt_rx: rmt_isr_handle_rx_threshold (noflash)
if SOC_RMT_SUPPORT_DMA = y:
rmt_rx: rmt_dma_rx_one_block_cb (noflash)
if RMT_RECV_FUNC_IN_IRAM = y:
rmt_rx: rmt_receive (noflash)
[mapping:rmt_driver_gdma]
archive: libesp_hw_support.a
entries:
if RMT_TX_ISR_HANDLER_IN_IRAM = y && SOC_RMT_SUPPORT_DMA = y:
gdma: gdma_reset (noflash)
gdma: gdma_start (noflash)
gdma: gdma_append (noflash)
if RMT_RECV_FUNC_IN_IRAM = y && SOC_RMT_SUPPORT_DMA = y:
gdma: gdma_reset (noflash)
gdma: gdma_start (noflash)
[mapping:rmt_driver_hal]
archive: libhal.a
entries:
if RMT_TX_ISR_HANDLER_IN_IRAM = y:
if SOC_RMT_SUPPORT_DMA = y:
rmt_rx: rmt_rx_mount_dma_buffer (noflash)
gdma_hal_top: gdma_hal_append (noflash)
gdma_hal_top: gdma_hal_reset (noflash)
gdma_hal_top: gdma_hal_start_with_desc (noflash)
# GDMA implementation layer for AHB-DMA version 1
if SOC_AHB_GDMA_VERSION = 1:
gdma_hal_ahb_v1: gdma_ahb_hal_append (noflash)
gdma_hal_ahb_v1: gdma_ahb_hal_reset (noflash)
gdma_hal_ahb_v1: gdma_ahb_hal_start_with_desc (noflash)
# GDMA implementation layer for AHB-DMA version 2
if SOC_AHB_GDMA_VERSION = 2:
gdma_hal_ahb_v2: gdma_ahb_hal_append (noflash)
gdma_hal_ahb_v2: gdma_ahb_hal_reset (noflash)
gdma_hal_ahb_v2: gdma_ahb_hal_start_with_desc (noflash)
if RMT_RECV_FUNC_IN_IRAM = y:
if SOC_RMT_SUPPORT_DMA = y:
gdma_hal_top: gdma_hal_reset (noflash)
gdma_hal_top: gdma_hal_start_with_desc (noflash)
# GDMA implementation layer for AHB-DMA version 1
if SOC_AHB_GDMA_VERSION = 1:
gdma_hal_ahb_v1: gdma_ahb_hal_reset (noflash)
gdma_hal_ahb_v1: gdma_ahb_hal_start_with_desc (noflash)
# GDMA implementation layer for AHB-DMA version 2
if SOC_AHB_GDMA_VERSION = 2:
gdma_hal_ahb_v2: gdma_ahb_hal_reset (noflash)
gdma_hal_ahb_v2: gdma_ahb_hal_start_with_desc (noflash)

@ -1,4 +0,0 @@
# sdkconfig replacement configurations for deprecated options formatted as
# CONFIG_DEPRECATED_OPTION CONFIG_NEW_OPTION
CONFIG_RMT_ISR_IRAM_SAFE CONFIG_RMT_ISR_CACHE_SAFE

@ -344,9 +344,9 @@ bool rmt_set_intr_priority_to_group(rmt_group_t *group, int intr_priority)
return priority_conflict;
}
int rmt_get_isr_flags(rmt_group_t *group)
int rmt_isr_priority_to_flags(rmt_group_t *group)
{
int isr_flags = RMT_INTR_ALLOC_FLAG;
int isr_flags = 0;
if (group->intr_priority) {
// Use user-specified priority bit
isr_flags |= (1 << (group->intr_priority));

@ -27,8 +27,8 @@ static esp_err_t rmt_bytes_encoder_reset(rmt_encoder_t *encoder)
return ESP_OK;
}
static size_t IRAM_ATTR rmt_encode_bytes(rmt_encoder_t *encoder, rmt_channel_handle_t channel,
const void *primary_data, size_t data_size, rmt_encode_state_t *ret_state)
static size_t rmt_encode_bytes(rmt_encoder_t *encoder, rmt_channel_handle_t channel,
const void *primary_data, size_t data_size, rmt_encode_state_t *ret_state)
{
rmt_bytes_encoder_t *bytes_encoder = __containerof(encoder, rmt_bytes_encoder_t, base);
rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);

@ -19,8 +19,8 @@ static esp_err_t rmt_copy_encoder_reset(rmt_encoder_t *encoder)
return ESP_OK;
}
static size_t IRAM_ATTR rmt_encode_copy(rmt_encoder_t *encoder, rmt_channel_handle_t channel,
const void *primary_data, size_t data_size, rmt_encode_state_t *ret_state)
static size_t rmt_encode_copy(rmt_encoder_t *encoder, rmt_channel_handle_t channel,
const void *primary_data, size_t data_size, rmt_encode_state_t *ret_state)
{
rmt_copy_encoder_t *copy_encoder = __containerof(encoder, rmt_copy_encoder_t, base);
rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);

@ -29,8 +29,8 @@ static esp_err_t rmt_simple_encoder_reset(rmt_encoder_t *encoder)
return ESP_OK;
}
static size_t IRAM_ATTR rmt_encode_simple(rmt_encoder_t *encoder, rmt_channel_handle_t channel,
const void *data, size_t data_size, rmt_encode_state_t *ret_state)
static size_t rmt_encode_simple(rmt_encoder_t *encoder, rmt_channel_handle_t channel,
const void *data, size_t data_size, rmt_encode_state_t *ret_state)
{
rmt_simple_encoder_t *simple_encoder = __containerof(encoder, rmt_simple_encoder_t, base);
rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);

@ -59,10 +59,16 @@ extern "C" {
#endif
// RMT driver object is per-channel, the interrupt source is shared between channels
#if CONFIG_RMT_ISR_CACHE_SAFE
#define RMT_INTR_ALLOC_FLAG (ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_IRAM)
#if CONFIG_RMT_TX_ISR_CACHE_SAFE
#define RMT_TX_INTR_ALLOC_FLAG (ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_IRAM)
#else
#define RMT_INTR_ALLOC_FLAG (ESP_INTR_FLAG_SHARED)
#define RMT_TX_INTR_ALLOC_FLAG (ESP_INTR_FLAG_SHARED)
#endif
#if CONFIG_RMT_RX_ISR_CACHE_SAFE
#define RMT_RX_INTR_ALLOC_FLAG (ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_IRAM)
#else
#define RMT_RX_INTR_ALLOC_FLAG (ESP_INTR_FLAG_SHARED)
#endif
// Hopefully the channel offset won't change in other targets
@ -264,11 +270,11 @@ esp_err_t rmt_select_periph_clock(rmt_channel_handle_t chan, rmt_clock_source_t
bool rmt_set_intr_priority_to_group(rmt_group_t *group, int intr_priority);
/**
* @brief Get isr_flags to be passed to `esp_intr_alloc_intrstatus()` according to `intr_priority` set in RMT group
* @brief Convert the interrupt priority to flags
* @param group RMT group
* @return isr_flags
* @return isr_flags compatible with `ESP_INTR_FLAG_*`
*/
int rmt_get_isr_flags(rmt_group_t *group);
int rmt_isr_priority_to_flags(rmt_group_t *group);
/**
* @brief Create sleep retention link

@ -22,7 +22,8 @@ static void rmt_rx_default_isr(void *args);
#if SOC_RMT_SUPPORT_DMA
static bool rmt_dma_rx_one_block_cb(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data);
static void rmt_rx_mount_dma_buffer(rmt_rx_channel_t *rx_chan, const void *buffer, size_t buffer_size, size_t per_block_size, size_t last_block_size)
__attribute__((always_inline))
static inline void rmt_rx_mount_dma_buffer(rmt_rx_channel_t *rx_chan, const void *buffer, size_t buffer_size, size_t per_block_size, size_t last_block_size)
{
uint8_t *data = (uint8_t *)buffer;
for (int i = 0; i < rx_chan->num_dma_nodes; i++) {
@ -248,7 +249,7 @@ esp_err_t rmt_new_rx_channel(const rmt_rx_channel_config_t *config, rmt_channel_
bool priority_conflict = rmt_set_intr_priority_to_group(group, config->intr_priority);
ESP_GOTO_ON_FALSE(!priority_conflict, ESP_ERR_INVALID_ARG, err, TAG, "intr_priority conflict");
// 2-- Get interrupt allocation flag
int isr_flags = rmt_get_isr_flags(group);
int isr_flags = rmt_isr_priority_to_flags(group) | RMT_RX_INTR_ALLOC_FLAG;
// 3-- Allocate interrupt using isr_flag
ret = esp_intr_alloc_intrstatus(rmt_periph_signals.groups[group_id].irq, isr_flags,
(uint32_t)rmt_ll_get_interrupt_status_reg(hal->regs),
@ -336,7 +337,7 @@ esp_err_t rmt_rx_register_event_callbacks(rmt_channel_handle_t channel, const rm
ESP_RETURN_ON_FALSE(channel->direction == RMT_CHANNEL_DIRECTION_RX, ESP_ERR_INVALID_ARG, TAG, "invalid channel direction");
rmt_rx_channel_t *rx_chan = __containerof(channel, rmt_rx_channel_t, base);
#if CONFIG_RMT_ISR_CACHE_SAFE
#if CONFIG_RMT_RX_ISR_CACHE_SAFE
if (cbs->on_recv_done) {
ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_done), ESP_ERR_INVALID_ARG, TAG, "on_recv_done callback not in IRAM");
}
@ -375,7 +376,7 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_
// check buffer alignment
uint32_t align_check_mask = mem_alignment - 1;
ESP_RETURN_ON_FALSE_ISR(((((uintptr_t)buffer) & align_check_mask) == 0) && (((buffer_size) & align_check_mask) == 0), ESP_ERR_INVALID_ARG,
TAG, "buffer address or size are not %zu bytes aligned", mem_alignment);
TAG, "buffer address or size are not %" PRIu32 " bytes aligned", mem_alignment);
rmt_group_t *group = channel->group;
rmt_hal_context_t *hal = &group->hal;
@ -560,7 +561,7 @@ static esp_err_t rmt_rx_disable(rmt_channel_handle_t channel)
return ESP_OK;
}
static bool IRAM_ATTR rmt_isr_handle_rx_done(rmt_rx_channel_t *rx_chan)
bool rmt_isr_handle_rx_done(rmt_rx_channel_t *rx_chan)
{
rmt_channel_t *channel = &rx_chan->base;
rmt_group_t *group = channel->group;
@ -663,7 +664,7 @@ static bool IRAM_ATTR rmt_isr_handle_rx_done(rmt_rx_channel_t *rx_chan)
}
#if SOC_RMT_SUPPORT_RX_PINGPONG
static bool IRAM_ATTR rmt_isr_handle_rx_threshold(rmt_rx_channel_t *rx_chan)
bool rmt_isr_handle_rx_threshold(rmt_rx_channel_t *rx_chan)
{
bool need_yield = false;
rmt_channel_t *channel = &rx_chan->base;
@ -758,7 +759,8 @@ static void rmt_rx_default_isr(void *args)
}
#if SOC_RMT_SUPPORT_DMA
static size_t IRAM_ATTR rmt_rx_count_symbols_until_eof(rmt_rx_channel_t *rx_chan, int start_index)
__attribute__((always_inline))
static inline size_t rmt_rx_count_symbols_until_eof(rmt_rx_channel_t *rx_chan, int start_index)
{
size_t received_bytes = 0;
for (int i = 0; i < rx_chan->num_dma_nodes; i++) {
@ -773,7 +775,8 @@ static size_t IRAM_ATTR rmt_rx_count_symbols_until_eof(rmt_rx_channel_t *rx_chan
return received_bytes / sizeof(rmt_symbol_word_t);
}
static size_t IRAM_ATTR rmt_rx_count_symbols_for_single_block(rmt_rx_channel_t *rx_chan, int desc_index)
__attribute__((always_inline))
static inline size_t rmt_rx_count_symbols_for_single_block(rmt_rx_channel_t *rx_chan, int desc_index)
{
size_t received_bytes = rx_chan->dma_nodes_nc[desc_index].dw0.length;
received_bytes = ALIGN_UP(received_bytes, sizeof(rmt_symbol_word_t));

@ -297,7 +297,7 @@ esp_err_t rmt_new_tx_channel(const rmt_tx_channel_config_t *config, rmt_channel_
bool priority_conflict = rmt_set_intr_priority_to_group(group, config->intr_priority);
ESP_GOTO_ON_FALSE(!priority_conflict, ESP_ERR_INVALID_ARG, err, TAG, "intr_priority conflict");
// 2-- Get interrupt allocation flag
int isr_flags = rmt_get_isr_flags(group);
int isr_flags = rmt_isr_priority_to_flags(group) | RMT_TX_INTR_ALLOC_FLAG;
// 3-- Allocate interrupt using isr_flag
ret = esp_intr_alloc_intrstatus(rmt_periph_signals.groups[group_id].irq, isr_flags,
(uint32_t) rmt_ll_get_interrupt_status_reg(hal->regs),
@ -499,7 +499,7 @@ esp_err_t rmt_tx_register_event_callbacks(rmt_channel_handle_t channel, const rm
ESP_RETURN_ON_FALSE(channel->direction == RMT_CHANNEL_DIRECTION_TX, ESP_ERR_INVALID_ARG, TAG, "invalid channel direction");
rmt_tx_channel_t *tx_chan = __containerof(channel, rmt_tx_channel_t, base);
#if CONFIG_RMT_ISR_CACHE_SAFE
#if CONFIG_RMT_TX_ISR_CACHE_SAFE
if (cbs->on_trans_done) {
ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_trans_done), ESP_ERR_INVALID_ARG, TAG, "on_trans_done callback not in IRAM");
}
@ -520,7 +520,7 @@ esp_err_t rmt_transmit(rmt_channel_handle_t channel, rmt_encoder_t *encoder, con
#if !SOC_RMT_SUPPORT_TX_LOOP_COUNT
ESP_RETURN_ON_FALSE(config->loop_count <= 0, ESP_ERR_NOT_SUPPORTED, TAG, "loop count is not supported");
#endif // !SOC_RMT_SUPPORT_TX_LOOP_COUNT
#if CONFIG_RMT_ISR_CACHE_SAFE
#if CONFIG_RMT_TX_ISR_CACHE_SAFE
// payload is retrieved by the encoder, we should make sure it's still accessible even when the cache is disabled
ESP_RETURN_ON_FALSE(esp_ptr_internal(payload), ESP_ERR_INVALID_ARG, TAG, "payload not in internal RAM");
#endif
@ -589,7 +589,7 @@ esp_err_t rmt_tx_wait_all_done(rmt_channel_handle_t channel, int timeout_ms)
return ESP_OK;
}
static void IRAM_ATTR rmt_tx_mark_eof(rmt_tx_channel_t *tx_chan)
static void rmt_tx_mark_eof(rmt_tx_channel_t *tx_chan)
{
rmt_channel_t *channel = &tx_chan->base;
rmt_group_t *group = channel->group;
@ -632,7 +632,7 @@ static void IRAM_ATTR rmt_tx_mark_eof(rmt_tx_channel_t *tx_chan)
}
}
static size_t IRAM_ATTR rmt_encode_check_result(rmt_tx_channel_t *tx_chan, rmt_tx_trans_desc_t *t)
size_t rmt_encode_check_result(rmt_tx_channel_t *tx_chan, rmt_tx_trans_desc_t *t)
{
rmt_encode_state_t encode_state = RMT_ENCODING_RESET;
rmt_encoder_handle_t encoder = t->encoder;
@ -657,7 +657,7 @@ static size_t IRAM_ATTR rmt_encode_check_result(rmt_tx_channel_t *tx_chan, rmt_t
return encoded_symbols;
}
static void IRAM_ATTR rmt_tx_do_transaction(rmt_tx_channel_t *tx_chan, rmt_tx_trans_desc_t *t)
static void rmt_tx_do_transaction(rmt_tx_channel_t *tx_chan, rmt_tx_trans_desc_t *t)
{
rmt_channel_t *channel = &tx_chan->base;
rmt_group_t *group = channel->group;
@ -892,7 +892,7 @@ static esp_err_t rmt_tx_modulate_carrier(rmt_channel_handle_t channel, const rmt
return ESP_OK;
}
static bool IRAM_ATTR rmt_isr_handle_tx_threshold(rmt_tx_channel_t *tx_chan)
bool rmt_isr_handle_tx_threshold(rmt_tx_channel_t *tx_chan)
{
// continue ping-pong transmission
rmt_tx_trans_desc_t *t = tx_chan->cur_trans;
@ -910,7 +910,7 @@ static bool IRAM_ATTR rmt_isr_handle_tx_threshold(rmt_tx_channel_t *tx_chan)
return false;
}
static bool IRAM_ATTR rmt_isr_handle_tx_done(rmt_tx_channel_t *tx_chan)
bool rmt_isr_handle_tx_done(rmt_tx_channel_t *tx_chan)
{
rmt_channel_t *channel = &tx_chan->base;
BaseType_t awoken = pdFALSE;
@ -961,7 +961,7 @@ static bool IRAM_ATTR rmt_isr_handle_tx_done(rmt_tx_channel_t *tx_chan)
}
#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
static bool IRAM_ATTR rmt_isr_handle_tx_loop_end(rmt_tx_channel_t *tx_chan)
bool rmt_isr_handle_tx_loop_end(rmt_tx_channel_t *tx_chan)
{
rmt_channel_t *channel = &tx_chan->base;
rmt_group_t *group = channel->group;

@ -4,7 +4,7 @@ set(srcs "test_app_main.c"
"test_rmt_rx.c"
"test_util_rmt_encoders.c")
if(CONFIG_RMT_ISR_CACHE_SAFE)
if(CONFIG_RMT_TX_ISR_CACHE_SAFE AND CONFIG_RMT_RX_ISR_CACHE_SAFE)
list(APPEND srcs "test_rmt_cache_safe.c")
endif()

@ -23,7 +23,7 @@ static void IRAM_ATTR test_delay_post_cache_disable(void *args)
esp_rom_delay_us(10000);
}
static void test_rmt_tx_iram_safe(size_t mem_block_symbols, bool with_dma)
static void test_rmt_tx_cache_safe(size_t mem_block_symbols, bool with_dma)
{
rmt_tx_channel_config_t tx_channel_cfg = {
.mem_block_symbols = mem_block_symbols,
@ -81,9 +81,9 @@ static void test_rmt_tx_iram_safe(size_t mem_block_symbols, bool with_dma)
TEST_CASE("rmt tx works with cache disabled", "[rmt]")
{
test_rmt_tx_iram_safe(SOC_RMT_MEM_WORDS_PER_CHANNEL, false);
test_rmt_tx_cache_safe(SOC_RMT_MEM_WORDS_PER_CHANNEL, false);
#if SOC_RMT_SUPPORT_DMA
test_rmt_tx_iram_safe(1024, true);
test_rmt_tx_cache_safe(1024, true);
#endif
}
@ -128,7 +128,7 @@ static bool test_rmt_rx_done_callback(rmt_channel_handle_t channel, const rmt_rx
return high_task_wakeup == pdTRUE;
}
static void test_rmt_rx_iram_safe(size_t mem_block_symbols, bool with_dma, rmt_clock_source_t clk_src)
static void test_rmt_rx_cache_safe(size_t mem_block_symbols, bool with_dma, rmt_clock_source_t clk_src)
{
uint32_t const test_rx_buffer_symbols = 128;
rmt_symbol_word_t *remote_codes = heap_caps_aligned_calloc(64, test_rx_buffer_symbols, sizeof(rmt_symbol_word_t),
@ -192,8 +192,8 @@ static void test_rmt_rx_iram_safe(size_t mem_block_symbols, bool with_dma, rmt_c
TEST_CASE("rmt rx works with cache disabled", "[rmt]")
{
test_rmt_rx_iram_safe(SOC_RMT_MEM_WORDS_PER_CHANNEL, false, RMT_CLK_SRC_DEFAULT);
test_rmt_rx_cache_safe(SOC_RMT_MEM_WORDS_PER_CHANNEL, false, RMT_CLK_SRC_DEFAULT);
#if SOC_RMT_SUPPORT_DMA
test_rmt_rx_iram_safe(128, true, RMT_CLK_SRC_DEFAULT);
test_rmt_rx_cache_safe(128, true, RMT_CLK_SRC_DEFAULT);
#endif
}

@ -16,7 +16,7 @@
#include "test_util_rmt_encoders.h"
#include "test_board.h"
#if CONFIG_RMT_ISR_CACHE_SAFE
#if CONFIG_RMT_RX_ISR_CACHE_SAFE
#define TEST_RMT_CALLBACK_ATTR IRAM_ATTR
#else
#define TEST_RMT_CALLBACK_ATTR

@ -16,7 +16,7 @@
#include "test_util_rmt_encoders.h"
#include "test_board.h"
#if CONFIG_RMT_ISR_CACHE_SAFE
#if CONFIG_RMT_TX_ISR_CACHE_SAFE
#define TEST_RMT_CALLBACK_ATTR IRAM_ATTR
#else
#define TEST_RMT_CALLBACK_ATTR

@ -1,5 +1,6 @@
CONFIG_COMPILER_DUMP_RTL_FILES=y
CONFIG_RMT_ISR_CACHE_SAFE=y
CONFIG_RMT_TX_ISR_CACHE_SAFE=y
CONFIG_RMT_RX_ISR_CACHE_SAFE=y
CONFIG_RMT_RECV_FUNC_IN_IRAM=y
CONFIG_GPIO_CTRL_FUNC_IN_IRAM=y
CONFIG_COMPILER_OPTIMIZATION_NONE=y

@ -567,7 +567,7 @@ Cache Safe
By default, the RMT interrupt is deferred when the Cache is disabled for reasons like writing or erasing the main Flash. Thus the transaction-done interrupt does not get handled in time, which is not acceptable in a real-time application. Worse still, when the RMT transaction relies on the **ping-pong** interrupt to successively encode or copy RMT symbols, a delayed interrupt can lead to unpredictable results.
There is a Kconfig option :ref:`CONFIG_RMT_ISR_CACHE_SAFE` that has the following features:
There are two Kconfig options, :ref:`CONFIG_RMT_TX_ISR_CACHE_SAFE` and :ref:`CONFIG_RMT_RX_ISR_CACHE_SAFE`, that have the following features:
1. Enable the interrupt to be serviced even when the cache is disabled
2. Place all functions used by the ISR into IRAM [2]_
@ -594,7 +594,7 @@ The following functions are allowed to use under ISR context as well.
Kconfig Options
^^^^^^^^^^^^^^^
- :ref:`CONFIG_RMT_ISR_CACHE_SAFE` controls whether the default ISR handler can work when cache is disabled, see also :ref:`rmt-cache-safe` for more information.
- :ref:`CONFIG_RMT_TX_ISR_CACHE_SAFE` and :ref:`CONFIG_RMT_RX_ISR_CACHE_SAFE` control whether the default TX and RX ISR handlers can work when the cache is disabled; see also :ref:`rmt-cache-safe` for more information.
- :ref:`CONFIG_RMT_ENABLE_DEBUG_LOG` is used to enable the debug log at the cost of increased firmware binary size.
- :ref:`CONFIG_RMT_RECV_FUNC_IN_IRAM` controls where to place the RMT receive function (IRAM or Flash); see :ref:`rmt-cache-safe` for more information.
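
As a usage sketch (not part of this diff), re-arming reception from inside an IRAM-resident RX done callback shows why :ref:`CONFIG_RMT_RECV_FUNC_IN_IRAM` pairs with :ref:`CONFIG_RMT_RX_ISR_CACHE_SAFE`: the call into ``rmt_receive`` must not land in flash. Buffer size, timing values, and names below are hypothetical, and a non-DMA RX channel is assumed.

// Assumes CONFIG_RMT_RX_ISR_CACHE_SAFE=y and CONFIG_RMT_RECV_FUNC_IN_IRAM=y
static rmt_symbol_word_t s_rx_buffer[128];      // static buffer in internal RAM
static const rmt_receive_config_t s_rx_config = {
    .signal_range_min_ns = 1250,                // filter out glitches shorter than this
    .signal_range_max_ns = 12000000,            // treat longer idle as end of frame
};

static IRAM_ATTR bool example_restart_rx_cb(rmt_channel_handle_t channel,
                                            const rmt_rx_done_event_data_t *edata,
                                            void *user_ctx)
{
    // hand the finished symbols to a task here (omitted), then immediately
    // queue the next receive job -- rmt_receive itself must be IRAM-resident
    rmt_receive(channel, s_rx_buffer, sizeof(s_rx_buffer), &s_rx_config);
    return false;
}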

@ -567,7 +567,7 @@ Cache Safe
By default, when the cache is disabled (for example while the main flash is being written or erased), the RMT interrupt is deferred and the event callbacks run late. Such situations should be avoided in real-time applications. Worse still, when an RMT transaction relies on the **ping-pong** interrupt to successively encode or copy RMT symbols, a delayed interrupt leads to unpredictable results.
For this reason, you can enable the Kconfig option :ref:`CONFIG_RMT_ISR_CACHE_SAFE`, which:
For this reason, you can enable the Kconfig options :ref:`CONFIG_RMT_TX_ISR_CACHE_SAFE` and :ref:`CONFIG_RMT_RX_ISR_CACHE_SAFE`, which:
1. Allow the required interrupt to be serviced even when the cache is disabled
2. Place all functions used by the ISR in IRAM [2]_
@ -594,7 +594,7 @@ The RMT driver guarantees that the factory functions :cpp:func:`rmt_new_tx_channel`, :cpp:func
Kconfig Options
^^^^^^^^^^^^^^^
- :ref:`CONFIG_RMT_ISR_CACHE_SAFE` controls whether the default ISR handler can work while the cache is disabled. See :ref:`rmt-cache-safe` for details.
- :ref:`CONFIG_RMT_TX_ISR_CACHE_SAFE` and :ref:`CONFIG_RMT_RX_ISR_CACHE_SAFE` control whether the default ISR handlers can work while the cache is disabled. See :ref:`rmt-cache-safe` for details.
- :ref:`CONFIG_RMT_ENABLE_DEBUG_LOG` enables the debug log at the cost of a larger firmware binary.
- :ref:`CONFIG_RMT_RECV_FUNC_IN_IRAM` controls where the RMT receive function is linked (IRAM or Flash). See :ref:`rmt-cache-safe` for details.