/*
 * SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include "esp_memory_utils.h"
#include "esp_cache.h"
#include "esp_rom_gpio.h"
#include "soc/rmt_periph.h"
#include "driver/gpio.h"
#include "driver/rmt_rx.h"
#include "rmt_private.h"

static esp_err_t rmt_del_rx_channel(rmt_channel_handle_t channel);
static esp_err_t rmt_rx_demodulate_carrier(rmt_channel_handle_t channel, const rmt_carrier_config_t *config);
static esp_err_t rmt_rx_enable(rmt_channel_handle_t channel);
static esp_err_t rmt_rx_disable(rmt_channel_handle_t channel);
static void rmt_rx_default_isr(void *args);
#if SOC_RMT_SUPPORT_DMA
static bool rmt_dma_rx_one_block_cb(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data);

__attribute__((always_inline))
static inline void rmt_rx_mount_dma_buffer(rmt_rx_channel_t *rx_chan, const void *buffer, size_t buffer_size, size_t per_block_size, size_t last_block_size)
{
    uint8_t *data = (uint8_t *)buffer;
    for (int i = 0; i < rx_chan->num_dma_nodes; i++) {
        rmt_dma_descriptor_t *desc_nc = &rx_chan->dma_nodes_nc[i];
        desc_nc->buffer = data + i * per_block_size;
        desc_nc->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA;
        desc_nc->dw0.suc_eof = 0;
        desc_nc->dw0.length = 0;
        desc_nc->dw0.size = per_block_size;
    }
    rx_chan->dma_nodes_nc[rx_chan->num_dma_nodes - 1].dw0.size = last_block_size;
}
static esp_err_t rmt_rx_init_dma_link(rmt_rx_channel_t *rx_channel, const rmt_rx_channel_config_t *config)
{
    gdma_channel_alloc_config_t dma_chan_config = {
        .direction = GDMA_CHANNEL_DIRECTION_RX,
#if CONFIG_RMT_RX_ISR_CACHE_SAFE
        .flags.isr_cache_safe = true,
#endif
    };
    ESP_RETURN_ON_ERROR(gdma_new_ahb_channel(&dma_chan_config, &rx_channel->base.dma_chan), TAG, "allocate RX DMA channel failed");
    gdma_transfer_config_t transfer_cfg = {
        .access_ext_mem = false, // [IDF-8997]: PSRAM is not supported yet
        .max_data_burst_size = 32,
    };
    ESP_RETURN_ON_ERROR(gdma_config_transfer(rx_channel->base.dma_chan, &transfer_cfg), TAG, "config DMA transfer failed");
    // get the alignment requirement from DMA
    gdma_get_alignment_constraints(rx_channel->base.dma_chan, &rx_channel->dma_int_mem_alignment, NULL);
    // register event callbacks
    gdma_rx_event_callbacks_t cbs = {
        .on_recv_done = rmt_dma_rx_one_block_cb,
    };
    // registering the DMA callbacks may fail if the interrupt service cannot be installed successfully
    ESP_RETURN_ON_ERROR(gdma_register_rx_event_callbacks(rx_channel->base.dma_chan, &cbs, rx_channel), TAG, "register DMA callbacks failed");
    // link the DMA descriptors into a circular list
    for (int i = 0; i < rx_channel->num_dma_nodes - 1; i++) {
        rx_channel->dma_nodes_nc[i].next = &rx_channel->dma_nodes[i + 1];
    }
    rx_channel->dma_nodes_nc[rx_channel->num_dma_nodes - 1].next = &rx_channel->dma_nodes[0];
    return ESP_OK;
}
#endif // SOC_RMT_SUPPORT_DMA
static esp_err_t rmt_rx_register_to_group(rmt_rx_channel_t *rx_channel, const rmt_rx_channel_config_t *config)
{
    size_t mem_block_num = 0;
    // start to search for a free channel
    // a channel can take up its neighbour's memory block, which makes the neighbour channel unusable, so we should skip these "invaded" ones
    int channel_scan_start = RMT_RX_CHANNEL_OFFSET_IN_GROUP;
    int channel_scan_end = RMT_RX_CHANNEL_OFFSET_IN_GROUP + SOC_RMT_RX_CANDIDATES_PER_GROUP;
    if (config->flags.with_dma) {
        // for DMA mode, the memory block number is always 1; for non-DMA mode, the memory block number is configured by the user
        mem_block_num = 1;
        // only the last channel has the DMA capability
        channel_scan_start = RMT_RX_CHANNEL_OFFSET_IN_GROUP + SOC_RMT_RX_CANDIDATES_PER_GROUP - 1;
        rx_channel->ping_pong_symbols = 0; // with DMA, we don't need to do ping-pong
    } else {
        // one channel can occupy multiple memory blocks
        mem_block_num = config->mem_block_symbols / SOC_RMT_MEM_WORDS_PER_CHANNEL;
        if (mem_block_num * SOC_RMT_MEM_WORDS_PER_CHANNEL < config->mem_block_symbols) {
            mem_block_num++;
        }
        rx_channel->ping_pong_symbols = mem_block_num * SOC_RMT_MEM_WORDS_PER_CHANNEL / 2;
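        // note: ping_pong_symbols is half of the channel's memory block; the threshold interrupt can then
        // fire when one half is full while the hardware keeps writing into the other half (see the RX wrap setup later)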
    }
    rx_channel->base.mem_block_num = mem_block_num;
    // search free channel and then register to the group
    // memory blocks used by one channel must be continuous
    uint32_t channel_mask = (1 << mem_block_num) - 1;
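    // e.g. a channel that needs 2 memory blocks gets channel_mask = 0b11; the mask is shifted to each
    // candidate position below to test whether that many adjacent blocks are still free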
    rmt_group_t *group = NULL;
    int channel_id = -1;
    for (int i = 0; i < SOC_RMT_GROUPS; i++) {
        group = rmt_acquire_group_handle(i);
        ESP_RETURN_ON_FALSE(group, ESP_ERR_NO_MEM, TAG, "no mem for group (%d)", i);
        portENTER_CRITICAL(&group->spinlock);
        for (int j = channel_scan_start; j < channel_scan_end; j++) {
            if (!(group->occupy_mask & (channel_mask << j))) {
                group->occupy_mask |= (channel_mask << j);
                // the channel ID should index from 0
                channel_id = j - RMT_RX_CHANNEL_OFFSET_IN_GROUP;
                group->rx_channels[channel_id] = rx_channel;
                break;
            }
        }
        portEXIT_CRITICAL(&group->spinlock);
        if (channel_id < 0) {
            // didn't find a capable channel in the group, don't forget to release the group handle
            rmt_release_group_handle(group);
        } else {
            rx_channel->base.channel_id = channel_id;
            rx_channel->base.channel_mask = channel_mask;
            rx_channel->base.group = group;
            break;
        }
    }
    ESP_RETURN_ON_FALSE(channel_id >= 0, ESP_ERR_NOT_FOUND, TAG, "no free rx channels");
    return ESP_OK;
}
static void rmt_rx_unregister_from_group(rmt_channel_t *channel, rmt_group_t *group)
{
    portENTER_CRITICAL(&group->spinlock);
    group->rx_channels[channel->channel_id] = NULL;
    group->occupy_mask &= ~(channel->channel_mask << (channel->channel_id + RMT_RX_CHANNEL_OFFSET_IN_GROUP));
    portEXIT_CRITICAL(&group->spinlock);
    // channel has a reference on group, release it now
    rmt_release_group_handle(group);
}
static esp_err_t rmt_rx_destroy(rmt_rx_channel_t *rx_channel)
{
    if (rx_channel->base.gpio_num >= 0) {
        int group_id = rx_channel->base.group->group_id;
        int channel_id = rx_channel->base.channel_id;
        esp_rom_gpio_connect_in_signal(GPIO_MATRIX_CONST_ZERO_INPUT,
                                       rmt_periph_signals.groups[group_id].channels[channel_id + RMT_RX_CHANNEL_OFFSET_IN_GROUP].rx_sig,
                                       false);
    }
    if (rx_channel->base.intr) {
        ESP_RETURN_ON_ERROR(esp_intr_free(rx_channel->base.intr), TAG, "delete interrupt service failed");
    }
#if CONFIG_PM_ENABLE
    if (rx_channel->base.pm_lock) {
        ESP_RETURN_ON_ERROR(esp_pm_lock_delete(rx_channel->base.pm_lock), TAG, "delete pm_lock failed");
    }
#endif
#if SOC_RMT_SUPPORT_DMA
    if (rx_channel->base.dma_chan) {
        ESP_RETURN_ON_ERROR(gdma_del_channel(rx_channel->base.dma_chan), TAG, "delete dma channel failed");
    }
#endif // SOC_RMT_SUPPORT_DMA
    if (rx_channel->base.group) {
        // de-register channel from RMT group
        rmt_rx_unregister_from_group(&rx_channel->base, rx_channel->base.group);
    }
    if (rx_channel->dma_nodes) {
        free(rx_channel->dma_nodes);
    }
    free(rx_channel);
    return ESP_OK;
}
esp_err_t rmt_new_rx_channel(const rmt_rx_channel_config_t *config, rmt_channel_handle_t *ret_chan)
{
    esp_err_t ret = ESP_OK;
    rmt_rx_channel_t *rx_channel = NULL;
    // Check if priority is valid
    if (config->intr_priority) {
        ESP_RETURN_ON_FALSE((config->intr_priority) > 0, ESP_ERR_INVALID_ARG, TAG, "invalid interrupt priority:%d", config->intr_priority);
        ESP_RETURN_ON_FALSE(1 << (config->intr_priority) & RMT_ALLOW_INTR_PRIORITY_MASK, ESP_ERR_INVALID_ARG, TAG, "invalid interrupt priority:%d", config->intr_priority);
    }
    ESP_RETURN_ON_FALSE(config && ret_chan && config->resolution_hz, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE(GPIO_IS_VALID_GPIO(config->gpio_num), ESP_ERR_INVALID_ARG, TAG, "invalid GPIO number %d", config->gpio_num);
    ESP_RETURN_ON_FALSE((config->mem_block_symbols & 0x01) == 0 && config->mem_block_symbols >= SOC_RMT_MEM_WORDS_PER_CHANNEL,
                        ESP_ERR_INVALID_ARG, TAG, "mem_block_symbols must be even and at least %d", SOC_RMT_MEM_WORDS_PER_CHANNEL);
#if SOC_RMT_SUPPORT_DMA
    if (config->flags.with_dma) {
#if CONFIG_RMT_RX_ISR_CACHE_SAFE && (!CONFIG_GDMA_ISR_HANDLER_IN_IRAM || !CONFIG_GDMA_CTRL_FUNC_IN_IRAM)
        ESP_RETURN_ON_FALSE(false, ESP_ERR_INVALID_STATE, TAG, "CONFIG_GDMA_ISR_HANDLER_IN_IRAM and CONFIG_GDMA_CTRL_FUNC_IN_IRAM must be enabled");
#endif
    }
#else
    ESP_RETURN_ON_FALSE(config->flags.with_dma == 0, ESP_ERR_NOT_SUPPORTED, TAG, "DMA not supported");
#endif // SOC_RMT_SUPPORT_DMA
#if !SOC_RMT_SUPPORT_SLEEP_RETENTION
    ESP_RETURN_ON_FALSE(config->flags.allow_pd == 0, ESP_ERR_NOT_SUPPORTED, TAG, "not able to power down in light sleep");
#endif // SOC_RMT_SUPPORT_SLEEP_RETENTION

    // malloc channel memory
    uint32_t mem_caps = RMT_MEM_ALLOC_CAPS;
    rx_channel = heap_caps_calloc(1, sizeof(rmt_rx_channel_t), mem_caps);
    ESP_GOTO_ON_FALSE(rx_channel, ESP_ERR_NO_MEM, err, TAG, "no mem for rx channel");
    // gpio is not configured yet
    rx_channel->base.gpio_num = -1;
#if SOC_RMT_SUPPORT_DMA
    // create DMA descriptor
    size_t num_dma_nodes = 0;
    if (config->flags.with_dma) {
        // DMA descriptors must be placed in internal SRAM
        mem_caps |= MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA;
        num_dma_nodes = config->mem_block_symbols * sizeof(rmt_symbol_word_t) / DMA_DESCRIPTOR_BUFFER_MAX_SIZE + 1;
        num_dma_nodes = MAX(2, num_dma_nodes); // at least 2 DMA nodes for ping-pong
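        // e.g. a 64-symbol channel covers 64 * 4 = 256 bytes, which fits in a single descriptor,
        // so num_dma_nodes is raised to the minimum of 2 here to keep the ping-pong scheme working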
        rmt_dma_descriptor_t *dma_nodes = heap_caps_aligned_calloc(RMT_DMA_DESC_ALIGN, num_dma_nodes, sizeof(rmt_dma_descriptor_t), mem_caps);
        ESP_GOTO_ON_FALSE(dma_nodes, ESP_ERR_NO_MEM, err, TAG, "no mem for rx channel DMA nodes");
        rx_channel->dma_nodes = dma_nodes;
        // do memory sync only when the data cache exists
        uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
        if (data_cache_line_size) {
            // write back and then invalidate the cached dma_nodes, because later the DMA nodes are accessed by non-cacheable address
            ESP_GOTO_ON_ERROR(esp_cache_msync(dma_nodes, num_dma_nodes * sizeof(rmt_dma_descriptor_t),
                                              ESP_CACHE_MSYNC_FLAG_DIR_C2M | ESP_CACHE_MSYNC_FLAG_INVALIDATE | ESP_CACHE_MSYNC_FLAG_UNALIGNED),
                              err, TAG, "cache sync failed");
        }
        // we will use the non-cached address to manipulate the DMA descriptor, for simplicity
        rx_channel->dma_nodes_nc = (rmt_dma_descriptor_t *)RMT_GET_NON_CACHE_ADDR(dma_nodes);
    }
    rx_channel->num_dma_nodes = num_dma_nodes;
#endif // SOC_RMT_SUPPORT_DMA
    // register the channel to the group
    ESP_GOTO_ON_ERROR(rmt_rx_register_to_group(rx_channel, config), err, TAG, "register channel failed");
    rmt_group_t *group = rx_channel->base.group;
    rmt_hal_context_t *hal = &group->hal;
    int channel_id = rx_channel->base.channel_id;
    int group_id = group->group_id;
#if RMT_USE_RETENTION_LINK
    if (config->flags.allow_pd != 0) {
        rmt_create_retention_module(group);
    }
#endif // RMT_USE_RETENTION_LINK

    // reset channel, make sure the RX engine is not working, and events are cleared
    portENTER_CRITICAL(&group->spinlock);
    rmt_hal_rx_channel_reset(&group->hal, channel_id);
    portEXIT_CRITICAL(&group->spinlock);
    // when the channel receives an end marker, a DMA in_suc_eof interrupt will be generated,
    // so we don't rely on the RMT interrupt anymore; the GDMA event callback is sufficient
    if (config->flags.with_dma) {
#if SOC_RMT_SUPPORT_DMA
        ESP_GOTO_ON_ERROR(rmt_rx_init_dma_link(rx_channel, config), err, TAG, "install rx DMA failed");
#endif // SOC_RMT_SUPPORT_DMA
    } else {
        // RMT interrupt is mandatory if the channel doesn't use DMA
        // --- install interrupt service
        // the interrupt is mandatory to run basic RMT transactions, so it's not lazily installed in `rmt_rx_register_event_callbacks()`
        // 1-- set the user specified priority to `group->intr_priority`
        bool priority_conflict = rmt_set_intr_priority_to_group(group, config->intr_priority);
        ESP_GOTO_ON_FALSE(!priority_conflict, ESP_ERR_INVALID_ARG, err, TAG, "intr_priority conflict");
        // 2-- get the interrupt allocation flags
        int isr_flags = rmt_isr_priority_to_flags(group) | RMT_RX_INTR_ALLOC_FLAG;
        // 3-- allocate the interrupt using the isr_flags
        ret = esp_intr_alloc_intrstatus(rmt_periph_signals.groups[group_id].irq, isr_flags,
                                        (uint32_t)rmt_ll_get_interrupt_status_reg(hal->regs),
                                        RMT_LL_EVENT_RX_MASK(channel_id), rmt_rx_default_isr, rx_channel, &rx_channel->base.intr);
        ESP_GOTO_ON_ERROR(ret, err, TAG, "install rx interrupt failed");
    }
    rx_channel->base.direction = RMT_CHANNEL_DIRECTION_RX;
    // select the clock source and set the clock resolution
    ESP_GOTO_ON_ERROR(rmt_select_periph_clock(&rx_channel->base, config->clk_src, config->resolution_hz), err, TAG, "set clock resolution failed");

    rx_channel->filter_clock_resolution_hz = group->resolution_hz;
    // On esp32 and esp32s2, the counting clock used by the RX filter always comes from the APB clock,
    // no matter which clock source is used by the RMT channel as the "core" clock
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S2
    esp_clk_tree_src_get_freq_hz(SOC_MOD_CLK_APB, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED, &rx_channel->filter_clock_resolution_hz);
#endif
    rmt_ll_rx_set_mem_blocks(hal->regs, channel_id, rx_channel->base.mem_block_num);
    rmt_ll_rx_set_mem_owner(hal->regs, channel_id, RMT_LL_MEM_OWNER_HW);
#if SOC_RMT_SUPPORT_RX_PINGPONG
    rmt_ll_rx_set_limit(hal->regs, channel_id, rx_channel->ping_pong_symbols);
    // always enable rx wrap; both DMA mode and ping-pong mode rely on this feature
    rmt_ll_rx_enable_wrap(hal->regs, channel_id, true);
#endif
#if SOC_RMT_SUPPORT_RX_DEMODULATION
    // disable carrier demodulation by default, it can be re-enabled by `rmt_apply_carrier()`
    rmt_ll_rx_enable_carrier_demodulation(hal->regs, channel_id, false);
#endif
    // GPIO Matrix/MUX configuration
    gpio_func_sel(config->gpio_num, PIN_FUNC_GPIO);
    gpio_input_enable(config->gpio_num);
    esp_rom_gpio_connect_in_signal(config->gpio_num,
                                   rmt_periph_signals.groups[group_id].channels[channel_id + RMT_RX_CHANNEL_OFFSET_IN_GROUP].rx_sig,
                                   config->flags.invert_in);
    rx_channel->base.gpio_num = config->gpio_num;

    // initialize other members of rx channel
    portMUX_INITIALIZE(&rx_channel->base.spinlock);
    atomic_init(&rx_channel->base.fsm, RMT_FSM_INIT);
    rx_channel->base.hw_mem_base = &RMTMEM.channels[channel_id + RMT_RX_CHANNEL_OFFSET_IN_GROUP].symbols[0];
    // polymorphic methods
    rx_channel->base.del = rmt_del_rx_channel;
    rx_channel->base.set_carrier_action = rmt_rx_demodulate_carrier;
    rx_channel->base.enable = rmt_rx_enable;
    rx_channel->base.disable = rmt_rx_disable;
    // return general channel handle
    *ret_chan = &rx_channel->base;
    ESP_LOGD(TAG, "new rx channel(%d,%d) at %p, gpio=%d, res=%"PRIu32"Hz, hw_mem_base=%p, ping_pong_size=%d",
             group_id, channel_id, rx_channel, config->gpio_num, rx_channel->base.resolution_hz,
             rx_channel->base.hw_mem_base, rx_channel->ping_pong_symbols);
    return ESP_OK;
err:
    if (rx_channel) {
        rmt_rx_destroy(rx_channel);
    }
    return ret;
}
static esp_err_t rmt_del_rx_channel(rmt_channel_handle_t channel)
{
    ESP_RETURN_ON_FALSE(atomic_load(&channel->fsm) == RMT_FSM_INIT,
                        ESP_ERR_INVALID_STATE, TAG, "channel not in init state");
    rmt_rx_channel_t *rx_chan = __containerof(channel, rmt_rx_channel_t, base);
    rmt_group_t *group = channel->group;
    int group_id = group->group_id;
    int channel_id = channel->channel_id;
    ESP_LOGD(TAG, "del rx channel(%d,%d)", group_id, channel_id);
    // recycle memory resource
    ESP_RETURN_ON_ERROR(rmt_rx_destroy(rx_chan), TAG, "destroy rx channel failed");
    return ESP_OK;
}
esp_err_t rmt_rx_register_event_callbacks(rmt_channel_handle_t channel, const rmt_rx_event_callbacks_t *cbs, void *user_data)
{
    ESP_RETURN_ON_FALSE(channel && cbs, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE(channel->direction == RMT_CHANNEL_DIRECTION_RX, ESP_ERR_INVALID_ARG, TAG, "invalid channel direction");
    rmt_rx_channel_t *rx_chan = __containerof(channel, rmt_rx_channel_t, base);
#if CONFIG_RMT_RX_ISR_CACHE_SAFE
    if (cbs->on_recv_done) {
        ESP_RETURN_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_done), ESP_ERR_INVALID_ARG, TAG, "on_recv_done callback not in IRAM");
    }
    if (user_data) {
        ESP_RETURN_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG, TAG, "user context not in internal RAM");
    }
#endif
    rx_chan->on_recv_done = cbs->on_recv_done;
    rx_chan->user_data = user_data;
    return ESP_OK;
}
esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_size, const rmt_receive_config_t *config)
{
    ESP_RETURN_ON_FALSE_ISR(channel && buffer && buffer_size && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE_ISR(channel->direction == RMT_CHANNEL_DIRECTION_RX, ESP_ERR_INVALID_ARG, TAG, "invalid channel direction");
#if !SOC_RMT_SUPPORT_RX_PINGPONG
    ESP_RETURN_ON_FALSE_ISR(!config->flags.en_partial_rx, ESP_ERR_NOT_SUPPORTED, TAG, "partial receive not supported");
#endif
    rmt_rx_channel_t *rx_chan = __containerof(channel, rmt_rx_channel_t, base);
    size_t mem_alignment = sizeof(rmt_symbol_word_t);
#if SOC_RMT_SUPPORT_DMA
    if (channel->dma_chan) {
        // append the alignment requirement from the DMA
        mem_alignment = MAX(mem_alignment, rx_chan->dma_int_mem_alignment);
        // [IDF-8997]: Currently we assume the user buffer is allocated from internal RAM, PSRAM is not supported yet.
        ESP_RETURN_ON_FALSE_ISR(esp_ptr_internal(buffer), ESP_ERR_INVALID_ARG, TAG, "user buffer not in the internal RAM");
        size_t max_buf_sz_per_dma_node = ALIGN_DOWN(DMA_DESCRIPTOR_BUFFER_MAX_SIZE, mem_alignment);
        ESP_RETURN_ON_FALSE_ISR(buffer_size <= rx_chan->num_dma_nodes * max_buf_sz_per_dma_node,
                                ESP_ERR_INVALID_ARG, TAG, "buffer size exceeds DMA capacity: %zu", rx_chan->num_dma_nodes * max_buf_sz_per_dma_node);
    }
#endif // SOC_RMT_SUPPORT_DMA
    // check buffer alignment
    uint32_t align_check_mask = mem_alignment - 1;
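    // e.g. with a 4-byte alignment requirement, align_check_mask is 0x3, so both the buffer address and
    // the buffer size must be multiples of 4 bytes to pass the check below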
    ESP_RETURN_ON_FALSE_ISR(((((uintptr_t)buffer) & align_check_mask) == 0) && (((buffer_size) & align_check_mask) == 0), ESP_ERR_INVALID_ARG,
                            TAG, "buffer address or size are not %"PRIu32" bytes aligned", mem_alignment);

    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    int channel_id = channel->channel_id;
    uint32_t filter_reg_value = ((uint64_t)rx_chan->filter_clock_resolution_hz * config->signal_range_min_ns) / 1000000000UL;
    uint32_t idle_reg_value = ((uint64_t)channel->resolution_hz * config->signal_range_max_ns) / 1000000000UL;
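    // e.g. with a 1 MHz channel resolution and signal_range_max_ns = 10000, idle_reg_value becomes
    // 1000000 * 10000 / 1000000000 = 10 ticks; the filter threshold above uses the same conversion with filter_clock_resolution_hz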
    ESP_RETURN_ON_FALSE_ISR(filter_reg_value <= RMT_LL_MAX_FILTER_VALUE, ESP_ERR_INVALID_ARG, TAG, "signal_range_min_ns too big, should be less than %"PRIu32"ns", (uint32_t)((uint64_t)RMT_LL_MAX_FILTER_VALUE * 1000000000UL / rx_chan->filter_clock_resolution_hz));
    ESP_RETURN_ON_FALSE_ISR(idle_reg_value <= RMT_LL_MAX_IDLE_VALUE, ESP_ERR_INVALID_ARG, TAG, "signal_range_max_ns too big, should be less than %"PRIu32"ns", (uint32_t)((uint64_t)RMT_LL_MAX_IDLE_VALUE * 1000000000UL / channel->resolution_hz));

    // check if we're in a proper state to start the receiver
    rmt_fsm_t expected_fsm = RMT_FSM_ENABLE;
    ESP_RETURN_ON_FALSE_ISR(atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_WAIT),
                            ESP_ERR_INVALID_STATE, TAG, "channel not in enable state");

    // fill in the transaction descriptor
    rmt_rx_trans_desc_t *t = &rx_chan->trans_desc;
    memset(t, 0, sizeof(rmt_rx_trans_desc_t));
    t->buffer = buffer;
    t->buffer_size = buffer_size;
    t->received_symbol_num = 0;
    t->copy_dest_off = 0;
    t->dma_desc_index = 0;
    t->flags.en_partial_rx = config->flags.en_partial_rx;
#if SOC_RMT_SUPPORT_DMA
    if (channel->dma_chan) {
        // invalidate the user buffer, in case a cache auto write-back happens and breaks the data just written by the DMA
        uint32_t int_mem_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
        if (int_mem_cache_line_size) {
            // this function will also check the alignment of the buffer and size against the cache line size
            ESP_RETURN_ON_ERROR_ISR(esp_cache_msync(buffer, buffer_size, ESP_CACHE_MSYNC_FLAG_DIR_M2C), TAG, "cache sync failed");
        }
        // we will mount the buffer to multiple DMA nodes, in a balanced way
        size_t per_dma_block_size = buffer_size / rx_chan->num_dma_nodes;
        per_dma_block_size = ALIGN_DOWN(per_dma_block_size, mem_alignment);
        size_t last_dma_block_size = buffer_size - per_dma_block_size * (rx_chan->num_dma_nodes - 1);
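        // e.g. a 300-byte buffer spread over 2 DMA nodes with 4-byte alignment gives per_dma_block_size = 148
        // and last_dma_block_size = 152: the last node absorbs the remainder left by the alignment rounding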
        rmt_rx_mount_dma_buffer(rx_chan, buffer, buffer_size, per_dma_block_size, last_dma_block_size);
        gdma_reset(channel->dma_chan);
        gdma_start(channel->dma_chan, (intptr_t)rx_chan->dma_nodes); // note, we must use the cached descriptor address to start the DMA
    }
#endif
    rx_chan->mem_off = 0;
    portENTER_CRITICAL_SAFE(&channel->spinlock);
    // reset memory writer offset
    rmt_ll_rx_reset_pointer(hal->regs, channel_id);
    rmt_ll_rx_set_mem_owner(hal->regs, channel_id, RMT_LL_MEM_OWNER_HW);
    // set sampling parameters of incoming signals
    rmt_ll_rx_set_filter_thres(hal->regs, channel_id, filter_reg_value);
    rmt_ll_rx_enable_filter(hal->regs, channel_id, config->signal_range_min_ns != 0);
    rmt_ll_rx_set_idle_thres(hal->regs, channel_id, idle_reg_value);
    // turn on RMT RX machine
    rmt_ll_rx_enable(hal->regs, channel_id, true);
    // now we're in the running state, which lasts until the receiving is done,
    // i.e., we will switch back to the enable state in the receive-done interrupt handler
    atomic_store(&channel->fsm, RMT_FSM_RUN);
    portEXIT_CRITICAL_SAFE(&channel->spinlock);

    return ESP_OK;
}
static esp_err_t rmt_rx_demodulate_carrier(rmt_channel_handle_t channel, const rmt_carrier_config_t *config)
{
#if !SOC_RMT_SUPPORT_RX_DEMODULATION
    ESP_RETURN_ON_FALSE(false, ESP_ERR_NOT_SUPPORTED, TAG, "rx demodulation not supported");
#else
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    int group_id = group->group_id;
    int channel_id = channel->channel_id;
    uint32_t real_frequency = 0;
    if (config && config->frequency_hz) {
        // the carrier demodulation module works based on the channel clock (this is different from the TX carrier modulation mode)
        uint32_t total_ticks = channel->resolution_hz / config->frequency_hz; // note this division operation will lose precision
        uint32_t high_ticks = total_ticks * config->duty_cycle;
        uint32_t low_ticks = total_ticks - high_ticks;
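        // e.g. with a 1 MHz channel resolution, a 38 kHz carrier and a 0.33 duty cycle:
        // total_ticks = 26, high_ticks = 8, low_ticks = 18, and the real demodulation frequency
        // computed below is 1000000 / 26 = 38461 Hz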
        portENTER_CRITICAL(&channel->spinlock);
        rmt_ll_rx_set_carrier_level(hal->regs, channel_id, !config->flags.polarity_active_low);
        rmt_ll_rx_set_carrier_high_low_ticks(hal->regs, channel_id, high_ticks, low_ticks);
        portEXIT_CRITICAL(&channel->spinlock);
        // save real carrier frequency
        real_frequency = channel->resolution_hz / (high_ticks + low_ticks);
    }
    // enable/disable carrier demodulation
    portENTER_CRITICAL(&channel->spinlock);
    rmt_ll_rx_enable_carrier_demodulation(hal->regs, channel_id, real_frequency > 0);
    portEXIT_CRITICAL(&channel->spinlock);
    if (real_frequency > 0) {
        ESP_LOGD(TAG, "enable carrier demodulation for channel(%d,%d), freq=%"PRIu32"Hz", group_id, channel_id, real_frequency);
    } else {
        ESP_LOGD(TAG, "disable carrier demodulation for channel(%d, %d)", group_id, channel_id);
    }
    return ESP_OK;
#endif
}
static esp_err_t rmt_rx_enable(rmt_channel_handle_t channel)
{
    // can only enable the channel when it's in "init" state
    rmt_fsm_t expected_fsm = RMT_FSM_INIT;
    ESP_RETURN_ON_FALSE(atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_WAIT),
                        ESP_ERR_INVALID_STATE, TAG, "channel not in init state");
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    int channel_id = channel->channel_id;
#if CONFIG_PM_ENABLE
    // acquire power manager lock
    if (channel->pm_lock) {
        esp_pm_lock_acquire(channel->pm_lock);
    }
#endif
    if (channel->dma_chan) {
#if SOC_RMT_SUPPORT_DMA
        // enable the DMA access mode
        portENTER_CRITICAL(&channel->spinlock);
        rmt_ll_rx_enable_dma(hal->regs, channel_id, true);
        portEXIT_CRITICAL(&channel->spinlock);
        gdma_connect(channel->dma_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_RMT, 0));
#endif // SOC_RMT_SUPPORT_DMA
    } else {
        portENTER_CRITICAL(&group->spinlock);
        rmt_ll_enable_interrupt(hal->regs, RMT_LL_EVENT_RX_MASK(channel_id), true);
        portEXIT_CRITICAL(&group->spinlock);
    }
    atomic_store(&channel->fsm, RMT_FSM_ENABLE);
    return ESP_OK;
}
static esp_err_t rmt_rx_disable(rmt_channel_handle_t channel)
{
    // can disable the channel when it's in `enable` or `run` state
    bool valid_state = false;
    rmt_fsm_t expected_fsm = RMT_FSM_ENABLE;
    if (atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_WAIT)) {
        valid_state = true;
    }
    expected_fsm = RMT_FSM_RUN;
    if (atomic_compare_exchange_strong(&channel->fsm, &expected_fsm, RMT_FSM_WAIT)) {
        valid_state = true;
    }
    ESP_RETURN_ON_FALSE(valid_state, ESP_ERR_INVALID_STATE, TAG, "channel not in enable or run state");
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    int channel_id = channel->channel_id;
    portENTER_CRITICAL(&channel->spinlock);
    rmt_ll_rx_enable(hal->regs, channel_id, false);
    portEXIT_CRITICAL(&channel->spinlock);
    if (channel->dma_chan) {
#if SOC_RMT_SUPPORT_DMA
        gdma_stop(channel->dma_chan);
        gdma_disconnect(channel->dma_chan);
        portENTER_CRITICAL(&channel->spinlock);
        rmt_ll_rx_enable_dma(hal->regs, channel_id, false);
        portEXIT_CRITICAL(&channel->spinlock);
#endif
    } else {
        portENTER_CRITICAL(&group->spinlock);
        rmt_ll_enable_interrupt(hal->regs, RMT_LL_EVENT_RX_MASK(channel_id), false);
        rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_RX_MASK(channel_id));
        portEXIT_CRITICAL(&group->spinlock);
    }
#if CONFIG_PM_ENABLE
    // release power manager lock
    if (channel->pm_lock) {
        esp_pm_lock_release(channel->pm_lock);
    }
#endif
    // now we can switch the state to init
    atomic_store(&channel->fsm, RMT_FSM_INIT);
    return ESP_OK;
}
bool rmt_isr_handle_rx_done(rmt_rx_channel_t *rx_chan)
{
    rmt_channel_t *channel = &rx_chan->base;
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    uint32_t channel_id = channel->channel_id;
    rmt_rx_trans_desc_t *trans_desc = &rx_chan->trans_desc;
    rmt_rx_done_callback_t cb = rx_chan->on_recv_done;
    bool need_yield = false;
    rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_RX_DONE(channel_id));
    portENTER_CRITICAL_ISR(&channel->spinlock);
    // disable the RX engine, it will be enabled again when the user calls `rmt_receive()` next time
    rmt_ll_rx_enable(hal->regs, channel_id, false);
    portEXIT_CRITICAL_ISR(&channel->spinlock);
    uint32_t offset = rmt_ll_rx_get_memory_writer_offset(hal->regs, channel_id);
    // Starting from the C6, the actual pulse count is the number of input pulses N - 1.
    // As a result, the last threshold interrupt may not be triggered when the number of received symbols
    // is a multiple of the memory block size. As shown in the figure below, we handle the offset specially.
    //          mem_off (should be updated here in the last threshold interrupt, but that interrupt was lost)
    //          |
    //          V
    // |________|________|
    //     |         |
    //   offset   mem_off (actually here now)
    size_t mem_want = (offset >= rx_chan->mem_off ? offset - rx_chan->mem_off : rx_chan->mem_off - offset);
    mem_want *= sizeof(rmt_symbol_word_t);
    size_t mem_have = trans_desc->buffer_size - trans_desc->copy_dest_off;
    size_t copy_size = mem_want;
    if (mem_want > mem_have) {
        if (trans_desc->flags.en_partial_rx) { // check whether partial receive is enabled
            // notify the user to process the received symbols if the buffer is going to be full
            if (trans_desc->received_symbol_num) {
                if (cb) {
                    rmt_rx_done_event_data_t edata = {
                        .received_symbols = trans_desc->buffer,
                        .num_symbols = trans_desc->received_symbol_num,
                        .flags.is_last = false,
                    };
                    if (cb(channel, &edata, rx_chan->user_data)) {
                        need_yield = true;
                    }
                }
                trans_desc->copy_dest_off = 0;
                trans_desc->received_symbol_num = 0;
                mem_have = trans_desc->buffer_size;
                // even after the user processes the partially received data, the remaining buffer may still be insufficient
                if (mem_want > mem_have) {
                    ESP_DRAM_LOGD(TAG, "user buffer too small, received symbols truncated");
                    copy_size = mem_have;
                }
            }
        } else {
            ESP_DRAM_LOGD(TAG, "user buffer too small, received symbols truncated");
            copy_size = mem_have;
        }
    }
    portENTER_CRITICAL_ISR(&channel->spinlock);
    rmt_ll_rx_set_mem_owner(hal->regs, channel_id, RMT_LL_MEM_OWNER_SW);
    // copy the symbols to the user buffer
    memcpy((uint8_t *)trans_desc->buffer + trans_desc->copy_dest_off, channel->hw_mem_base + rx_chan->mem_off, copy_size);
    rmt_ll_rx_set_mem_owner(hal->regs, channel_id, RMT_LL_MEM_OWNER_HW);
    portEXIT_CRITICAL_ISR(&channel->spinlock);
#if !SOC_RMT_SUPPORT_RX_PINGPONG
    // for chips that don't support ping-pong RX, we should check whether the receiver has encountered a long frame,
    // whose length is longer than the channel capacity
    if (rmt_ll_rx_get_interrupt_status_raw(hal->regs, channel_id) & RMT_LL_EVENT_RX_ERROR(channel_id)) {
        portENTER_CRITICAL_ISR(&channel->spinlock);
        rmt_ll_rx_reset_pointer(hal->regs, channel_id);
        portEXIT_CRITICAL_ISR(&channel->spinlock);
        // this clear operation can only take effect after we copy out the received data and reset the pointer
        rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_RX_ERROR(channel_id));
        ESP_DRAM_LOGD(TAG, "hw buffer too small, received symbols truncated");
    }
#endif // !SOC_RMT_SUPPORT_RX_PINGPONG
    trans_desc->copy_dest_off += copy_size;
    trans_desc->received_symbol_num += copy_size / sizeof(rmt_symbol_word_t);
    // switch back to the enable state, then the user can call `rmt_receive` to start a new receive
    atomic_store(&channel->fsm, RMT_FSM_ENABLE);
    // notify the user that all RMT symbols have been received
    if (cb) {
        rmt_rx_done_event_data_t edata = {
            .received_symbols = trans_desc->buffer,
            .num_symbols = trans_desc->received_symbol_num,
            .flags.is_last = true,
        };
        if (cb(channel, &edata, rx_chan->user_data)) {
            need_yield = true;
        }
    }
    return need_yield;
}
#if SOC_RMT_SUPPORT_RX_PINGPONG
bool rmt_isr_handle_rx_threshold(rmt_rx_channel_t *rx_chan)
{
    bool need_yield = false;
    rmt_channel_t *channel = &rx_chan->base;
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    uint32_t channel_id = channel->channel_id;
    rmt_rx_trans_desc_t *trans_desc = &rx_chan->trans_desc;
    rmt_ll_clear_interrupt_status(hal->regs, RMT_LL_EVENT_RX_THRES(channel_id));
    size_t mem_want = rx_chan->ping_pong_symbols * sizeof(rmt_symbol_word_t);
    size_t mem_have = trans_desc->buffer_size - trans_desc->copy_dest_off;
    size_t copy_size = mem_want;
    if (mem_want > mem_have) {
        if (trans_desc->flags.en_partial_rx) {
            // notify the user to process the received symbols if the buffer is going to be full
            if (trans_desc->received_symbol_num) {
                rmt_rx_done_callback_t cb = rx_chan->on_recv_done;
                if (cb) {
                    rmt_rx_done_event_data_t edata = {
                        .received_symbols = trans_desc->buffer,
                        .num_symbols = trans_desc->received_symbol_num,
                        .flags.is_last = false,
                    };
                    if (cb(channel, &edata, rx_chan->user_data)) {
                        need_yield = true;
                    }
                }
                trans_desc->copy_dest_off = 0;
                trans_desc->received_symbol_num = 0;
                mem_have = trans_desc->buffer_size;
                // even after the user processes the partially received data, the remaining buffer may still be insufficient
                if (mem_want > mem_have) {
                    ESP_DRAM_LOGD(TAG, "user buffer too small, received symbols truncated");
                    copy_size = mem_have;
                }
            }
        } else {
            ESP_DRAM_LOGD(TAG, "user buffer too small, received symbols truncated");
            copy_size = mem_have;
        }
    }
    portENTER_CRITICAL_ISR(&channel->spinlock);
    rmt_ll_rx_set_mem_owner(hal->regs, channel_id, RMT_LL_MEM_OWNER_SW);
    // copy the symbols to the user buffer
    memcpy((uint8_t *)trans_desc->buffer + trans_desc->copy_dest_off, channel->hw_mem_base + rx_chan->mem_off, copy_size);
    rmt_ll_rx_set_mem_owner(hal->regs, channel_id, RMT_LL_MEM_OWNER_HW);
    portEXIT_CRITICAL_ISR(&channel->spinlock);
    trans_desc->copy_dest_off += copy_size;
    trans_desc->received_symbol_num += copy_size / sizeof(rmt_symbol_word_t);

    // update the hw memory offset, where the next RMT symbols to copy are stored
    rx_chan->mem_off = rx_chan->ping_pong_symbols - rx_chan->mem_off;
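    // i.e. mem_off toggles between 0 and ping_pong_symbols, alternating between the two halves of the
    // channel RAM that the hardware fills in ping-pong fashion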
    return need_yield;
}
#endif // SOC_RMT_SUPPORT_RX_PINGPONG

static void rmt_rx_default_isr(void *args)
{
    rmt_rx_channel_t *rx_chan = (rmt_rx_channel_t *)args;
    rmt_channel_t *channel = &rx_chan->base;
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    uint32_t channel_id = channel->channel_id;
    bool need_yield = false;
    uint32_t status = rmt_ll_rx_get_interrupt_status(hal->regs, channel_id);
#if SOC_RMT_SUPPORT_RX_PINGPONG
    // RX threshold interrupt
    if (status & RMT_LL_EVENT_RX_THRES(channel_id)) {
        if (rmt_isr_handle_rx_threshold(rx_chan)) {
            need_yield = true;
        }
    }
#endif // SOC_RMT_SUPPORT_RX_PINGPONG
    // RX end interrupt
    if (status & RMT_LL_EVENT_RX_DONE(channel_id)) {
        if (rmt_isr_handle_rx_done(rx_chan)) {
            need_yield = true;
        }
    }
    if (need_yield) {
        portYIELD_FROM_ISR();
    }
}
#if SOC_RMT_SUPPORT_DMA
__attribute__((always_inline))
static inline size_t rmt_rx_count_symbols_until_eof(rmt_rx_channel_t *rx_chan, int start_index)
{
    size_t received_bytes = 0;
    for (int i = 0; i < rx_chan->num_dma_nodes; i++) {
        received_bytes += rx_chan->dma_nodes_nc[start_index].dw0.length;
        if (rx_chan->dma_nodes_nc[start_index].dw0.suc_eof) {
            break;
        }
        start_index++;
        start_index %= rx_chan->num_dma_nodes;
    }
    received_bytes = ALIGN_UP(received_bytes, sizeof(rmt_symbol_word_t));
    return received_bytes / sizeof(rmt_symbol_word_t);
}

__attribute__((always_inline))
static inline size_t rmt_rx_count_symbols_for_single_block(rmt_rx_channel_t *rx_chan, int desc_index)
{
    size_t received_bytes = rx_chan->dma_nodes_nc[desc_index].dw0.length;
    received_bytes = ALIGN_UP(received_bytes, sizeof(rmt_symbol_word_t));
    return received_bytes / sizeof(rmt_symbol_word_t);
}
static bool rmt_dma_rx_one_block_cb(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
{
    bool need_yield = false;
    rmt_rx_channel_t *rx_chan = (rmt_rx_channel_t *)user_data;
    rmt_channel_t *channel = &rx_chan->base;
    rmt_group_t *group = channel->group;
    rmt_hal_context_t *hal = &group->hal;
    rmt_rx_trans_desc_t *trans_desc = &rx_chan->trans_desc;
    uint32_t channel_id = channel->channel_id;
    uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
    if (data_cache_line_size) {
        // invalidate the user buffer, so that the DMA modified data can be seen by CPU
        esp_cache_msync(trans_desc->buffer, trans_desc->buffer_size, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
    }
    if (event_data->flags.normal_eof) {
        // if the DMA received an EOF, it means the RMT peripheral has received an "end marker"
        portENTER_CRITICAL_ISR(&channel->spinlock);
        // disable the RX engine, it will be enabled again in the next `rmt_receive()`
        rmt_ll_rx_enable(hal->regs, channel_id, false);
        portEXIT_CRITICAL_ISR(&channel->spinlock);

        // switch back to the enable state, then the user can call `rmt_receive` to start a new receive
        atomic_store(&channel->fsm, RMT_FSM_ENABLE);
        if (rx_chan->on_recv_done) {
            int recycle_start_index = trans_desc->dma_desc_index;
            rmt_rx_done_event_data_t edata = {
                .received_symbols = rx_chan->dma_nodes_nc[recycle_start_index].buffer,
                .num_symbols = rmt_rx_count_symbols_until_eof(rx_chan, recycle_start_index),
                .flags.is_last = true,
            };
            if (rx_chan->on_recv_done(channel, &edata, rx_chan->user_data)) {
                need_yield = true;
            }
        }
    } else {
        // it's a partial receive done event
        if (trans_desc->flags.en_partial_rx) {
            if (rx_chan->on_recv_done) {
                size_t dma_desc_index = trans_desc->dma_desc_index;
                rmt_rx_done_event_data_t edata = {
                    .received_symbols = rx_chan->dma_nodes_nc[dma_desc_index].buffer,
                    .num_symbols = rmt_rx_count_symbols_for_single_block(rx_chan, dma_desc_index),
                    .flags.is_last = false,
                };
                if (rx_chan->on_recv_done(channel, &edata, rx_chan->user_data)) {
                    need_yield = true;
                }
                dma_desc_index++;
                trans_desc->dma_desc_index = dma_desc_index % rx_chan->num_dma_nodes;
            }
        }
    }

    return need_yield;
}
#endif // SOC_RMT_SUPPORT_DMA