feat(esp_gdma): adapt the gdma driver to ahb-dma and axi-dma

This commit is contained in:
morris
2023-07-07 23:45:25 +08:00
parent 56a376c696
commit 9eb145aa21
12 changed files with 1969 additions and 553 deletions

View File

@@ -0,0 +1,505 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include "hal/gdma_types.h"
#include "hal/gdma_ll.h"
#include "soc/ahb_dma_struct.h"
#include "soc/ahb_dma_reg.h"
#ifdef __cplusplus
extern "C" {
#endif
#define AHB_DMA_LL_GET_HW(id) (((id) == 0) ? (&AHB_DMA) : NULL)
// any "dummy" peripheral ID can be used for M2M mode
#define AHB_DMA_LL_M2M_FREE_PERIPH_ID_MASK (0xFAC2)
#define AHB_DMA_LL_INVALID_PERIPH_ID (0x3F)
///////////////////////////////////// Common /////////////////////////////////////////
/**
 * @brief Force enable register clock
 *
 * @param dev DMA register base address
 * @param enable True to force the register clock always on, false to allow automatic gating
 */
static inline void ahb_dma_ll_force_enable_reg_clock(ahb_dma_dev_t *dev, bool enable)
{
    dev->misc_conf.clk_en = enable;
}

/**
 * @brief Disable priority arbitration
 *
 * @param dev DMA register base address
 * @param dis True to disable, false to enable
 */
static inline void ahb_dma_ll_disable_prio_arb(ahb_dma_dev_t *dev, bool dis)
{
    dev->misc_conf.arb_pri_dis = dis;
}

/**
 * @brief Reset DMA FSM
 *
 * @param dev DMA register base address
 */
static inline void ahb_dma_ll_reset_fsm(ahb_dma_dev_t *dev)
{
    // pulse the internal AHB master reset bit: write 1 then 0
    dev->misc_conf.ahbm_rst_inter = 1;
    dev->misc_conf.ahbm_rst_inter = 0;
}
///////////////////////////////////// RX /////////////////////////////////////////
/**
 * @brief Get DMA RX channel interrupt status word
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return Interrupt status word (bit layout follows GDMA_LL_EVENT_RX_* masks)
 */
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_rx_get_interrupt_status(ahb_dma_dev_t *dev, uint32_t channel)
{
    return dev->in_intr[channel].st.val;
}

/**
 * @brief Enable DMA RX channel interrupt
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param mask Bit mask of the interrupt events to enable/disable
 * @param enable True to enable, false to disable
 */
static inline void ahb_dma_ll_rx_enable_interrupt(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
    // read-modify-write so events outside `mask` keep their current state
    if (enable) {
        dev->in_intr[channel].ena.val |= mask;
    } else {
        dev->in_intr[channel].ena.val &= ~mask;
    }
}

/**
 * @brief Clear DMA RX channel interrupt
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param mask Bit mask of the interrupt events to clear
 */
__attribute__((always_inline))
static inline void ahb_dma_ll_rx_clear_interrupt_status(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mask)
{
    // write-1-to-clear register, so a plain store is sufficient
    dev->in_intr[channel].clr.val = mask;
}

/**
 * @brief Get DMA RX channel interrupt status register address
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return Address of the interrupt status register (e.g. for an interrupt dispatcher)
 */
static inline volatile void *ahb_dma_ll_rx_get_interrupt_status_reg(ahb_dma_dev_t *dev, uint32_t channel)
{
    return (volatile void *)(&dev->in_intr[channel].st);
}

/**
 * @brief Enable DMA RX channel to check the owner bit in the descriptor, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param enable True to enable, false to disable
 */
static inline void ahb_dma_ll_rx_enable_owner_check(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->channel[channel].in.in_conf1.in_check_owner_chn = enable;
}

/**
 * @brief Enable DMA RX channel burst reading data, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param enable True to enable, false to disable
 */
static inline void ahb_dma_ll_rx_enable_data_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->channel[channel].in.in_conf0.in_data_burst_en_chn = enable;
}

/**
 * @brief Enable DMA RX channel burst reading descriptor link, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param enable True to enable, false to disable
 */
static inline void ahb_dma_ll_rx_enable_descriptor_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->channel[channel].in.in_conf0.indscr_burst_en_chn = enable;
}

/**
 * @brief Reset DMA RX channel FSM and FIFO pointer
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 */
__attribute__((always_inline))
static inline void ahb_dma_ll_rx_reset_channel(ahb_dma_dev_t *dev, uint32_t channel)
{
    // pulse the channel reset bit: write 1 then 0
    dev->channel[channel].in.in_conf0.in_rst_chn = 1;
    dev->channel[channel].in.in_conf0.in_rst_chn = 0;
}

/**
 * @brief Check if DMA RX FIFO is full
 * @param fifo_level only supports level 1
 */
static inline bool ahb_dma_ll_rx_is_fifo_full(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
    // bit0 of the FIFO status word is the "full" flag; fifo_level is ignored
    return dev->channel[channel].in.infifo_status.val & 0x01;
}
/**
 * @brief Check if DMA RX FIFO is empty
 * @param fifo_level only supports level 1
 */
static inline bool ahb_dma_ll_rx_is_fifo_empty(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
    // bit1 of the FIFO status word is the "empty" flag; fifo_level is ignored
    return dev->channel[channel].in.infifo_status.val & 0x02;
}

/**
 * @brief Get number of bytes remained in the L1 RX FIFO
 * @param fifo_level only supports level 1
 */
static inline uint32_t ahb_dma_ll_rx_get_fifo_bytes(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
    return dev->channel[channel].in.infifo_status.infifo_cnt_chn;
}

/**
 * @brief Pop data from DMA RX FIFO
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return The word popped from the FIFO
 */
static inline uint32_t ahb_dma_ll_rx_pop_data(ahb_dma_dev_t *dev, uint32_t channel)
{
    // trigger a pop first, then read back the popped word
    dev->channel[channel].in.in_pop.infifo_pop_chn = 1;
    return dev->channel[channel].in.in_pop.infifo_rdata_chn;
}

/**
 * @brief Set the descriptor link base address for RX channel
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param addr Address of the first descriptor in the link
 */
__attribute__((always_inline))
static inline void ahb_dma_ll_rx_set_desc_addr(ahb_dma_dev_t *dev, uint32_t channel, uint32_t addr)
{
    dev->in_link_addr[channel].inlink_addr_chn = addr;
}

/**
 * @brief Start dealing with RX descriptors
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 */
__attribute__((always_inline))
static inline void ahb_dma_ll_rx_start(ahb_dma_dev_t *dev, uint32_t channel)
{
    dev->channel[channel].in.in_link.inlink_start_chn = 1;
}

/**
 * @brief Stop dealing with RX descriptors
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 */
__attribute__((always_inline))
static inline void ahb_dma_ll_rx_stop(ahb_dma_dev_t *dev, uint32_t channel)
{
    dev->channel[channel].in.in_link.inlink_stop_chn = 1;
}

/**
 * @brief Restart a new inlink right after the last descriptor
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 */
__attribute__((always_inline))
static inline void ahb_dma_ll_rx_restart(ahb_dma_dev_t *dev, uint32_t channel)
{
    dev->channel[channel].in.in_link.inlink_restart_chn = 1;
}

/**
 * @brief Enable DMA RX to return the address of current descriptor when receives error
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param enable True to enable, false to disable
 */
static inline void ahb_dma_ll_rx_enable_auto_return(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->channel[channel].in.in_link.inlink_auto_ret_chn = enable;
}

/**
 * @brief Check if DMA RX FSM is in IDLE state
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return True if the RX link FSM is parked (idle)
 */
static inline bool ahb_dma_ll_rx_is_fsm_idle(ahb_dma_dev_t *dev, uint32_t channel)
{
    return dev->channel[channel].in.in_link.inlink_park_chn;
}
/**
 * @brief Get RX success EOF descriptor's address
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return Address of the descriptor where the last successful EOF occurred
 */
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_rx_get_success_eof_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
{
    return dev->channel[channel].in.in_suc_eof_des_addr.val;
}

/**
 * @brief Get RX error EOF descriptor's address
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return Address of the descriptor where the error EOF occurred
 */
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_rx_get_error_eof_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
{
    return dev->channel[channel].in.in_err_eof_des_addr.val;
}

/**
 * @brief Get the pre-fetched RX descriptor's address
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return Address of the descriptor currently pre-fetched by hardware
 */
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_rx_get_prefetched_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
{
    return dev->channel[channel].in.in_dscr.val;
}

/**
 * @brief Set priority for DMA RX channel
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param prio Priority value (expected range [0, GDMA_LL_CHANNEL_MAX_PRIORITY])
 */
static inline void ahb_dma_ll_rx_set_priority(ahb_dma_dev_t *dev, uint32_t channel, uint32_t prio)
{
    dev->channel[channel].in.in_pri.rx_pri_chn = prio;
}

/**
 * @brief Connect DMA RX channel to a given peripheral
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param periph Trigger peripheral type; M2M additionally enables memory-to-memory mode
 * @param periph_id Numeric peripheral ID written to the selection register
 */
static inline void ahb_dma_ll_rx_connect_to_periph(ahb_dma_dev_t *dev, uint32_t channel, gdma_trigger_peripheral_t periph, int periph_id)
{
    dev->channel[channel].in.in_peri_sel.peri_in_sel_chn = periph_id;
    // memory-to-memory transfers need the dedicated enable bit on the RX side
    dev->channel[channel].in.in_conf0.mem_trans_en_chn = (periph == GDMA_TRIG_PERIPH_M2M);
}
/**
 * @brief Disconnect DMA RX channel from peripheral
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 */
static inline void ahb_dma_ll_rx_disconnect_from_periph(ahb_dma_dev_t *dev, uint32_t channel)
{
    // Use the invalid-ID constant declared in this header; GDMA_LL_INVALID_PERIPH_ID
    // is not defined by the common gdma_ll.h and would not resolve here
    dev->channel[channel].in.in_peri_sel.peri_in_sel_chn = AHB_DMA_LL_INVALID_PERIPH_ID;
    dev->channel[channel].in.in_conf0.mem_trans_en_chn = false;
}
/**
 * @brief Whether to enable the ETM subsystem for RX channel
 *
 * @note When ETM_EN is 1, only ETM tasks can be used to configure the transfer direction and enable the channel.
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param enable True to hand channel control over to ETM tasks, false for register control
 */
static inline void ahb_dma_ll_rx_enable_etm_task(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->channel[channel].in.in_conf0.in_etm_en_chn = enable;
}
///////////////////////////////////// TX /////////////////////////////////////////
/**
 * @brief Get DMA TX channel interrupt status word
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @return Interrupt status word (bit layout follows GDMA_LL_EVENT_TX_* masks)
 */
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_tx_get_interrupt_status(ahb_dma_dev_t *dev, uint32_t channel)
{
    return dev->out_intr[channel].st.val;
}

/**
 * @brief Enable DMA TX channel interrupt
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param mask Bit mask of the interrupt events to enable/disable
 * @param enable True to enable, false to disable
 */
static inline void ahb_dma_ll_tx_enable_interrupt(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
    // read-modify-write so events outside `mask` keep their current state
    if (enable) {
        dev->out_intr[channel].ena.val |= mask;
    } else {
        dev->out_intr[channel].ena.val &= ~mask;
    }
}

/**
 * @brief Clear DMA TX channel interrupt
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param mask Bit mask of the interrupt events to clear
 */
__attribute__((always_inline))
static inline void ahb_dma_ll_tx_clear_interrupt_status(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mask)
{
    // write-1-to-clear register, so a plain store is sufficient
    dev->out_intr[channel].clr.val = mask;
}

/**
 * @brief Get DMA TX channel interrupt status register address
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @return Address of the interrupt status register (e.g. for an interrupt dispatcher)
 */
static inline volatile void *ahb_dma_ll_tx_get_interrupt_status_reg(ahb_dma_dev_t *dev, uint32_t channel)
{
    return (volatile void *)(&dev->out_intr[channel].st);
}

/**
 * @brief Enable DMA TX channel to check the owner bit in the descriptor, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param enable True to enable, false to disable
 */
static inline void ahb_dma_ll_tx_enable_owner_check(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->channel[channel].out.out_conf1.out_check_owner_chn = enable;
}

/**
 * @brief Enable DMA TX channel burst sending data, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param enable True to enable, false to disable
 */
static inline void ahb_dma_ll_tx_enable_data_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->channel[channel].out.out_conf0.out_data_burst_en_chn = enable;
}

/**
 * @brief Enable DMA TX channel burst reading descriptor link, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param enable True to enable, false to disable
 */
static inline void ahb_dma_ll_tx_enable_descriptor_burst(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->channel[channel].out.out_conf0.outdscr_burst_en_chn = enable;
}

/**
 * @brief Set TX channel EOF mode
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param mode EOF mode selector written to the out_eof_mode field
 */
static inline void ahb_dma_ll_tx_set_eof_mode(ahb_dma_dev_t *dev, uint32_t channel, uint32_t mode)
{
    dev->channel[channel].out.out_conf0.out_eof_mode_chn = mode;
}

/**
 * @brief Enable DMA TX channel automatic write results back to descriptor after all data has been sent out, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param enable True to enable, false to disable
 */
static inline void ahb_dma_ll_tx_enable_auto_write_back(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->channel[channel].out.out_conf0.out_auto_wrback_chn = enable;
}

/**
 * @brief Reset DMA TX channel FSM and FIFO pointer
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 */
__attribute__((always_inline))
static inline void ahb_dma_ll_tx_reset_channel(ahb_dma_dev_t *dev, uint32_t channel)
{
    // pulse the channel reset bit: write 1 then 0
    dev->channel[channel].out.out_conf0.out_rst_chn = 1;
    dev->channel[channel].out.out_conf0.out_rst_chn = 0;
}
/**
 * @brief Check if DMA TX FIFO is full
 * @param fifo_level only supports level 1
 */
static inline bool ahb_dma_ll_tx_is_fifo_full(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
    // bit0 of the FIFO status word is the "full" flag; fifo_level is ignored
    return dev->channel[channel].out.outfifo_status.val & 0x01;
}

/**
 * @brief Check if DMA TX FIFO is empty
 * @param fifo_level only supports level 1
 */
static inline bool ahb_dma_ll_tx_is_fifo_empty(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
    // bit1 of the FIFO status word is the "empty" flag; fifo_level is ignored
    return dev->channel[channel].out.outfifo_status.val & 0x02;
}

/**
 * @brief Get number of bytes in TX FIFO
 * @param fifo_level only supports level 1
 */
static inline uint32_t ahb_dma_ll_tx_get_fifo_bytes(ahb_dma_dev_t *dev, uint32_t channel, uint32_t fifo_level)
{
    return dev->channel[channel].out.outfifo_status.outfifo_cnt_chn;
}

/**
 * @brief Push data into DMA TX FIFO
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param data Word to push into the FIFO
 */
static inline void ahb_dma_ll_tx_push_data(ahb_dma_dev_t *dev, uint32_t channel, uint32_t data)
{
    // stage the data word first, then trigger the push
    dev->channel[channel].out.out_push.outfifo_wdata_chn = data;
    dev->channel[channel].out.out_push.outfifo_push_chn = 1;
}

/**
 * @brief Set the descriptor link base address for TX channel
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param addr Address of the first descriptor in the link
 */
__attribute__((always_inline))
static inline void ahb_dma_ll_tx_set_desc_addr(ahb_dma_dev_t *dev, uint32_t channel, uint32_t addr)
{
    dev->out_link_addr[channel].outlink_addr_chn = addr;
}

/**
 * @brief Start dealing with TX descriptors
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 */
__attribute__((always_inline))
static inline void ahb_dma_ll_tx_start(ahb_dma_dev_t *dev, uint32_t channel)
{
    dev->channel[channel].out.out_link.outlink_start_chn = 1;
}

/**
 * @brief Stop dealing with TX descriptors
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 */
__attribute__((always_inline))
static inline void ahb_dma_ll_tx_stop(ahb_dma_dev_t *dev, uint32_t channel)
{
    dev->channel[channel].out.out_link.outlink_stop_chn = 1;
}

/**
 * @brief Restart a new outlink right after the last descriptor
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 */
__attribute__((always_inline))
static inline void ahb_dma_ll_tx_restart(ahb_dma_dev_t *dev, uint32_t channel)
{
    dev->channel[channel].out.out_link.outlink_restart_chn = 1;
}

/**
 * @brief Check if DMA TX FSM is in IDLE state
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @return True if the TX link FSM is parked (idle)
 */
static inline bool ahb_dma_ll_tx_is_fsm_idle(ahb_dma_dev_t *dev, uint32_t channel)
{
    return dev->channel[channel].out.out_link.outlink_park_chn;
}

/**
 * @brief Get TX EOF descriptor's address
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @return Address of the descriptor where the last EOF occurred
 */
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_tx_get_eof_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
{
    return dev->channel[channel].out.out_eof_des_addr.val;
}

/**
 * @brief Get the pre-fetched TX descriptor's address
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @return Address of the descriptor currently pre-fetched by hardware
 */
__attribute__((always_inline))
static inline uint32_t ahb_dma_ll_tx_get_prefetched_desc_addr(ahb_dma_dev_t *dev, uint32_t channel)
{
    return dev->channel[channel].out.out_dscr.val;
}

/**
 * @brief Set priority for DMA TX channel
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param prio Priority value (expected range [0, GDMA_LL_CHANNEL_MAX_PRIORITY])
 */
static inline void ahb_dma_ll_tx_set_priority(ahb_dma_dev_t *dev, uint32_t channel, uint32_t prio)
{
    dev->channel[channel].out.out_pri.tx_pri_chn = prio;
}
/**
 * @brief Connect DMA TX channel to a given peripheral
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param periph Trigger peripheral type (unused on the TX side; only the numeric ID matters)
 * @param periph_id Numeric peripheral ID written to the selection register
 */
static inline void ahb_dma_ll_tx_connect_to_periph(ahb_dma_dev_t *dev, uint32_t channel, gdma_trigger_peripheral_t periph, int periph_id)
{
    (void)periph;
    dev->channel[channel].out.out_peri_sel.peri_out_sel_chn = periph_id;
}
/**
 * @brief Disconnect DMA TX channel from peripheral
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 */
static inline void ahb_dma_ll_tx_disconnect_from_periph(ahb_dma_dev_t *dev, uint32_t channel)
{
    // Use the invalid-ID constant declared in this header; GDMA_LL_INVALID_PERIPH_ID
    // is not defined by the common gdma_ll.h and would not resolve here
    dev->channel[channel].out.out_peri_sel.peri_out_sel_chn = AHB_DMA_LL_INVALID_PERIPH_ID;
}
/**
 * @brief Whether to enable the ETM subsystem for TX channel
 *
 * @note When ETM_EN is 1, only ETM tasks can be used to configure the transfer direction and enable the channel.
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param enable True to hand channel control over to ETM tasks, false for register control
 */
static inline void ahb_dma_ll_tx_enable_etm_task(ahb_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->channel[channel].out.out_conf0.out_etm_en_chn = enable;
}
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,453 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include "hal/gdma_types.h"
#include "hal/gdma_ll.h"
#include "soc/axi_dma_struct.h"
#include "soc/axi_dma_reg.h"
#ifdef __cplusplus
extern "C" {
#endif
#define AXI_DMA_LL_GET_HW(id) (((id) == 0) ? (&AXI_DMA) : NULL)
// any "dummy" peripheral ID can be used for M2M mode
#define AXI_DMA_LL_M2M_FREE_PERIPH_ID_MASK (0xFFC0)
#define AXI_DMA_LL_INVALID_PERIPH_ID (0x3F)
///////////////////////////////////// Common /////////////////////////////////////////
/**
 * @brief Force enable register clock
 *
 * @param dev DMA register base address
 * @param enable True to force the register clock always on, false to allow automatic gating
 */
static inline void axi_dma_ll_force_enable_reg_clock(axi_dma_dev_t *dev, bool enable)
{
    dev->misc_conf.clk_en = enable;
}

/**
 * @brief Disable priority arbitration
 *
 * @param dev DMA register base address
 * @param dis True to disable, false to enable
 */
static inline void axi_dma_ll_disable_prio_arb(axi_dma_dev_t *dev, bool dis)
{
    dev->misc_conf.arb_pri_dis = dis;
}

/**
 * @brief Reset DMA FSM (Read and Write)
 *
 * @param dev DMA register base address
 */
static inline void axi_dma_ll_reset_fsm(axi_dma_dev_t *dev)
{
    // pulse the read-path reset, then the write-path reset (write 1 then 0 each)
    dev->misc_conf.axim_rst_rd_inter = 1;
    dev->misc_conf.axim_rst_rd_inter = 0;
    dev->misc_conf.axim_rst_wr_inter = 1;
    dev->misc_conf.axim_rst_wr_inter = 0;
}
///////////////////////////////////// RX /////////////////////////////////////////
/**
 * @brief Get DMA RX channel interrupt status word
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return Interrupt status word (bit layout follows GDMA_LL_EVENT_RX_* masks)
 */
__attribute__((always_inline))
static inline uint32_t axi_dma_ll_rx_get_interrupt_status(axi_dma_dev_t *dev, uint32_t channel)
{
    return dev->in[channel].intr.st.val;
}

/**
 * @brief Enable DMA RX channel interrupt
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param mask Bit mask of the interrupt events to enable/disable
 * @param enable True to enable, false to disable
 */
static inline void axi_dma_ll_rx_enable_interrupt(axi_dma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
    // read-modify-write so events outside `mask` keep their current state
    if (enable) {
        dev->in[channel].intr.ena.val |= mask;
    } else {
        dev->in[channel].intr.ena.val &= ~mask;
    }
}

/**
 * @brief Clear DMA RX channel interrupt
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param mask Bit mask of the interrupt events to clear
 */
__attribute__((always_inline))
static inline void axi_dma_ll_rx_clear_interrupt_status(axi_dma_dev_t *dev, uint32_t channel, uint32_t mask)
{
    // write-1-to-clear register, so a plain store is sufficient
    dev->in[channel].intr.clr.val = mask;
}

/**
 * @brief Get DMA RX channel interrupt status register address
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return Address of the interrupt status register (e.g. for an interrupt dispatcher)
 */
static inline volatile void *axi_dma_ll_rx_get_interrupt_status_reg(axi_dma_dev_t *dev, uint32_t channel)
{
    return (volatile void *)(&dev->in[channel].intr.st);
}

/**
 * @brief Enable DMA RX channel to check the owner bit in the descriptor, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param enable True to enable, false to disable
 */
static inline void axi_dma_ll_rx_enable_owner_check(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->in[channel].conf.in_conf1.in_check_owner_chn = enable;
}

/**
 * @brief Enable DMA RX channel burst reading data, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param enable True to enable, false to disable
 *
 * @note Intentionally a no-op for now: AXI-DMA data burst support is tracked by IDF-6504
 */
static inline void axi_dma_ll_rx_enable_data_burst(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
    // TODO: IDF-6504
}

/**
 * @brief Enable DMA RX channel burst reading descriptor link, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param enable True to enable, false to disable
 */
static inline void axi_dma_ll_rx_enable_descriptor_burst(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->in[channel].conf.in_conf0.indscr_burst_en_chn = enable;
}

/**
 * @brief Reset DMA RX channel FSM and FIFO pointer
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 */
__attribute__((always_inline))
static inline void axi_dma_ll_rx_reset_channel(axi_dma_dev_t *dev, uint32_t channel)
{
    // pulse the channel reset bit: write 1 then 0
    dev->in[channel].conf.in_conf0.in_rst_chn = 1;
    dev->in[channel].conf.in_conf0.in_rst_chn = 0;
}
/**
 * @brief Pop data from DMA RX FIFO
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return The word popped from the FIFO
 */
static inline uint32_t axi_dma_ll_rx_pop_data(axi_dma_dev_t *dev, uint32_t channel)
{
    // trigger a pop first, then read back the popped word
    dev->in[channel].conf.in_pop.infifo_pop_chn = 1;
    return dev->in[channel].conf.in_pop.infifo_rdata_chn;
}

/**
 * @brief Set the descriptor link base address for RX channel
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param addr Address of the first descriptor in the link
 */
__attribute__((always_inline))
static inline void axi_dma_ll_rx_set_desc_addr(axi_dma_dev_t *dev, uint32_t channel, uint32_t addr)
{
    dev->in[channel].conf.in_link2.inlink_addr_chn = addr;
}

/**
 * @brief Start dealing with RX descriptors
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 */
__attribute__((always_inline))
static inline void axi_dma_ll_rx_start(axi_dma_dev_t *dev, uint32_t channel)
{
    dev->in[channel].conf.in_link1.inlink_start_chn = 1;
}

/**
 * @brief Stop dealing with RX descriptors
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 */
__attribute__((always_inline))
static inline void axi_dma_ll_rx_stop(axi_dma_dev_t *dev, uint32_t channel)
{
    dev->in[channel].conf.in_link1.inlink_stop_chn = 1;
}

/**
 * @brief Restart a new inlink right after the last descriptor
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 */
__attribute__((always_inline))
static inline void axi_dma_ll_rx_restart(axi_dma_dev_t *dev, uint32_t channel)
{
    dev->in[channel].conf.in_link1.inlink_restart_chn = 1;
}

/**
 * @brief Enable DMA RX to return the address of current descriptor when receives error
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param enable True to enable, false to disable
 */
static inline void axi_dma_ll_rx_enable_auto_return(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->in[channel].conf.in_link1.inlink_auto_ret_chn = enable;
}

/**
 * @brief Check if DMA RX FSM is in IDLE state
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return True if the RX link FSM is parked (idle)
 */
static inline bool axi_dma_ll_rx_is_fsm_idle(axi_dma_dev_t *dev, uint32_t channel)
{
    return dev->in[channel].conf.in_link1.inlink_park_chn;
}

/**
 * @brief Get RX success EOF descriptor's address
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return Address of the descriptor where the last successful EOF occurred
 */
__attribute__((always_inline))
static inline uint32_t axi_dma_ll_rx_get_success_eof_desc_addr(axi_dma_dev_t *dev, uint32_t channel)
{
    return dev->in[channel].conf.in_suc_eof_des_addr.val;
}

/**
 * @brief Get RX error EOF descriptor's address
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return Address of the descriptor where the error EOF occurred
 */
__attribute__((always_inline))
static inline uint32_t axi_dma_ll_rx_get_error_eof_desc_addr(axi_dma_dev_t *dev, uint32_t channel)
{
    return dev->in[channel].conf.in_err_eof_des_addr.val;
}

/**
 * @brief Get the pre-fetched RX descriptor's address
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @return Address of the descriptor currently pre-fetched by hardware
 */
__attribute__((always_inline))
static inline uint32_t axi_dma_ll_rx_get_prefetched_desc_addr(axi_dma_dev_t *dev, uint32_t channel)
{
    return dev->in[channel].conf.in_dscr.val;
}

/**
 * @brief Set priority for DMA RX channel
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param prio Priority value (expected range [0, GDMA_LL_CHANNEL_MAX_PRIORITY])
 */
static inline void axi_dma_ll_rx_set_priority(axi_dma_dev_t *dev, uint32_t channel, uint32_t prio)
{
    dev->in[channel].conf.in_pri.rx_pri_chn = prio;
}
/**
 * @brief Connect DMA RX channel to a given peripheral
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param periph Trigger peripheral type; M2M additionally enables memory-to-memory mode
 * @param periph_id Numeric peripheral ID written to the selection register
 */
static inline void axi_dma_ll_rx_connect_to_periph(axi_dma_dev_t *dev, uint32_t channel, gdma_trigger_peripheral_t periph, int periph_id)
{
    dev->in[channel].conf.in_peri_sel.peri_in_sel_chn = periph_id;
    // memory-to-memory transfers need the dedicated enable bit on the RX side
    dev->in[channel].conf.in_conf0.mem_trans_en_chn = (periph == GDMA_TRIG_PERIPH_M2M);
}
/**
 * @brief Disconnect DMA RX channel from peripheral
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 */
static inline void axi_dma_ll_rx_disconnect_from_periph(axi_dma_dev_t *dev, uint32_t channel)
{
    // Use the invalid-ID constant declared in this header; GDMA_LL_INVALID_PERIPH_ID
    // is not defined by the common gdma_ll.h and would not resolve here
    dev->in[channel].conf.in_peri_sel.peri_in_sel_chn = AXI_DMA_LL_INVALID_PERIPH_ID;
    dev->in[channel].conf.in_conf0.mem_trans_en_chn = false;
}
/**
 * @brief Whether to enable the ETM subsystem for RX channel
 *
 * @note When ETM_EN is 1, only ETM tasks can be used to configure the transfer direction and enable the channel.
 *
 * @param dev DMA register base address
 * @param channel DMA RX channel number
 * @param enable True to hand channel control over to ETM tasks, false for register control
 */
static inline void axi_dma_ll_rx_enable_etm_task(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->in[channel].conf.in_conf0.in_etm_en_chn = enable;
}
///////////////////////////////////// TX /////////////////////////////////////////
/**
 * @brief Get DMA TX channel interrupt status word
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @return Interrupt status word (bit layout follows GDMA_LL_EVENT_TX_* masks)
 */
__attribute__((always_inline))
static inline uint32_t axi_dma_ll_tx_get_interrupt_status(axi_dma_dev_t *dev, uint32_t channel)
{
    return dev->out[channel].intr.st.val;
}

/**
 * @brief Enable DMA TX channel interrupt
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param mask Bit mask of the interrupt events to enable/disable
 * @param enable True to enable, false to disable
 */
static inline void axi_dma_ll_tx_enable_interrupt(axi_dma_dev_t *dev, uint32_t channel, uint32_t mask, bool enable)
{
    // read-modify-write so events outside `mask` keep their current state
    if (enable) {
        dev->out[channel].intr.ena.val |= mask;
    } else {
        dev->out[channel].intr.ena.val &= ~mask;
    }
}

/**
 * @brief Clear DMA TX channel interrupt
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param mask Bit mask of the interrupt events to clear
 */
__attribute__((always_inline))
static inline void axi_dma_ll_tx_clear_interrupt_status(axi_dma_dev_t *dev, uint32_t channel, uint32_t mask)
{
    // write-1-to-clear register, so a plain store is sufficient
    dev->out[channel].intr.clr.val = mask;
}

/**
 * @brief Get DMA TX channel interrupt status register address
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @return Address of the interrupt status register (e.g. for an interrupt dispatcher)
 */
static inline volatile void *axi_dma_ll_tx_get_interrupt_status_reg(axi_dma_dev_t *dev, uint32_t channel)
{
    return (volatile void *)(&dev->out[channel].intr.st);
}

/**
 * @brief Enable DMA TX channel to check the owner bit in the descriptor, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param enable True to enable, false to disable
 */
static inline void axi_dma_ll_tx_enable_owner_check(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->out[channel].conf.out_conf1.out_check_owner_chn = enable;
}

/**
 * @brief Enable DMA TX channel burst sending data, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param enable True to enable, false to disable
 *
 * @note Intentionally a no-op for now: AXI-DMA data burst support is tracked by IDF-6504
 */
static inline void axi_dma_ll_tx_enable_data_burst(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
    // TODO: IDF-6504
}

/**
 * @brief Enable DMA TX channel burst reading descriptor link, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param enable True to enable, false to disable
 */
static inline void axi_dma_ll_tx_enable_descriptor_burst(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->out[channel].conf.out_conf0.outdscr_burst_en_chn = enable;
}

/**
 * @brief Set TX channel EOF mode
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param mode EOF mode selector written to the out_eof_mode field
 */
static inline void axi_dma_ll_tx_set_eof_mode(axi_dma_dev_t *dev, uint32_t channel, uint32_t mode)
{
    dev->out[channel].conf.out_conf0.out_eof_mode_chn = mode;
}

/**
 * @brief Enable DMA TX channel automatic write results back to descriptor after all data has been sent out, disabled by default
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param enable True to enable, false to disable
 */
static inline void axi_dma_ll_tx_enable_auto_write_back(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->out[channel].conf.out_conf0.out_auto_wrback_chn = enable;
}

/**
 * @brief Reset DMA TX channel FSM and FIFO pointer
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 */
__attribute__((always_inline))
static inline void axi_dma_ll_tx_reset_channel(axi_dma_dev_t *dev, uint32_t channel)
{
    // pulse the channel reset bit: write 1 then 0
    dev->out[channel].conf.out_conf0.out_rst_chn = 1;
    dev->out[channel].conf.out_conf0.out_rst_chn = 0;
}
/**
 * @brief Push data into DMA TX FIFO
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param data Word to push into the FIFO
 */
static inline void axi_dma_ll_tx_push_data(axi_dma_dev_t *dev, uint32_t channel, uint32_t data)
{
    // stage the data word first, then trigger the push
    dev->out[channel].conf.out_push.outfifo_wdata_chn = data;
    dev->out[channel].conf.out_push.outfifo_push_chn = 1;
}

/**
 * @brief Set the descriptor link base address for TX channel
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param addr Address of the first descriptor in the link
 */
__attribute__((always_inline))
static inline void axi_dma_ll_tx_set_desc_addr(axi_dma_dev_t *dev, uint32_t channel, uint32_t addr)
{
    dev->out[channel].conf.out_link2.outlink_addr_chn = addr;
}

/**
 * @brief Start dealing with TX descriptors
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 */
__attribute__((always_inline))
static inline void axi_dma_ll_tx_start(axi_dma_dev_t *dev, uint32_t channel)
{
    dev->out[channel].conf.out_link1.outlink_start_chn = 1;
}

/**
 * @brief Stop dealing with TX descriptors
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 */
__attribute__((always_inline))
static inline void axi_dma_ll_tx_stop(axi_dma_dev_t *dev, uint32_t channel)
{
    dev->out[channel].conf.out_link1.outlink_stop_chn = 1;
}

/**
 * @brief Restart a new outlink right after the last descriptor
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 */
__attribute__((always_inline))
static inline void axi_dma_ll_tx_restart(axi_dma_dev_t *dev, uint32_t channel)
{
    dev->out[channel].conf.out_link1.outlink_restart_chn = 1;
}

/**
 * @brief Check if DMA TX FSM is in IDLE state
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @return True if the TX link FSM is parked (idle)
 */
static inline bool axi_dma_ll_tx_is_fsm_idle(axi_dma_dev_t *dev, uint32_t channel)
{
    return dev->out[channel].conf.out_link1.outlink_park_chn;
}

/**
 * @brief Get TX EOF descriptor's address
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @return Address of the descriptor where the last EOF occurred
 */
__attribute__((always_inline))
static inline uint32_t axi_dma_ll_tx_get_eof_desc_addr(axi_dma_dev_t *dev, uint32_t channel)
{
    return dev->out[channel].conf.out_eof_des_addr.val;
}

/**
 * @brief Get the pre-fetched TX descriptor's address
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @return Address of the descriptor currently pre-fetched by hardware
 */
__attribute__((always_inline))
static inline uint32_t axi_dma_ll_tx_get_prefetched_desc_addr(axi_dma_dev_t *dev, uint32_t channel)
{
    return dev->out[channel].conf.out_dscr.val;
}

/**
 * @brief Set priority for DMA TX channel
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param prio Priority value (expected range [0, GDMA_LL_CHANNEL_MAX_PRIORITY])
 */
static inline void axi_dma_ll_tx_set_priority(axi_dma_dev_t *dev, uint32_t channel, uint32_t prio)
{
    dev->out[channel].conf.out_pri.tx_pri_chn = prio;
}
/**
 * @brief Connect DMA TX channel to a given peripheral
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param periph Trigger peripheral type (unused on the TX side; only the numeric ID matters)
 * @param periph_id Numeric peripheral ID written to the selection register
 */
static inline void axi_dma_ll_tx_connect_to_periph(axi_dma_dev_t *dev, uint32_t channel, gdma_trigger_peripheral_t periph, int periph_id)
{
    (void)periph;
    dev->out[channel].conf.out_peri_sel.peri_out_sel_chn = periph_id;
}
/**
 * @brief Disconnect DMA TX channel from peripheral
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 */
static inline void axi_dma_ll_tx_disconnect_from_periph(axi_dma_dev_t *dev, uint32_t channel)
{
    // Use the invalid-ID constant declared in this header; GDMA_LL_INVALID_PERIPH_ID
    // is not defined by the common gdma_ll.h and would not resolve here
    dev->out[channel].conf.out_peri_sel.peri_out_sel_chn = AXI_DMA_LL_INVALID_PERIPH_ID;
}
/**
 * @brief Whether to enable the ETM subsystem for TX channel
 *
 * @note When ETM_EN is 1, only ETM tasks can be used to configure the transfer direction and enable the channel.
 *
 * @param dev DMA register base address
 * @param channel DMA TX channel number
 * @param enable True to hand channel control over to ETM tasks, false for register control
 */
static inline void axi_dma_ll_tx_enable_etm_task(axi_dma_dev_t *dev, uint32_t channel, bool enable)
{
    dev->out[channel].conf.out_conf0.out_etm_en_chn = enable;
}
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* @brief The contents defined in this file are common for both AXI-DMA and AHB-DMA
*/
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
#define GDMA_LL_CHANNEL_MAX_PRIORITY 5 // supported priority levels: [0,5]
#define GDMA_LL_RX_EVENT_MASK (0x1F)
#define GDMA_LL_TX_EVENT_MASK (0x0F)
#define GDMA_LL_EVENT_TX_TOTAL_EOF (1<<3)
#define GDMA_LL_EVENT_TX_DESC_ERROR (1<<2)
#define GDMA_LL_EVENT_TX_EOF (1<<1)
#define GDMA_LL_EVENT_TX_DONE (1<<0)
#define GDMA_LL_EVENT_RX_DESC_EMPTY (1<<4)
#define GDMA_LL_EVENT_RX_DESC_ERROR (1<<3)
#define GDMA_LL_EVENT_RX_ERR_EOF (1<<2)
#define GDMA_LL_EVENT_RX_SUC_EOF (1<<1)
#define GDMA_LL_EVENT_RX_DONE (1<<0)
#define GDMA_LL_AHB_GROUP_START_ID 0 // AHB GDMA group ID starts from 0
#define GDMA_LL_AHB_NUM_GROUPS 1 // Number of AHB GDMA groups
#define GDMA_LL_AHB_PAIRS_PER_GROUP 3 // Number of GDMA pairs in each AHB group
#define GDMA_LL_AXI_GROUP_START_ID 1 // AXI GDMA group ID starts from 1
#define GDMA_LL_AXI_NUM_GROUPS 1 // Number of AXI GDMA groups
#define GDMA_LL_AXI_PAIRS_PER_GROUP 3 // Number of GDMA pairs in each AXI group
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,167 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "soc/soc_caps.h"
#include "hal/assert.h"
#include "hal/gdma_hal_ahb.h"
#include "hal/ahb_dma_ll.h"
// HAL private data shared by all AHB-DMA contexts: records which peripheral IDs
// may serve as the "dummy" trigger for memory-to-memory transfers
static gdma_hal_priv_data_t gdma_ahb_hal_priv_data = {
    .m2m_free_periph_mask = AHB_DMA_LL_M2M_FREE_PERIPH_ID_MASK,
};
// Load the head of the descriptor list into the channel, then kick off the transfer
void gdma_ahb_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        ahb_dma_ll_tx_set_desc_addr(hal->ahb_dma_dev, chan_id, desc_base_addr);
        ahb_dma_ll_tx_start(hal->ahb_dma_dev, chan_id);
        return;
    }
    ahb_dma_ll_rx_set_desc_addr(hal->ahb_dma_dev, chan_id, desc_base_addr);
    ahb_dma_ll_rx_start(hal->ahb_dma_dev, chan_id);
}
// Halt descriptor processing on the requested direction of the channel
void gdma_ahb_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        ahb_dma_ll_tx_stop(hal->ahb_dma_dev, chan_id);
        return;
    }
    ahb_dma_ll_rx_stop(hal->ahb_dma_dev, chan_id);
}
// Tell the hardware to continue with a new descriptor appended after the last one
void gdma_ahb_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
        ahb_dma_ll_rx_restart(hal->ahb_dma_dev, chan_id);
    } else {
        ahb_dma_ll_tx_restart(hal->ahb_dma_dev, chan_id);
    }
}

// Reset the channel FSM and FIFO pointer for one direction
void gdma_ahb_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
        ahb_dma_ll_rx_reset_channel(hal->ahb_dma_dev, chan_id);
    } else {
        ahb_dma_ll_tx_reset_channel(hal->ahb_dma_dev, chan_id);
    }
}

// Set the arbitration priority of one direction of the channel
void gdma_ahb_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority)
{
    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
        ahb_dma_ll_rx_set_priority(hal->ahb_dma_dev, chan_id, priority);
    } else {
        ahb_dma_ll_tx_set_priority(hal->ahb_dma_dev, chan_id, priority);
    }
}

// Route the channel to a trigger peripheral; the channel is reset first so no
// stale state survives the re-route
void gdma_ahb_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id)
{
    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
        ahb_dma_ll_rx_reset_channel(hal->ahb_dma_dev, chan_id); // reset channel
        ahb_dma_ll_rx_connect_to_periph(hal->ahb_dma_dev, chan_id, periph, periph_sub_id);
    } else {
        ahb_dma_ll_tx_reset_channel(hal->ahb_dma_dev, chan_id); // reset channel
        ahb_dma_ll_tx_connect_to_periph(hal->ahb_dma_dev, chan_id, periph, periph_sub_id);
    }
}

// Detach the channel from its trigger peripheral
void gdma_ahb_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
        ahb_dma_ll_rx_disconnect_from_periph(hal->ahb_dma_dev, chan_id);
    } else {
        ahb_dma_ll_tx_disconnect_from_periph(hal->ahb_dma_dev, chan_id);
    }
}
// Configure data-burst and descriptor-burst modes for one direction of the channel
void gdma_ahb_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        ahb_dma_ll_tx_enable_data_burst(hal->ahb_dma_dev, chan_id, en_data_burst);
        ahb_dma_ll_tx_enable_descriptor_burst(hal->ahb_dma_dev, chan_id, en_desc_burst);
        return;
    }
    ahb_dma_ll_rx_enable_data_burst(hal->ahb_dma_dev, chan_id, en_data_burst);
    ahb_dma_ll_rx_enable_descriptor_burst(hal->ahb_dma_dev, chan_id, en_desc_burst);
}
void gdma_ahb_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
ahb_dma_ll_rx_enable_owner_check(hal->ahb_dma_dev, chan_id, en_owner_check);
} else {
ahb_dma_ll_tx_enable_owner_check(hal->ahb_dma_dev, chan_id, en_owner_check);
ahb_dma_ll_tx_enable_auto_write_back(hal->ahb_dma_dev, chan_id, en_desc_write_back);
}
}
void gdma_ahb_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis)
{
if (dir == GDMA_CHANNEL_DIRECTION_RX) {
ahb_dma_ll_rx_enable_interrupt(hal->ahb_dma_dev, chan_id, intr_event_mask, en_or_dis);
} else {
ahb_dma_ll_tx_enable_interrupt(hal->ahb_dma_dev, chan_id, intr_event_mask, en_or_dis);
}
}
/**
 * @brief Clear the given interrupt status bits of a channel
 */
void gdma_ahb_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        ahb_dma_ll_tx_clear_interrupt_status(hal->ahb_dma_dev, chan_id, intr_event_mask);
        return;
    }
    ahb_dma_ll_rx_clear_interrupt_status(hal->ahb_dma_dev, chan_id, intr_event_mask);
}
/**
 * @brief Read the interrupt status word of a channel
 */
uint32_t gdma_ahb_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    return (dir == GDMA_CHANNEL_DIRECTION_RX)
           ? ahb_dma_ll_rx_get_interrupt_status(hal->ahb_dma_dev, chan_id)
           : ahb_dma_ll_tx_get_interrupt_status(hal->ahb_dma_dev, chan_id);
}
/**
 * @brief Get the address of a channel's interrupt status register (for shared-interrupt dispatch)
 */
uint32_t gdma_ahb_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    return (dir == GDMA_CHANNEL_DIRECTION_RX)
           ? (uint32_t)ahb_dma_ll_rx_get_interrupt_status_reg(hal->ahb_dma_dev, chan_id)
           : (uint32_t)ahb_dma_ll_tx_get_interrupt_status_reg(hal->ahb_dma_dev, chan_id);
}
/**
 * @brief Get the address of the descriptor on which the latest EOF event happened
 *
 * @note For RX this returns the "success EOF" descriptor address specifically.
 */
uint32_t gdma_ahb_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    return (dir == GDMA_CHANNEL_DIRECTION_RX)
           ? ahb_dma_ll_rx_get_success_eof_desc_addr(hal->ahb_dma_dev, chan_id)
           : ahb_dma_ll_tx_get_eof_desc_addr(hal->ahb_dma_dev, chan_id);
}
/**
 * @brief Initialize the HAL context for an AHB-DMA group: resolve the hardware
 *        instance and install the AHB-specific function table
 */
void gdma_ahb_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config)
{
    // map the global group ID into an AHB-DMA instance index
    hal->ahb_dma_dev = AHB_DMA_LL_GET_HW(config->group_id - GDMA_LL_AHB_GROUP_START_ID);
    hal->priv_data = &gdma_ahb_hal_priv_data;
    // interrupt related ops
    hal->enable_intr = gdma_ahb_hal_enable_intr;
    hal->clear_intr = gdma_ahb_hal_clear_intr;
    hal->read_intr_status = gdma_ahb_hal_read_intr_status;
    hal->get_intr_status_reg = gdma_ahb_hal_get_intr_status_reg;
    hal->get_eof_desc_addr = gdma_ahb_hal_get_eof_desc_addr;
    // channel configuration ops
    hal->set_priority = gdma_ahb_hal_set_priority;
    hal->connect_peri = gdma_ahb_hal_connect_peri;
    hal->disconnect_peri = gdma_ahb_hal_disconnect_peri;
    hal->enable_burst = gdma_ahb_hal_enable_burst;
    hal->set_strategy = gdma_ahb_hal_set_strategy;
    // transfer control ops
    hal->start_with_desc = gdma_ahb_hal_start_with_desc;
    hal->stop = gdma_ahb_hal_stop;
    hal->append = gdma_ahb_hal_append;
    hal->reset = gdma_ahb_hal_reset;
}

View File

@@ -0,0 +1,167 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "soc/soc_caps.h"
#include "hal/assert.h"
#include "hal/gdma_hal_axi.h"
#include "hal/axi_dma_ll.h"
// Private data shared by all AXI-DMA HAL contexts: the bitmask of "dummy"
// peripheral IDs that may be claimed for memory-to-memory transfers
static gdma_hal_priv_data_t gdma_axi_hal_priv_data = {
    .m2m_free_periph_mask = AXI_DMA_LL_M2M_FREE_PERIPH_ID_MASK,
};
/**
 * @brief Program the first link-descriptor address and start the AXI-DMA channel
 */
void gdma_axi_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        axi_dma_ll_tx_set_desc_addr(hal->axi_dma_dev, chan_id, desc_base_addr);
        axi_dma_ll_tx_start(hal->axi_dma_dev, chan_id);
        return;
    }
    axi_dma_ll_rx_set_desc_addr(hal->axi_dma_dev, chan_id, desc_base_addr);
    axi_dma_ll_rx_start(hal->axi_dma_dev, chan_id);
}
/**
 * @brief Stop the AXI-DMA channel
 */
void gdma_axi_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        axi_dma_ll_tx_stop(hal->axi_dma_dev, chan_id);
        return;
    }
    axi_dma_ll_rx_stop(hal->axi_dma_dev, chan_id);
}
/**
 * @brief Restart the channel so it picks up newly appended descriptors
 */
void gdma_axi_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        axi_dma_ll_tx_restart(hal->axi_dma_dev, chan_id);
        return;
    }
    axi_dma_ll_rx_restart(hal->axi_dma_dev, chan_id);
}
/**
 * @brief Reset the AXI-DMA channel
 */
void gdma_axi_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        axi_dma_ll_tx_reset_channel(hal->axi_dma_dev, chan_id);
        return;
    }
    axi_dma_ll_rx_reset_channel(hal->axi_dma_dev, chan_id);
}
/**
 * @brief Set the arbitration priority of the channel
 */
void gdma_axi_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        axi_dma_ll_tx_set_priority(hal->axi_dma_dev, chan_id, priority);
        return;
    }
    axi_dma_ll_rx_set_priority(hal->axi_dma_dev, chan_id, priority);
}
/**
 * @brief Connect a DMA channel to a peripheral trigger, resetting the channel first
 */
void gdma_axi_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        // TX direction: reset before switching the trigger source
        axi_dma_ll_tx_reset_channel(hal->axi_dma_dev, chan_id);
        axi_dma_ll_tx_connect_to_periph(hal->axi_dma_dev, chan_id, periph, periph_sub_id);
        return;
    }
    // RX direction: reset before switching the trigger source
    axi_dma_ll_rx_reset_channel(hal->axi_dma_dev, chan_id);
    axi_dma_ll_rx_connect_to_periph(hal->axi_dma_dev, chan_id, periph, periph_sub_id);
}
/**
 * @brief Detach a DMA channel from its peripheral trigger
 */
void gdma_axi_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        axi_dma_ll_tx_disconnect_from_periph(hal->axi_dma_dev, chan_id);
        return;
    }
    axi_dma_ll_rx_disconnect_from_periph(hal->axi_dma_dev, chan_id);
}
/**
 * @brief Enable/disable burst mode for data transfers and descriptor fetches on a channel
 */
void gdma_axi_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        axi_dma_ll_tx_enable_data_burst(hal->axi_dma_dev, chan_id, en_data_burst);
        axi_dma_ll_tx_enable_descriptor_burst(hal->axi_dma_dev, chan_id, en_desc_burst);
        return;
    }
    axi_dma_ll_rx_enable_data_burst(hal->axi_dma_dev, chan_id, en_data_burst);
    axi_dma_ll_rx_enable_descriptor_burst(hal->axi_dma_dev, chan_id, en_desc_burst);
}
/**
 * @brief Configure channel transfer strategy: descriptor owner check, and (TX only) auto write-back
 *
 * @note en_desc_write_back only takes effect for TX channels; the RX path exposes no such control here.
 */
void gdma_axi_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back)
{
    if (dir == GDMA_CHANNEL_DIRECTION_RX) {
        axi_dma_ll_rx_enable_owner_check(hal->axi_dma_dev, chan_id, en_owner_check);
        return;
    }
    axi_dma_ll_tx_enable_owner_check(hal->axi_dma_dev, chan_id, en_owner_check);
    axi_dma_ll_tx_enable_auto_write_back(hal->axi_dma_dev, chan_id, en_desc_write_back);
}
/**
 * @brief Enable or disable the given interrupt events on a channel
 */
void gdma_axi_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        axi_dma_ll_tx_enable_interrupt(hal->axi_dma_dev, chan_id, intr_event_mask, en_or_dis);
        return;
    }
    axi_dma_ll_rx_enable_interrupt(hal->axi_dma_dev, chan_id, intr_event_mask, en_or_dis);
}
/**
 * @brief Clear the given interrupt status bits of a channel
 */
void gdma_axi_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask)
{
    if (dir != GDMA_CHANNEL_DIRECTION_RX) {
        axi_dma_ll_tx_clear_interrupt_status(hal->axi_dma_dev, chan_id, intr_event_mask);
        return;
    }
    axi_dma_ll_rx_clear_interrupt_status(hal->axi_dma_dev, chan_id, intr_event_mask);
}
/**
 * @brief Read the interrupt status word of a channel
 */
uint32_t gdma_axi_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    return (dir == GDMA_CHANNEL_DIRECTION_RX)
           ? axi_dma_ll_rx_get_interrupt_status(hal->axi_dma_dev, chan_id)
           : axi_dma_ll_tx_get_interrupt_status(hal->axi_dma_dev, chan_id);
}
/**
 * @brief Get the address of a channel's interrupt status register (for shared-interrupt dispatch)
 */
uint32_t gdma_axi_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    return (dir == GDMA_CHANNEL_DIRECTION_RX)
           ? (uint32_t)axi_dma_ll_rx_get_interrupt_status_reg(hal->axi_dma_dev, chan_id)
           : (uint32_t)axi_dma_ll_tx_get_interrupt_status_reg(hal->axi_dma_dev, chan_id);
}
/**
 * @brief Get the address of the descriptor on which the latest EOF event happened
 *
 * @note For RX this returns the "success EOF" descriptor address specifically.
 */
uint32_t gdma_axi_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir)
{
    return (dir == GDMA_CHANNEL_DIRECTION_RX)
           ? axi_dma_ll_rx_get_success_eof_desc_addr(hal->axi_dma_dev, chan_id)
           : axi_dma_ll_tx_get_eof_desc_addr(hal->axi_dma_dev, chan_id);
}
/**
 * @brief Initialize the HAL context for an AXI-DMA group: resolve the hardware
 *        instance and install the AXI-specific function table
 */
void gdma_axi_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config)
{
    // map the global group ID into an AXI-DMA instance index
    hal->axi_dma_dev = AXI_DMA_LL_GET_HW(config->group_id - GDMA_LL_AXI_GROUP_START_ID);
    hal->priv_data = &gdma_axi_hal_priv_data;
    // interrupt related ops
    hal->enable_intr = gdma_axi_hal_enable_intr;
    hal->clear_intr = gdma_axi_hal_clear_intr;
    hal->read_intr_status = gdma_axi_hal_read_intr_status;
    hal->get_intr_status_reg = gdma_axi_hal_get_intr_status_reg;
    hal->get_eof_desc_addr = gdma_axi_hal_get_eof_desc_addr;
    // channel configuration ops
    hal->set_priority = gdma_axi_hal_set_priority;
    hal->connect_peri = gdma_axi_hal_connect_peri;
    hal->disconnect_peri = gdma_axi_hal_disconnect_peri;
    hal->enable_burst = gdma_axi_hal_enable_burst;
    hal->set_strategy = gdma_axi_hal_set_strategy;
    // transfer control ops
    hal->start_with_desc = gdma_axi_hal_start_with_desc;
    hal->stop = gdma_axi_hal_stop;
    hal->append = gdma_axi_hal_append;
    hal->reset = gdma_axi_hal_reset;
}

View File

@@ -0,0 +1,49 @@
/*
* SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include "hal/gdma_hal.h"
#ifdef __cplusplus
extern "C" {
#endif
/** @brief Load the first link-descriptor address and start the AXI-DMA channel */
void gdma_axi_hal_start_with_desc(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, intptr_t desc_base_addr);
/** @brief Stop the AXI-DMA channel */
void gdma_axi_hal_stop(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
/** @brief Restart the channel so it picks up newly appended descriptors */
void gdma_axi_hal_append(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
/** @brief Reset the AXI-DMA channel */
void gdma_axi_hal_reset(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
/** @brief Set the arbitration priority of the channel */
void gdma_axi_hal_set_priority(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t priority);
/** @brief Connect the channel to a peripheral trigger (the channel is reset first) */
void gdma_axi_hal_connect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, gdma_trigger_peripheral_t periph, int periph_sub_id);
/** @brief Detach the channel from its peripheral trigger */
void gdma_axi_hal_disconnect_peri(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
/** @brief Enable/disable burst for data transfers and descriptor fetches */
void gdma_axi_hal_enable_burst(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_data_burst, bool en_desc_burst);
/** @brief Set external memory access alignment for the channel */
/* NOTE(review): declared here but no matching definition is visible in gdma_hal_axi.c — confirm it is implemented */
void gdma_axi_hal_set_ext_mem_align(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint8_t align);
/** @brief Configure owner check and (TX only) descriptor auto write-back strategy */
void gdma_axi_hal_set_strategy(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, bool en_owner_check, bool en_desc_write_back);
/** @brief Enable or disable the given interrupt events on the channel */
void gdma_axi_hal_enable_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask, bool en_or_dis);
/** @brief Clear the given interrupt status bits of the channel */
void gdma_axi_hal_clear_intr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir, uint32_t intr_event_mask);
/** @brief Read the interrupt status word of the channel */
uint32_t gdma_axi_hal_read_intr_status(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
/** @brief Get the address of the channel's interrupt status register */
uint32_t gdma_axi_hal_get_intr_status_reg(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
/** @brief Get the address of the descriptor where the latest EOF event occurred */
uint32_t gdma_axi_hal_get_eof_desc_addr(gdma_hal_context_t *hal, int chan_id, gdma_channel_direction_t dir);
/** @brief Initialize the HAL context: resolve the hardware instance and install the AXI function table */
void gdma_axi_hal_init(gdma_hal_context_t *hal, const gdma_hal_config_t *config);
#ifdef __cplusplus
}
#endif

View File

@@ -27,6 +27,7 @@ typedef enum {
GDMA_TRIG_PERIPH_CAM, /*!< GDMA trigger peripheral: CAM */ GDMA_TRIG_PERIPH_CAM, /*!< GDMA trigger peripheral: CAM */
GDMA_TRIG_PERIPH_RMT, /*!< GDMA trigger peripheral: RMT */ GDMA_TRIG_PERIPH_RMT, /*!< GDMA trigger peripheral: RMT */
GDMA_TRIG_PERIPH_PARLIO, /*!< GDMA trigger peripheral: PARLIO */ GDMA_TRIG_PERIPH_PARLIO, /*!< GDMA trigger peripheral: PARLIO */
GDMA_TRIG_PERIPH_I3C, /*!< GDMA trigger peripheral: I3C */
} gdma_trigger_peripheral_t; } gdma_trigger_peripheral_t;
/** /**

View File

@@ -195,17 +195,21 @@ config SOC_DS_KEY_CHECK_MAX_WAIT_US
int int
default 1100 default 1100
config SOC_GDMA_GROUPS config SOC_AHB_GDMA_VERSION
int int
default 1 default 2
config SOC_GDMA_PAIRS_PER_GROUP config SOC_GDMA_NUM_GROUPS_MAX
int
default 2
config SOC_GDMA_PAIRS_PER_GROUP_MAX
int int
default 3 default 3
config SOC_GDMA_SUPPORT_ETM config SOC_GDMA_SUPPORT_ETM
bool bool
default n default y
config SOC_ETM_GROUPS config SOC_ETM_GROUPS
int int

View File

@@ -318,7 +318,6 @@ typedef union {
uint32_t val; uint32_t val;
} ahb_dma_out_int_clr_chn_reg_t; } ahb_dma_out_int_clr_chn_reg_t;
/** Group: Debug Registers */ /** Group: Debug Registers */
/** Type of ahb_test register /** Type of ahb_test register
* reserved * reserved
@@ -339,7 +338,6 @@ typedef union {
uint32_t val; uint32_t val;
} ahb_dma_ahb_test_reg_t; } ahb_dma_ahb_test_reg_t;
/** Group: Configuration Registers */ /** Group: Configuration Registers */
/** Type of misc_conf register /** Type of misc_conf register
* MISC register * MISC register
@@ -965,7 +963,6 @@ typedef union {
uint32_t val; uint32_t val;
} ahb_dma_weight_en_rx_reg_t; } ahb_dma_weight_en_rx_reg_t;
/** Group: Version Registers */ /** Group: Version Registers */
/** Type of date register /** Type of date register
* Version control register * Version control register
@@ -980,7 +977,6 @@ typedef union {
uint32_t val; uint32_t val;
} ahb_dma_date_reg_t; } ahb_dma_date_reg_t;
/** Group: Status Registers */ /** Group: Status Registers */
/** Type of infifo_status_chn register /** Type of infifo_status_chn register
* Receive FIFO status of Rx channel 0 * Receive FIFO status of Rx channel 0
@@ -1242,7 +1238,6 @@ typedef union {
uint32_t val; uint32_t val;
} ahb_dma_out_dscr_bf1_chn_reg_t; } ahb_dma_out_dscr_bf1_chn_reg_t;
/** Group: Priority Registers */ /** Group: Priority Registers */
/** Type of in_pri_chn register /** Type of in_pri_chn register
* Priority register of Rx channel 0 * Priority register of Rx channel 0
@@ -1272,7 +1267,6 @@ typedef union {
uint32_t val; uint32_t val;
} ahb_dma_out_pri_chn_reg_t; } ahb_dma_out_pri_chn_reg_t;
/** Group: Peripheral Select Registers */ /** Group: Peripheral Select Registers */
/** Type of in_peri_sel_chn register /** Type of in_peri_sel_chn register
* Peripheral selection of Rx channel 0 * Peripheral selection of Rx channel 0
@@ -1306,20 +1300,18 @@ typedef union {
uint32_t val; uint32_t val;
} ahb_dma_out_peri_sel_chn_reg_t; } ahb_dma_out_peri_sel_chn_reg_t;
typedef struct { typedef struct {
volatile ahb_dma_in_int_raw_chn_reg_t in_int_raw; volatile ahb_dma_in_int_raw_chn_reg_t raw;
volatile ahb_dma_in_int_st_chn_reg_t in_int_st; volatile ahb_dma_in_int_st_chn_reg_t st;
volatile ahb_dma_in_int_ena_chn_reg_t in_int_ena; volatile ahb_dma_in_int_ena_chn_reg_t ena;
volatile ahb_dma_in_int_clr_chn_reg_t in_int_clr; volatile ahb_dma_in_int_clr_chn_reg_t clr;
} ahb_dma_in_int_chn_reg_t; } ahb_dma_in_int_chn_reg_t;
typedef struct { typedef struct {
volatile ahb_dma_out_int_raw_chn_reg_t out_int_raw; volatile ahb_dma_out_int_raw_chn_reg_t raw;
volatile ahb_dma_out_int_st_chn_reg_t out_int_st; volatile ahb_dma_out_int_st_chn_reg_t st;
volatile ahb_dma_out_int_ena_chn_reg_t out_int_ena; volatile ahb_dma_out_int_ena_chn_reg_t ena;
volatile ahb_dma_out_int_clr_chn_reg_t out_int_clr; volatile ahb_dma_out_int_clr_chn_reg_t clr;
} ahb_dma_out_int_chn_reg_t; } ahb_dma_out_int_chn_reg_t;
typedef struct { typedef struct {
@@ -1410,7 +1402,6 @@ typedef struct {
extern ahb_dma_dev_t AHB_DMA; extern ahb_dma_dev_t AHB_DMA;
#ifndef __cplusplus #ifndef __cplusplus
_Static_assert(sizeof(ahb_dma_dev_t) == 0x3DC, "Invalid size of ahb_dma_dev_t structure"); _Static_assert(sizeof(ahb_dma_dev_t) == 0x3DC, "Invalid size of ahb_dma_dev_t structure");
#endif #endif

View File

@@ -703,7 +703,6 @@ typedef union {
uint32_t val; uint32_t val;
} axi_dma_rx_crc_data_en_addr_chn_reg_t; } axi_dma_rx_crc_data_en_addr_chn_reg_t;
/** Group: out */ /** Group: out */
/** Type of out_int_raw_chn register /** Type of out_int_raw_chn register
* Raw status interrupt of channel0 * Raw status interrupt of channel0
@@ -917,55 +916,55 @@ typedef union {
uint32_t val; uint32_t val;
} axi_dma_out_int_clr_chn_reg_t; } axi_dma_out_int_clr_chn_reg_t;
/** Type of out_conf0_ch0 register /** Type of out_conf0_chn register
* Configure 0 register of Tx channel0 * Configure 0 register of Tx channelN
*/ */
typedef union { typedef union {
struct { struct {
/** out_rst_ch0 : R/W; bitpos: [0]; default: 0; /** out_rst_chn : R/W; bitpos: [0]; default: 0;
* This bit is used to reset AXI_DMA channel0 Tx FSM and Tx FIFO pointer. * This bit is used to reset AXI_DMA channel0 Tx FSM and Tx FIFO pointer.
*/ */
uint32_t out_rst_ch0:1; uint32_t out_rst_chn: 1;
/** out_loop_test_ch0 : R/W; bitpos: [1]; default: 0; /** out_loop_test_chn : R/W; bitpos: [1]; default: 0;
* reserved * reserved
*/ */
uint32_t out_loop_test_ch0:1; uint32_t out_loop_test_chn: 1;
/** out_auto_wrback_ch0 : R/W; bitpos: [2]; default: 0; /** out_auto_wrback_chn : R/W; bitpos: [2]; default: 0;
* Set this bit to enable automatic outlink-writeback when all the data in tx buffer * Set this bit to enable automatic outlink-writeback when all the data in tx buffer
* has been transmitted. * has been transmitted.
*/ */
uint32_t out_auto_wrback_ch0:1; uint32_t out_auto_wrback_chn: 1;
/** out_eof_mode_ch0 : R/W; bitpos: [3]; default: 1; /** out_eof_mode_chn : R/W; bitpos: [3]; default: 1;
* EOF flag generation mode when transmitting data. 1: EOF flag for Tx channel0 is * EOF flag generation mode when transmitting data. 1: EOF flag for Tx channel0 is
* generated when data need to transmit has been popped from FIFO in AXI_DMA * generated when data need to transmit has been popped from FIFO in AXI_DMA
*/ */
uint32_t out_eof_mode_ch0:1; uint32_t out_eof_mode_chn: 1;
/** out_etm_en_ch0 : R/W; bitpos: [4]; default: 0; /** out_etm_en_chn : R/W; bitpos: [4]; default: 0;
* Set this bit to 1 to enable etm control mode, dma Tx channel0 is triggered by etm * Set this bit to 1 to enable etm control mode, dma Tx channel0 is triggered by etm
* task. * task.
*/ */
uint32_t out_etm_en_ch0:1; uint32_t out_etm_en_chn: 1;
/** out_burst_size_sel_ch0 : R/W; bitpos: [7:5]; default: 0; /** out_burst_size_sel_chn : R/W; bitpos: [7:5]; default: 0;
* 3'b000-3'b100:burst length 8byte~128byte * 3'b000-3'b100:burst length 8byte~128byte
*/ */
uint32_t out_burst_size_sel_ch0:3; uint32_t out_burst_size_sel_chn: 3;
/** out_cmd_disable_ch0 : R/W; bitpos: [8]; default: 0; /** out_cmd_disable_chn : R/W; bitpos: [8]; default: 0;
* 1:mean disable cmd of this ch0 * 1:mean disable cmd of this chn
*/ */
uint32_t out_cmd_disable_ch0:1; uint32_t out_cmd_disable_chn: 1;
/** out_ecc_aec_en_ch0 : R/W; bitpos: [9]; default: 0; /** out_ecc_aec_en_chn : R/W; bitpos: [9]; default: 0;
* 1: mean access ecc or aes domain,0: mean not * 1: mean access ecc or aes domain,0: mean not
*/ */
uint32_t out_ecc_aec_en_ch0:1; uint32_t out_ecc_aec_en_chn: 1;
/** outdscr_burst_en_ch0 : R/W; bitpos: [10]; default: 0; /** outdscr_burst_en_chn : R/W; bitpos: [10]; default: 0;
* Set this bit to 1 to enable INCR burst transfer for Tx channel0 reading link * Set this bit to 1 to enable INCR burst transfer for Tx channel0 reading link
* descriptor when accessing internal SRAM. * descriptor when accessing internal SRAM.
*/ */
uint32_t outdscr_burst_en_ch0:1; uint32_t outdscr_burst_en_chn: 1;
uint32_t reserved_11: 21; uint32_t reserved_11: 21;
}; };
uint32_t val; uint32_t val;
} axi_dma_out_conf0_ch0_reg_t; } axi_dma_out_conf0_chn_reg_t;
/** Type of out_conf1_chn register /** Type of out_conf1_chn register
* Configure 1 register of Tx channel0 * Configure 1 register of Tx channel0
@@ -1475,7 +1474,6 @@ typedef union {
uint32_t val; uint32_t val;
} axi_dma_out_conf0_ch2_reg_t; } axi_dma_out_conf0_ch2_reg_t;
/** Group: Configuration Registers */ /** Group: Configuration Registers */
/** Type of arb_timeout register /** Type of arb_timeout register
* This retister is used to config arbiter time slice * This retister is used to config arbiter time slice
@@ -1660,7 +1658,6 @@ typedef union {
uint32_t val; uint32_t val;
} axi_dma_misc_conf_reg_t; } axi_dma_misc_conf_reg_t;
/** Group: Debug Registers */ /** Group: Debug Registers */
/** Type of rdn_result register /** Type of rdn_result register
* reserved * reserved
@@ -1706,7 +1703,6 @@ typedef union {
uint32_t val; uint32_t val;
} axi_dma_rdn_eco_low_reg_t; } axi_dma_rdn_eco_low_reg_t;
/** Group: Status Registers */ /** Group: Status Registers */
/** Type of wresp_cnt register /** Type of wresp_cnt register
* AXI wr responce cnt register. * AXI wr responce cnt register.
@@ -1772,7 +1768,6 @@ typedef union {
uint32_t val; uint32_t val;
} axi_dma_outfifo_status1_chn_reg_t; } axi_dma_outfifo_status1_chn_reg_t;
/** Group: Version Registers */ /** Group: Version Registers */
/** Type of date register /** Type of date register
* Version control register * Version control register
@@ -1788,10 +1783,10 @@ typedef union {
} axi_dma_date_reg_t; } axi_dma_date_reg_t;
typedef struct { typedef struct {
volatile axi_dma_in_int_raw_chn_reg_t in_int_raw; volatile axi_dma_in_int_raw_chn_reg_t raw;
volatile axi_dma_in_int_st_chn_reg_t in_int_st; volatile axi_dma_in_int_st_chn_reg_t st;
volatile axi_dma_in_int_ena_chn_reg_t in_int_ena; volatile axi_dma_in_int_ena_chn_reg_t ena;
volatile axi_dma_in_int_clr_chn_reg_t in_int_clr; volatile axi_dma_in_int_clr_chn_reg_t clr;
} axi_dma_in_int_chn_reg_t; } axi_dma_in_int_chn_reg_t;
typedef struct { typedef struct {
@@ -1823,20 +1818,20 @@ typedef struct {
} axi_dma_in_crc_chn_reg_t; } axi_dma_in_crc_chn_reg_t;
typedef struct { typedef struct {
volatile axi_dma_in_int_chn_reg_t in_intr; volatile axi_dma_in_int_chn_reg_t intr;
volatile axi_dma_in_conf_chn_reg_t in_conf; volatile axi_dma_in_conf_chn_reg_t conf;
volatile axi_dma_in_crc_chn_reg_t in_crc; volatile axi_dma_in_crc_chn_reg_t crc;
} axi_dma_in_reg_t; } axi_dma_in_reg_t;
typedef struct { typedef struct {
volatile axi_dma_out_int_raw_chn_reg_t out_int_raw; volatile axi_dma_out_int_raw_chn_reg_t raw;
volatile axi_dma_out_int_st_chn_reg_t out_int_st; volatile axi_dma_out_int_st_chn_reg_t st;
volatile axi_dma_out_int_ena_chn_reg_t out_int_ena; volatile axi_dma_out_int_ena_chn_reg_t ena;
volatile axi_dma_out_int_clr_chn_reg_t out_int_clr; volatile axi_dma_out_int_clr_chn_reg_t clr;
} axi_dma_out_int_chn_reg_t; } axi_dma_out_int_chn_reg_t;
typedef struct { typedef struct {
volatile axi_dma_out_conf0_ch0_reg_t out_conf0; volatile axi_dma_out_conf0_chn_reg_t out_conf0;
volatile axi_dma_out_conf1_chn_reg_t out_conf1; volatile axi_dma_out_conf1_chn_reg_t out_conf1;
volatile axi_dma_outfifo_status_chn_reg_t outfifo_status; volatile axi_dma_outfifo_status_chn_reg_t outfifo_status;
volatile axi_dma_out_push_chn_reg_t out_push; volatile axi_dma_out_push_chn_reg_t out_push;
@@ -1864,9 +1859,9 @@ typedef struct {
} axi_dma_out_crc_chn_reg_t; } axi_dma_out_crc_chn_reg_t;
typedef struct { typedef struct {
volatile axi_dma_out_int_chn_reg_t in_intr; volatile axi_dma_out_int_chn_reg_t intr;
volatile axi_dma_out_conf_chn_reg_t in_conf; volatile axi_dma_out_conf_chn_reg_t conf;
volatile axi_dma_out_crc_chn_reg_t in_crc; volatile axi_dma_out_crc_chn_reg_t crc;
} axi_dma_out_reg_t; } axi_dma_out_reg_t;
typedef struct { typedef struct {
@@ -1893,6 +1888,7 @@ typedef struct {
volatile axi_dma_date_reg_t date; volatile axi_dma_date_reg_t date;
} axi_dma_dev_t; } axi_dma_dev_t;
extern axi_dma_dev_t AXI_DMA;
#ifndef __cplusplus #ifndef __cplusplus
_Static_assert(sizeof(axi_dma_dev_t) == 0x2dc, "Invalid size of axi_dma_dev_t structure"); _Static_assert(sizeof(axi_dma_dev_t) == 0x2dc, "Invalid size of axi_dma_dev_t structure");

View File

@@ -5,3 +5,41 @@
*/ */
#pragma once #pragma once
// The following macros have a format SOC_[periph][instance_id] to make it work with `GDMA_MAKE_TRIGGER`
// NOTE: the peripheral selection IDs below are only unique within one DMA bus;
// e.g. I3C0 (AHB) and LCD0/CAM0 (AXI) all use ID 0 — see the *_BUS macros further down.
#define SOC_GDMA_TRIG_PERIPH_M2M0 -1
#define SOC_GDMA_TRIG_PERIPH_I3C0 0
#define SOC_GDMA_TRIG_PERIPH_UHCI0 2
#define SOC_GDMA_TRIG_PERIPH_I2S0 3
#define SOC_GDMA_TRIG_PERIPH_I2S1 4
#define SOC_GDMA_TRIG_PERIPH_I2S2 5
#define SOC_GDMA_TRIG_PERIPH_ADC0 8
#define SOC_GDMA_TRIG_PERIPH_RMT0 10
#define SOC_GDMA_TRIG_PERIPH_LCD0 0
#define SOC_GDMA_TRIG_PERIPH_CAM0 0
#define SOC_GDMA_TRIG_PERIPH_SPI2 1
#define SOC_GDMA_TRIG_PERIPH_SPI3 2
#define SOC_GDMA_TRIG_PERIPH_PARLIO0 3
#define SOC_GDMA_TRIG_PERIPH_AES0 4
#define SOC_GDMA_TRIG_PERIPH_SHA0 5
// On which system bus is the DMA instance of the peripheral connection mounted
#define SOC_GDMA_BUS_ANY -1
#define SOC_GDMA_BUS_AHB 0
#define SOC_GDMA_BUS_AXI 1
// M2M is a pseudo trigger: it can be served by a channel on either bus
#define SOC_GDMA_TRIG_PERIPH_M2M0_BUS SOC_GDMA_BUS_ANY
// Peripherals wired to the AHB-DMA instance
#define SOC_GDMA_TRIG_PERIPH_UHCI0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_I2S0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_I2S1_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_I2S2_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_ADC0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_RMT0_BUS SOC_GDMA_BUS_AHB
#define SOC_GDMA_TRIG_PERIPH_I3C0_BUS SOC_GDMA_BUS_AHB
// Peripherals wired to the AXI-DMA instance
#define SOC_GDMA_TRIG_PERIPH_SPI2_BUS SOC_GDMA_BUS_AXI
#define SOC_GDMA_TRIG_PERIPH_SPI3_BUS SOC_GDMA_BUS_AXI
#define SOC_GDMA_TRIG_PERIPH_LCD0_BUS SOC_GDMA_BUS_AXI
#define SOC_GDMA_TRIG_PERIPH_CAM0_BUS SOC_GDMA_BUS_AXI
#define SOC_GDMA_TRIG_PERIPH_AES0_BUS SOC_GDMA_BUS_AXI
#define SOC_GDMA_TRIG_PERIPH_SHA0_BUS SOC_GDMA_BUS_AXI
#define SOC_GDMA_TRIG_PERIPH_PARLIO0_BUS SOC_GDMA_BUS_AXI

View File

@@ -30,6 +30,8 @@
// #define SOC_DEDICATED_GPIO_SUPPORTED 1 //TODO: IDF-7552 // #define SOC_DEDICATED_GPIO_SUPPORTED 1 //TODO: IDF-7552
#define SOC_UART_SUPPORTED 1 #define SOC_UART_SUPPORTED 1
// #define SOC_GDMA_SUPPORTED 1 //TODO: IDF-6504 // #define SOC_GDMA_SUPPORTED 1 //TODO: IDF-6504
// #define SOC_AHB_GDMA_SUPPORTED 1
// #define SOC_AXI_GDMA_SUPPORTED 1
// #define SOC_GPTIMER_SUPPORTED 1 //TODO: IDF-6515 // #define SOC_GPTIMER_SUPPORTED 1 //TODO: IDF-6515
// #define SOC_PCNT_SUPPORTED 1 //TODO: IDF-7475 // #define SOC_PCNT_SUPPORTED 1 //TODO: IDF-7475
// #define SOC_MCPWM_SUPPORTED 1 //TODO: IDF-7493 // #define SOC_MCPWM_SUPPORTED 1 //TODO: IDF-7493
@@ -154,9 +156,10 @@
#define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100) #define SOC_DS_KEY_CHECK_MAX_WAIT_US (1100)
/*-------------------------- GDMA CAPS -------------------------------------*/ /*-------------------------- GDMA CAPS -------------------------------------*/
#define SOC_GDMA_GROUPS (1U) // Number of GDMA groups #define SOC_AHB_GDMA_VERSION 2
#define SOC_GDMA_PAIRS_PER_GROUP (3) // Number of GDMA pairs in each group #define SOC_GDMA_NUM_GROUPS_MAX 2
#define SOC_GDMA_SUPPORT_ETM (0) // Support ETM submodule #define SOC_GDMA_PAIRS_PER_GROUP_MAX 3
#define SOC_GDMA_SUPPORT_ETM 1 // Both AHB-DMA and AXI-DMA supports ETM
/*-------------------------- ETM CAPS --------------------------------------*/ /*-------------------------- ETM CAPS --------------------------------------*/
#define SOC_ETM_GROUPS 1U // Number of ETM groups #define SOC_ETM_GROUPS 1U // Number of ETM groups
@@ -421,7 +424,6 @@
/*-------------------------- MEMPROT CAPS ------------------------------------*/ /*-------------------------- MEMPROT CAPS ------------------------------------*/
/*-------------------------- UART CAPS ---------------------------------------*/ /*-------------------------- UART CAPS ---------------------------------------*/
// ESP32-P4 has 2 UARTs // ESP32-P4 has 2 UARTs
#define SOC_UART_NUM (2) #define SOC_UART_NUM (2)