async_mcp: apply gdma driver

Commit to espressif/esp-idf (https://github.com/espressif/esp-idf.git)
@@ -48,6 +48,7 @@ typedef struct {
  */
 typedef struct async_memcpy_context_t {
     async_memcpy_impl_t mcp_impl; // implementation layer
+    portMUX_TYPE spinlock;        // spinlock, prevent operating descriptors concurrently
     intr_handle_t intr_hdl;       // interrupt handle
     uint32_t flags;               // extra driver flags
     dma_descriptor_t *tx_desc;    // pointer to the next free TX descriptor
@@ -73,11 +74,6 @@ esp_err_t esp_async_memcpy_install(const async_memcpy_config_t *config, async_me
     mcp_hdl = heap_caps_calloc(1, total_malloc_size, MALLOC_CAP_8BIT | MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
     ASMCP_CHECK(mcp_hdl, "allocate context memory failed", err, ESP_ERR_NO_MEM);
 
-    int int_flags = ESP_INTR_FLAG_IRAM; // interrupt can still work when cache is disabled
-    // allocate interrupt handle, it's target dependent
-    ret_code = async_memcpy_impl_allocate_intr(&mcp_hdl->mcp_impl, int_flags, &mcp_hdl->intr_hdl);
-    ASMCP_CHECK(ret_code == ESP_OK, "allocate interrupt handle failed", err, ret_code);
-
     mcp_hdl->flags = config->flags;
     mcp_hdl->out_streams = mcp_hdl->streams_pool;
     mcp_hdl->in_streams = mcp_hdl->streams_pool + config->backlog;
@@ -96,20 +92,18 @@ esp_err_t esp_async_memcpy_install(const async_memcpy_config_t *config, async_me
     mcp_hdl->tx_desc = &mcp_hdl->out_streams[0].desc;
     mcp_hdl->rx_desc = &mcp_hdl->in_streams[0].desc;
     mcp_hdl->next_rx_desc_to_check = &mcp_hdl->in_streams[0].desc;
+    mcp_hdl->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
 
     // initialize implementation layer
-    async_memcpy_impl_init(&mcp_hdl->mcp_impl, &mcp_hdl->out_streams[0].desc, &mcp_hdl->in_streams[0].desc);
+    async_memcpy_impl_init(&mcp_hdl->mcp_impl);
 
     *asmcp = mcp_hdl;
 
-    async_memcpy_impl_start(&mcp_hdl->mcp_impl);
+    async_memcpy_impl_start(&mcp_hdl->mcp_impl, (intptr_t)&mcp_hdl->out_streams[0].desc, (intptr_t)&mcp_hdl->in_streams[0].desc);
 
     return ESP_OK;
 err:
     if (mcp_hdl) {
-        if (mcp_hdl->intr_hdl) {
-            esp_intr_free(mcp_hdl->intr_hdl);
-        }
         free(mcp_hdl);
     }
     if (asmcp) {
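Note: none of the hunks above change the public API in esp_async_memcpy.h; only the install path differs. For context, a minimal usage sketch (a sketch only, assuming the ASYNC_MEMCPY_DEFAULT_CONFIG() macro and the async_memcpy_isr_cb_t callback type from esp_async_memcpy.h of this era; the semaphore plumbing is illustrative, not part of this diff):

    #include <stdint.h>
    #include "esp_async_memcpy.h"
    #include "freertos/FreeRTOS.h"
    #include "freertos/semphr.h"

    // called from ISR context when the copy finishes
    static bool on_copy_done(async_memcpy_t mcp, async_memcpy_event_t *event, void *cb_args)
    {
        BaseType_t woken = pdFALSE;
        xSemaphoreGiveFromISR((SemaphoreHandle_t)cb_args, &woken);
        return woken == pdTRUE; // tells the driver whether a yield is needed
    }

    void async_copy_example(void)
    {
        async_memcpy_config_t cfg = ASYNC_MEMCPY_DEFAULT_CONFIG();
        async_memcpy_t drv = NULL;
        ESP_ERROR_CHECK(esp_async_memcpy_install(&cfg, &drv));

        static uint8_t src[256], dst[256]; // internal RAM, DMA-capable
        SemaphoreHandle_t done = xSemaphoreCreateBinary();
        ESP_ERROR_CHECK(esp_async_memcpy(drv, dst, src, sizeof(src), on_copy_done, done));
        xSemaphoreTake(done, portMAX_DELAY); // block until the DMA copy completes

        vSemaphoreDelete(done);
        ESP_ERROR_CHECK(esp_async_memcpy_uninstall(drv));
    }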
@@ -123,7 +117,6 @@ esp_err_t esp_async_memcpy_uninstall(async_memcpy_t asmcp)
     esp_err_t ret_code = ESP_OK;
     ASMCP_CHECK(asmcp, "mcp handle can't be null", err, ESP_ERR_INVALID_ARG);
 
-    esp_intr_free(asmcp->intr_hdl);
     async_memcpy_impl_stop(&asmcp->mcp_impl);
     async_memcpy_impl_deinit(&asmcp->mcp_impl);
     free(asmcp);
@@ -243,7 +236,7 @@ esp_err_t esp_async_memcpy(async_memcpy_t asmcp, void *dst, void *src, size_t n,
     ASMCP_CHECK(n <= DMA_DESCRIPTOR_BUFFER_MAX_SIZE * asmcp->max_stream_num, "buffer size too large", err, ESP_ERR_INVALID_ARG);
 
     // Prepare TX and RX descriptor
-    portENTER_CRITICAL_SAFE(&asmcp->mcp_impl.hal_lock);
+    portENTER_CRITICAL_SAFE(&asmcp->spinlock);
     rx_prepared_size = async_memcpy_prepare_receive(asmcp, dst, n, &rx_start_desc, &rx_end_desc);
     tx_prepared_size = async_memcpy_prepare_transmit(asmcp, src, n, &tx_start_desc, &tx_end_desc);
     if ((rx_prepared_size == n) && (tx_prepared_size == n)) {
@@ -269,7 +262,7 @@ esp_err_t esp_async_memcpy(async_memcpy_t asmcp, void *dst, void *src, size_t n,
         asmcp->tx_desc = desc->next;
         async_memcpy_impl_restart(&asmcp->mcp_impl);
     }
-    portEXIT_CRITICAL_SAFE(&asmcp->mcp_impl.hal_lock);
+    portEXIT_CRITICAL_SAFE(&asmcp->spinlock);
 
     // It's unlikely that we have space for the rx descriptor but no space for the tx descriptor
     // Both tx and rx descriptors should move at the same pace
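Taken together with the previous hunk, the driver now serializes descriptor access with its own spinlock instead of the implementation layer's hal_lock. For reference, a minimal sketch of this critical-section pattern (illustrative only; the _SAFE variants work from both task and ISR context):

    #include "freertos/FreeRTOS.h"

    static portMUX_TYPE s_lock = portMUX_INITIALIZER_UNLOCKED;

    static void touch_descriptor_lists(void)
    {
        portENTER_CRITICAL_SAFE(&s_lock);
        // ... operate on the TX/RX descriptor lists that the ISR also walks ...
        portEXIT_CRITICAL_SAFE(&s_lock);
    }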
@@ -289,14 +282,14 @@ IRAM_ATTR void async_memcpy_isr_on_rx_done_event(async_memcpy_impl_t *impl)
     async_memcpy_context_t *asmcp = __containerof(impl, async_memcpy_context_t, mcp_impl);
 
     // get the RX eof descriptor address
-    dma_descriptor_t *eof = async_memcpy_impl_get_rx_suc_eof_descriptor(impl);
+    dma_descriptor_t *eof = (dma_descriptor_t *)impl->rx_eof_addr;
     // traverse all unchecked descriptors
     do {
-        portENTER_CRITICAL_ISR(&impl->hal_lock);
+        portENTER_CRITICAL_ISR(&asmcp->spinlock);
         // There is an assumption that rx descriptors are consumed at the same pace as tx descriptors (this is determined by the M2M DMA working mechanism)
         // And once the rx descriptor is recycled, the corresponding tx desc is guaranteed to be returned by DMA
         to_continue = async_memcpy_get_next_rx_descriptor(asmcp, eof, &next_desc);
-        portEXIT_CRITICAL_ISR(&impl->hal_lock);
+        portEXIT_CRITICAL_ISR(&asmcp->spinlock);
         if (next_desc) {
             in_stream = __containerof(next_desc, async_memcpy_stream_t, desc);
             // invoke user registered callback if available
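The ISR above recovers the driver context from the embedded implementation struct via __containerof. A minimal illustration of that pattern, with hypothetical types (outer_t and containerof_demo are not part of this diff):

    #include <sys/cdefs.h> // provides __containerof in ESP-IDF's toolchain

    typedef struct {
        int impl;  // embedded member, standing in for async_memcpy_impl_t
        int extra;
    } outer_t;

    static outer_t s_ctx;

    static void containerof_demo(void)
    {
        int *member = &s_ctx.impl;
        outer_t *outer = __containerof(member, outer_t, impl); // outer == &s_ctx
        (void)outer;
    }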
@@ -24,76 +24,85 @@
 #include "esp_err.h"
 #include "esp_async_memcpy_impl.h"
 
-IRAM_ATTR static void async_memcpy_impl_default_isr_handler(void *args)
+IRAM_ATTR static bool async_memcpy_impl_rx_eof_callback(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
 {
-    async_memcpy_impl_t *mcp_impl = (async_memcpy_impl_t *)args;
+    async_memcpy_impl_t *mcp_impl = (async_memcpy_impl_t *)user_data;
+    mcp_impl->rx_eof_addr = event_data->rx_eof_desc_addr;
 
-    portENTER_CRITICAL_ISR(&mcp_impl->hal_lock);
-    uint32_t status = gdma_ll_get_interrupt_status(mcp_impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
-    gdma_ll_clear_interrupt_status(mcp_impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, status);
-    portEXIT_CRITICAL_ISR(&mcp_impl->hal_lock);
-
-    // End-Of-Frame on RX side
-    if (status & GDMA_LL_EVENT_RX_SUC_EOF) {
-        async_memcpy_isr_on_rx_done_event(mcp_impl);
-    }
-
-    if (mcp_impl->isr_need_yield) {
-        mcp_impl->isr_need_yield = false;
-        portYIELD_FROM_ISR();
-    }
+    async_memcpy_isr_on_rx_done_event(mcp_impl);
+    return mcp_impl->isr_need_yield;
 }
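The raw ISR is gone: the gdma driver now owns the interrupt, demultiplexes events, and invokes the registered callback; the bool return value tells the gdma dispatcher whether a higher-priority task was woken and a yield is required. A sketch of how a client hooks this event (assuming only the esp_private/gdma.h API visible in this diff):

    #include "esp_err.h"
    #include "esp_private/gdma.h"

    // runs in ISR context; keep it short and defer real work
    static bool my_rx_eof(gdma_channel_handle_t chan, gdma_event_data_t *ev, void *user_data)
    {
        // e.g. record ev->rx_eof_desc_addr somewhere, like the callback above does
        return false; // no task woken, no yield needed
    }

    static esp_err_t hook_rx_eof(gdma_channel_handle_t rx_chan, void *ctx)
    {
        gdma_rx_event_callbacks_t cbs = {
            .on_recv_eof = my_rx_eof,
        };
        return gdma_register_rx_event_callbacks(rx_chan, &cbs, ctx);
    }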
 
-esp_err_t async_memcpy_impl_allocate_intr(async_memcpy_impl_t *impl, int int_flags, intr_handle_t *intr)
+esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl)
 {
-    return esp_intr_alloc(ETS_DMA_CH0_INTR_SOURCE, int_flags, async_memcpy_impl_default_isr_handler, impl, intr);
-}
+    esp_err_t ret = ESP_OK;
+    // create TX channel and reserve sibling channel for future use
+    gdma_channel_alloc_config_t tx_alloc_config = {
+        .flags.reserve_sibling = 1,
+        .direction = GDMA_CHANNEL_DIRECTION_TX,
+    };
+    ret = gdma_new_channel(&tx_alloc_config, &impl->tx_channel);
+    if (ret != ESP_OK) {
+        goto err;
+    }
 
-esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl, dma_descriptor_t *outlink_base, dma_descriptor_t *inlink_base)
-{
-    impl->hal_lock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
-    impl->hal.dev = &GDMA;
-    periph_module_enable(PERIPH_GDMA_MODULE);
-    gdma_ll_enable_clock(impl->hal.dev, true);
-    gdma_ll_tx_reset_channel(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
-    gdma_ll_rx_reset_channel(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
-    gdma_ll_enable_interrupt(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, UINT32_MAX, true);
-    gdma_ll_clear_interrupt_status(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, UINT32_MAX);
-    gdma_ll_enable_m2m_mode(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, true);
-    gdma_ll_tx_enable_auto_write_back(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, true);
-    gdma_ll_tx_enable_owner_check(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, true);
-    gdma_ll_rx_enable_owner_check(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, true);
-    gdma_ll_tx_set_desc_addr(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, (uint32_t)outlink_base);
-    gdma_ll_rx_set_desc_addr(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, (uint32_t)inlink_base);
-    return ESP_OK;
+    // create RX channel and specify it should reside in the same pair as TX
+    gdma_channel_alloc_config_t rx_alloc_config = {
+        .direction = GDMA_CHANNEL_DIRECTION_RX,
+        .sibling_chan = impl->tx_channel,
+    };
+    ret = gdma_new_channel(&rx_alloc_config, &impl->rx_channel);
+    if (ret != ESP_OK) {
+        goto err;
+    }
+
+    gdma_connect(impl->rx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0));
+    gdma_connect(impl->tx_channel, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0));
+
+    gdma_strategy_config_t strategy_config = {
+        .auto_update_desc = true,
+        .owner_check = true
+    };
+
+    gdma_apply_strategy(impl->tx_channel, &strategy_config);
+    gdma_apply_strategy(impl->rx_channel, &strategy_config);
+
+    gdma_rx_event_callbacks_t cbs = {
+        .on_recv_eof = async_memcpy_impl_rx_eof_callback
+    };
+    ret = gdma_register_rx_event_callbacks(impl->rx_channel, &cbs, impl);
+
+err:
+    return ret;
 }
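Design note: memory-to-memory transfers need the TX and RX channels to sit in the same GDMA channel pair, which is why the TX allocation reserves a sibling slot (flags.reserve_sibling) and the RX allocation then pins itself to that pair via sibling_chan. All of the register-level setup the old init did by hand (clock enable, M2M mode, owner check, descriptor addresses) is now delegated to the gdma driver through gdma_connect, gdma_apply_strategy, and gdma_start.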
 
 esp_err_t async_memcpy_impl_deinit(async_memcpy_impl_t *impl)
 {
-    periph_module_disable(PERIPH_GDMA_MODULE);
+    gdma_disconnect(impl->rx_channel);
+    gdma_disconnect(impl->tx_channel);
+    gdma_del_channel(impl->rx_channel);
+    gdma_del_channel(impl->tx_channel);
     return ESP_OK;
 }
 
-esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl)
+esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl, intptr_t outlink_base, intptr_t inlink_base)
 {
-    gdma_ll_rx_start(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
-    gdma_ll_tx_start(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
-    gdma_ll_enable_interrupt(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, GDMA_LL_EVENT_RX_SUC_EOF, true);
+    gdma_start(impl->rx_channel, inlink_base);
+    gdma_start(impl->tx_channel, outlink_base);
     return ESP_OK;
 }
 
 esp_err_t async_memcpy_impl_stop(async_memcpy_impl_t *impl)
 {
-    gdma_ll_enable_interrupt(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL, GDMA_LL_EVENT_RX_SUC_EOF, false);
-    gdma_ll_rx_stop(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
-    gdma_ll_tx_stop(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
+    gdma_stop(impl->rx_channel);
+    gdma_stop(impl->tx_channel);
     return ESP_OK;
 }
 
 esp_err_t async_memcpy_impl_restart(async_memcpy_impl_t *impl)
 {
-    gdma_ll_rx_restart(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
-    gdma_ll_tx_restart(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
+    gdma_append(impl->rx_channel);
+    gdma_append(impl->tx_channel);
     return ESP_OK;
 }
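Descriptor base addresses now travel with start() rather than init(): gdma_start() takes the head of the descriptor list, and gdma_append() (used by restart) tells the engine that new descriptors were linked onto a running list. The resulting implementation-layer lifecycle, as a sketch (descriptor setup elided; out_desc and in_desc are assumed to be properly initialized, DMA-capable descriptor chains):

    #include "esp_async_memcpy_impl.h"

    static void impl_lifecycle_sketch(dma_descriptor_t *out_desc, dma_descriptor_t *in_desc)
    {
        async_memcpy_impl_t impl = {0};
        async_memcpy_impl_init(&impl); // allocate and configure the GDMA channel pair
        async_memcpy_impl_start(&impl, (intptr_t)out_desc, (intptr_t)in_desc);
        // ... enqueue copies; call async_memcpy_impl_restart(&impl) after linking new descriptors ...
        async_memcpy_impl_stop(&impl);
        async_memcpy_impl_deinit(&impl);
    }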
 
@@ -101,8 +110,3 @@ bool async_memcpy_impl_is_buffer_address_valid(async_memcpy_impl_t *impl, void *
 {
     return true;
 }
-
-dma_descriptor_t *async_memcpy_impl_get_rx_suc_eof_descriptor(async_memcpy_impl_t *impl)
-{
-    return (dma_descriptor_t *)gdma_ll_rx_get_success_eof_desc_addr(impl->hal.dev, SOC_GDMA_M2M_DMA_CHANNEL);
-}
@@ -24,8 +24,7 @@
 #include "hal/cp_dma_ll.h"
 #include "hal/cp_dma_hal.h"
 #elif SOC_GDMA_SUPPORTED
-#include "hal/gdma_ll.h"
-#include "hal/gdma_hal.h"
+#include "esp_private/gdma.h"
 #endif
 
 /**
@@ -33,12 +32,15 @@
  *
  */
 typedef struct {
-    portMUX_TYPE hal_lock; // spin lock for HAL object
 #if SOC_CP_DMA_SUPPORTED
     cp_dma_hal_context_t hal; // CP DMA hal
+    intr_handle_t intr;       // CP DMA interrupt handle
+    portMUX_TYPE hal_lock;    // CP DMA HAL level spin lock
 #elif SOC_GDMA_SUPPORTED
-    gdma_hal_context_t hal; // General DMA hal
+    gdma_channel_handle_t tx_channel;
+    gdma_channel_handle_t rx_channel;
 #endif
+    intptr_t rx_eof_addr;
     bool isr_need_yield; // if current isr needs a yield for higher priority task
 } async_memcpy_impl_t;
@@ -49,28 +51,13 @@ typedef struct {
  */
 void async_memcpy_isr_on_rx_done_event(async_memcpy_impl_t *impl);
 
-/**
- * @brief Allocate interrupt handle, register default isr handler
- *
- * @param impl async mcp implementation layer context pointer
- * @param int_flags interrupt flags
- * @param intr Returned interrupt handle
- * @return
- *      - ESP_OK: Allocate interrupt handle successfully
- *      - ESP_ERR_INVALID_ARG: Allocate interrupt handle failed because of invalid argument
- *      - ESP_FAIL: Allocate interrupt handle failed because of other error
- */
-esp_err_t async_memcpy_impl_allocate_intr(async_memcpy_impl_t *impl, int int_flags, intr_handle_t *intr);
-
 /**
  * @brief Initialize async mcp implementation layer
  *
  * @param impl async mcp implementation layer context pointer
- * @param outlink_base Pointer to the first TX descriptor
- * @param inlink_base Pointer to the first RX descriptor
  * @return Always return ESP_OK
  */
-esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl, dma_descriptor_t *outlink_base, dma_descriptor_t *inlink_base);
+esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl);
 
 /**
  * @brief Deinitialize async mcp implementation layer
@@ -84,9 +71,11 @@ esp_err_t async_memcpy_impl_deinit(async_memcpy_impl_t *impl);
  * @brief Start async mcp (on implementation layer)
  *
  * @param impl async mcp implementation layer context pointer
+ * @param outlink_base base descriptor address for TX DMA channel
+ * @param inlink_base base descriptor address for RX DMA channel
  * @return Always return ESP_OK
  */
-esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl);
+esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl, intptr_t outlink_base, intptr_t inlink_base);
 
 /**
  * @brief Stop async mcp (on implementation layer)
@@ -114,11 +103,3 @@ esp_err_t async_memcpy_impl_restart(async_memcpy_impl_t *impl);
 * @return True if both addresses are valid
  */
 bool async_memcpy_impl_is_buffer_address_valid(async_memcpy_impl_t *impl, void *src, void *dst);
-
-/**
- * @brief Get the EOF RX descriptor address
- *
- * @param impl async mcp implementation layer context pointer
- * @return Pointer to the EOF RX descriptor
- */
-dma_descriptor_t *async_memcpy_impl_get_rx_suc_eof_descriptor(async_memcpy_impl_t *impl);
 
@@ -33,6 +33,7 @@ IRAM_ATTR static void async_memcpy_impl_default_isr_handler(void *args)
 
     // End-Of-Frame on RX side
     if (status & CP_DMA_LL_EVENT_RX_EOF) {
+        mcp_impl->rx_eof_addr = cp_dma_ll_get_rx_eof_descriptor_address(mcp_impl->hal.dev);
         async_memcpy_isr_on_rx_done_event(mcp_impl);
     }
@@ -42,30 +43,30 @@ IRAM_ATTR static void async_memcpy_impl_default_isr_handler(void *args)
     }
 }
 
-esp_err_t async_memcpy_impl_allocate_intr(async_memcpy_impl_t *impl, int int_flags, intr_handle_t *intr)
+esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl)
 {
-    return esp_intr_alloc(ETS_DMA_COPY_INTR_SOURCE, int_flags, async_memcpy_impl_default_isr_handler, impl, intr);
-}
+    esp_err_t ret = ESP_OK;
 
-esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl, dma_descriptor_t *outlink_base, dma_descriptor_t *inlink_base)
-{
     impl->hal_lock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
-    cp_dma_hal_config_t config = {
-        .inlink_base = inlink_base,
-        .outlink_base = outlink_base
-    };
+    cp_dma_hal_config_t config = {};
     cp_dma_hal_init(&impl->hal, &config);
-    return ESP_OK;
+
+    ret = esp_intr_alloc(ETS_DMA_COPY_INTR_SOURCE, ESP_INTR_FLAG_IRAM, async_memcpy_impl_default_isr_handler, impl, &impl->intr);
+    return ret;
 }
 
 esp_err_t async_memcpy_impl_deinit(async_memcpy_impl_t *impl)
 {
+    esp_err_t ret = ESP_OK;
+
     cp_dma_hal_deinit(&impl->hal);
-    return ESP_OK;
+    ret = esp_intr_free(impl->intr);
+    return ret;
 }
 
-esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl)
+esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl, intptr_t outlink_base, intptr_t inlink_base)
 {
+    cp_dma_hal_set_desc_base_addr(&impl->hal, outlink_base, inlink_base);
     cp_dma_hal_start(&impl->hal); // enable DMA and interrupt
     return ESP_OK;
 }
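Note the asymmetry this creates: on CP_DMA targets (ESP32-S2) the implementation still allocates its own interrupt (ETS_DMA_COPY_INTR_SOURCE, with ESP_INTR_FLAG_IRAM so the ISR keeps working while flash cache is disabled) and frees it in deinit, whereas on GDMA targets interrupt handling is fully delegated to the gdma driver's callback dispatch. In both back ends the descriptor base addresses have moved from init() to start().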
@@ -88,8 +89,3 @@ bool async_memcpy_impl_is_buffer_address_valid(async_memcpy_impl_t *impl, void *
     // CP_DMA can only access SRAM
     return esp_ptr_internal(src) && esp_ptr_internal(dst);
 }
-
-dma_descriptor_t *async_memcpy_impl_get_rx_suc_eof_descriptor(async_memcpy_impl_t *impl)
-{
-    return (dma_descriptor_t *)cp_dma_ll_get_rx_eof_descriptor_address(impl->hal.dev);
-}
@@ -27,10 +27,13 @@ void cp_dma_hal_init(cp_dma_hal_context_t *hal, const cp_dma_hal_config_t *confi
     cp_dma_ll_enable_intr(hal->dev, UINT32_MAX, false);
     cp_dma_ll_clear_intr_status(hal->dev, UINT32_MAX);
     cp_dma_ll_enable_owner_check(hal->dev, true);
+}
+
+void cp_dma_hal_set_desc_base_addr(cp_dma_hal_context_t *hal, intptr_t outlink_base, intptr_t inlink_base)
+{
     /* set base address of the first descriptor */
-    cp_dma_ll_tx_set_descriptor_base_addr(hal->dev, (uint32_t)config->outlink_base);
-    cp_dma_ll_rx_set_descriptor_base_addr(hal->dev, (uint32_t)config->inlink_base);
+    cp_dma_ll_tx_set_descriptor_base_addr(hal->dev, outlink_base);
+    cp_dma_ll_rx_set_descriptor_base_addr(hal->dev, inlink_base);
 }
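The resulting HAL call order, sketched (first_out_desc and first_in_desc are hypothetical, caller-prepared descriptor chains; only functions visible in this diff are used):

    #include "hal/cp_dma_hal.h"

    static void hal_call_order_sketch(dma_descriptor_t *first_out_desc, dma_descriptor_t *first_in_desc)
    {
        cp_dma_hal_context_t hal = {0};
        cp_dma_hal_config_t config = {}; // config no longer carries descriptor addresses
        cp_dma_hal_init(&hal, &config);
        cp_dma_hal_set_desc_base_addr(&hal, (intptr_t)first_out_desc, (intptr_t)first_in_desc);
        cp_dma_hal_start(&hal); // enable DMA and interrupt
    }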
 
 void cp_dma_hal_deinit(cp_dma_hal_context_t *hal)
 
@@ -45,8 +45,6 @@ typedef struct {
 } cp_dma_hal_context_t;
 
 typedef struct {
-    dma_descriptor_t *outlink_base; /*!< Address of the first outlink descriptor */
-    dma_descriptor_t *inlink_base;  /*!< Address of the first inlink descriptor */
 } cp_dma_hal_config_t;
 
 /**
@@ -62,6 +60,11 @@ void cp_dma_hal_init(cp_dma_hal_context_t *hal, const cp_dma_hal_config_t *confi
  */
 void cp_dma_hal_deinit(cp_dma_hal_context_t *hal);
 
+/**
+ * @brief Set descriptor base address
+ */
+void cp_dma_hal_set_desc_base_addr(cp_dma_hal_context_t *hal, intptr_t outlink_base, intptr_t inlink_base);
+
 /**
  * @brief Start mem2mem DMA state machine
  */