mirror of
https://github.com/espressif/esp-idf.git
synced 2025-12-01 06:39:27 +01:00
esp32s2: add CP_DMA driver
This commit is contained in:
@@ -1,5 +1,6 @@
|
||||
set(srcs "adc_hal.c"
|
||||
"brownout_hal.c"
|
||||
"cp_dma_hal.c"
|
||||
"rtc_clk.c"
|
||||
"rtc_clk_init.c"
|
||||
"rtc_init.c"
|
||||
|
||||
218
components/soc/src/esp32s2/cp_dma_hal.c
Normal file
218
components/soc/src/esp32s2/cp_dma_hal.c
Normal file
@@ -0,0 +1,218 @@
|
||||
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "hal/cp_dma_hal.h"
|
||||
#include "hal/cp_dma_ll.h"
|
||||
#include "soc/cp_dma_caps.h"
|
||||
|
||||
#define MCP_DESCRIPTOR_BUFFER_OWNER_DMA (1)
|
||||
#define MCP_DESCRIPTOR_BUFFER_OWNER_CPU (0)
|
||||
|
||||
/**
 * Initialize the CP DMA HAL context and hardware.
 *
 * - Enables the peripheral clock and resets links, FIFOs and command FIFO.
 * - Masks and clears all interrupts, enables descriptor owner checking.
 * - Chains the caller-provided TX/RX descriptors into circular lists
 *   (owner given to CPU) and programs the first descriptor addresses.
 *
 * @param hal            HAL context, memory allocated by the driver layer
 * @param tx_descriptors out-link descriptor pool (must contain tx_desc_num entries)
 * @param tx_desc_num    number of out-link descriptors
 * @param rx_descriptors in-link descriptor pool (must contain rx_desc_num entries)
 * @param rx_desc_num    number of in-link descriptors
 */
void cp_dma_hal_init(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *tx_descriptors[], uint32_t tx_desc_num, cp_dma_descriptor_t *rx_descriptors[], uint32_t rx_desc_num)
{
    hal->dev = &CP_DMA;
    cp_dma_ll_enable_clock(hal->dev, true);
    cp_dma_ll_reset_in_link(hal->dev);
    cp_dma_ll_reset_out_link(hal->dev);
    cp_dma_ll_reset_cmd_fifo(hal->dev);
    cp_dma_ll_reset_fifo(hal->dev);
    cp_dma_ll_enable_intr(hal->dev, UINT32_MAX, false);
    cp_dma_ll_clear_intr_status(hal->dev, UINT32_MAX);
    cp_dma_ll_enable_owner_check(hal->dev, true);

    // Circle TX descriptors. The tail links back to the head inside the loop,
    // so we never read one element past the end of the array (the previous
    // implementation accessed tx_descriptors[tx_desc_num], which is UB).
    for (uint32_t i = 0; i < tx_desc_num; i++) {
        tx_descriptors[i]->dw0.owner = MCP_DESCRIPTOR_BUFFER_OWNER_CPU;
        tx_descriptors[i]->next = (i + 1 == tx_desc_num) ? tx_descriptors[0] : tx_descriptors[i + 1];
    }

    // Circle RX descriptors (same scheme as TX)
    for (uint32_t i = 0; i < rx_desc_num; i++) {
        rx_descriptors[i]->dw0.owner = MCP_DESCRIPTOR_BUFFER_OWNER_CPU;
        rx_descriptors[i]->next = (i + 1 == rx_desc_num) ? rx_descriptors[0] : rx_descriptors[i + 1];
    }

    // Set the start of each descriptor chain
    hal->tx_desc = tx_descriptors[0];
    hal->rx_desc = rx_descriptors[0];

    /* set base address of the first descriptor */
    cp_dma_ll_tx_set_descriptor_base_addr(hal->dev, (uint32_t)hal->tx_desc);
    cp_dma_ll_rx_set_descriptor_base_addr(hal->dev, (uint32_t)hal->rx_desc);

    hal->next_rx_desc_to_check = rx_descriptors[0];
}
|
||||
|
||||
/**
 * Deinitialize the HAL context: gate the peripheral clock off and
 * drop every reference held by the context.
 */
void cp_dma_hal_deinit(cp_dma_hal_context_t *hal)
{
    cp_dma_ll_enable_clock(hal->dev, false);
    hal->tx_desc = NULL;
    hal->rx_desc = NULL;
    hal->dev = NULL; // cleared last: the clock gate above still needs it
}
|
||||
|
||||
/**
 * Start the mem2mem DMA state machine: kick both the in-link (RX) and
 * out-link (TX) engines and unmask the RX EOF interrupt.
 */
void cp_dma_hal_start(cp_dma_hal_context_t *hal)
{
    // enable DMA engine (both directions are needed for a memory copy)
    cp_dma_ll_start_rx(hal->dev, true);
    cp_dma_ll_start_tx(hal->dev, true);
    // enable RX EOF interrupt (raised when the in-link hits an EOF descriptor)
    cp_dma_ll_enable_intr(hal->dev, CP_DMA_LL_EVENT_RX_EOF, true);
}
|
||||
|
||||
/**
 * Stop the mem2mem DMA state machine: mask EOF interrupts first so no
 * late interrupt fires, then stop both link engines.
 */
void cp_dma_hal_stop(cp_dma_hal_context_t *hal)
{
    // disable interrupt
    cp_dma_ll_enable_intr(hal->dev, CP_DMA_LL_EVENT_RX_EOF, false);
    cp_dma_ll_enable_intr(hal->dev, CP_DMA_LL_EVENT_TX_EOF, false);

    // disable DMA
    cp_dma_ll_start_rx(hal->dev, false);
    cp_dma_ll_start_tx(hal->dev, false);
}
|
||||
|
||||
/**
 * Read the raw interrupt status word (CP_DMA_LL_EVENT_* bit mask).
 */
uint32_t cp_dma_hal_get_intr_status(cp_dma_hal_context_t *hal)
{
    return cp_dma_ll_get_intr_status(hal->dev);
}
|
||||
|
||||
/**
 * Clear the interrupt bits selected by mask (CP_DMA_LL_EVENT_* bits).
 */
void cp_dma_hal_clear_intr_status(cp_dma_hal_context_t *hal, uint32_t mask)
{
    cp_dma_ll_clear_intr_status(hal->dev, mask);
}
|
||||
|
||||
/**
 * Fill TX descriptors to cover `buffer`/`len`, splitting the buffer into
 * SOC_CP_DMA_MAX_BUFFER_SIZE chunks. Only descriptors currently owned by the
 * CPU are consumed; ownership is NOT transferred here (see
 * cp_dma_hal_restart_tx). The last chunk gets the EOF flag.
 *
 * @param hal              HAL layer context
 * @param buffer           buffer address
 * @param len              buffer size
 * @param[out] start_desc  first descriptor of the transaction
 * @param[out] end_desc    last descriptor of the transaction
 * @return number of bytes that were prepared
 *
 * NOTE(review): when the descriptor pool runs out mid-buffer, a partial byte
 * count is returned and *start_desc/*end_desc are left unwritten — callers
 * must check the return value before using them (behavior kept as-is).
 */
int cp_dma_hal_prepare_transmit(cp_dma_hal_context_t *hal, void *buffer, size_t len, cp_dma_descriptor_t **start_desc, cp_dma_descriptor_t **end_desc)
{
    uint8_t *data = (uint8_t *)buffer;
    uint32_t consumed = 0;
    cp_dma_descriptor_t *cur = hal->tx_desc; // descriptor iterator
    cp_dma_descriptor_t *first = cur;
    cp_dma_descriptor_t *last = cur;

    // full-sized chunks, every one but the last
    while (len > SOC_CP_DMA_MAX_BUFFER_SIZE) {
        if (cur->dw0.owner == MCP_DESCRIPTOR_BUFFER_OWNER_DMA) {
            return consumed; // ran out of free TX descriptors
        }
        cur->dw0.eof = 0; // not the end of the transaction
        cur->dw0.size = SOC_CP_DMA_MAX_BUFFER_SIZE;
        cur->dw0.length = SOC_CP_DMA_MAX_BUFFER_SIZE;
        cur->buffer = &data[consumed];
        cur = cur->next;
        consumed += SOC_CP_DMA_MAX_BUFFER_SIZE;
        len -= SOC_CP_DMA_MAX_BUFFER_SIZE;
    }

    // final (possibly short) chunk carries the EOF flag
    if (len) {
        if (cur->dw0.owner == MCP_DESCRIPTOR_BUFFER_OWNER_DMA) {
            return consumed; // ran out of free TX descriptors
        }
        last = cur; // the last descriptor used
        cur->dw0.eof = 1;
        cur->dw0.size = len;
        cur->dw0.length = len;
        cur->buffer = &data[consumed];
        cur = cur->next;
        consumed += len;
    }

    *start_desc = first;
    *end_desc = last;
    return consumed;
}
|
||||
|
||||
/**
 * Fill RX descriptors to cover `buffer`/`size`, splitting the buffer into
 * SOC_CP_DMA_MAX_BUFFER_SIZE chunks. Only descriptors currently owned by the
 * CPU are consumed; ownership is NOT transferred here (see
 * cp_dma_hal_restart_rx). RX descriptors carry no EOF flag — hardware sets
 * length/EOF on completion.
 *
 * @param hal              HAL layer context
 * @param buffer           buffer address
 * @param size             buffer size
 * @param[out] start_desc  first descriptor of the transaction
 * @param[out] end_desc    last descriptor of the transaction
 * @return number of bytes that were prepared
 *
 * NOTE(review): as with the TX variant, a pool exhaustion returns a partial
 * count with *start_desc/*end_desc unwritten — callers must check the return
 * value (behavior kept as-is).
 */
int cp_dma_hal_prepare_receive(cp_dma_hal_context_t *hal, void *buffer, size_t size, cp_dma_descriptor_t **start_desc, cp_dma_descriptor_t **end_desc)
{
    uint8_t *data = (uint8_t *)buffer;
    uint32_t consumed = 0;
    cp_dma_descriptor_t *cur = hal->rx_desc; // descriptor iterator
    cp_dma_descriptor_t *first = cur;
    cp_dma_descriptor_t *last = cur;

    // full-sized chunks, every one but the last
    while (size > SOC_CP_DMA_MAX_BUFFER_SIZE) {
        if (cur->dw0.owner == MCP_DESCRIPTOR_BUFFER_OWNER_DMA) {
            return consumed; // ran out of free RX descriptors
        }
        cur->dw0.size = SOC_CP_DMA_MAX_BUFFER_SIZE;
        cur->buffer = &data[consumed];
        cur = cur->next;
        consumed += SOC_CP_DMA_MAX_BUFFER_SIZE;
        size -= SOC_CP_DMA_MAX_BUFFER_SIZE;
    }

    // final (possibly short) chunk
    if (size) {
        if (cur->dw0.owner == MCP_DESCRIPTOR_BUFFER_OWNER_DMA) {
            return consumed; // ran out of free RX descriptors
        }
        last = cur; // the last descriptor used
        cur->dw0.size = size;
        cur->buffer = &data[consumed];
        cur = cur->next;
        consumed += size;
    }

    *start_desc = first;
    *end_desc = last;
    return consumed;
}
|
||||
|
||||
/**
 * Hand ownership of the descriptors in [start_desc, end_desc] to the DMA,
 * advance the HAL's next-free-TX-descriptor pointer, and restart the
 * out-link engine so it picks up the new chain.
 */
void cp_dma_hal_restart_tx(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *start_desc, cp_dma_descriptor_t *end_desc)
{
    // give the descriptor owner bit to DMA, inclusive of end_desc
    for (cp_dma_descriptor_t *d = start_desc; ; d = d->next) {
        d->dw0.owner = MCP_DESCRIPTOR_BUFFER_OWNER_DMA;
        if (d == end_desc) {
            break;
        }
    }
    hal->tx_desc = end_desc->next; // next available descriptor in HAL
    cp_dma_ll_restart_tx(hal->dev);
}
|
||||
|
||||
/**
 * Hand ownership of the descriptors in [start_desc, end_desc] to the DMA,
 * advance the HAL's next-free-RX-descriptor pointer, and restart the
 * in-link engine so it picks up the new chain.
 */
void cp_dma_hal_restart_rx(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *start_desc, cp_dma_descriptor_t *end_desc)
{
    // give the descriptor owner bit to DMA, inclusive of end_desc
    for (cp_dma_descriptor_t *d = start_desc; ; d = d->next) {
        d->dw0.owner = MCP_DESCRIPTOR_BUFFER_OWNER_DMA;
        if (d == end_desc) {
            break;
        }
    }
    hal->rx_desc = end_desc->next; // next available descriptor in HAL
    cp_dma_ll_restart_rx(hal->dev);
}
|
||||
|
||||
/**
 * Fetch the next RX descriptor that needs recycling.
 *
 * @param eof_desc        EOF descriptor of the finished transaction
 * @param[out] next_desc  descriptor to recycle, or NULL if none is ready
 * @return true if the caller should keep iterating (EOF not reached yet)
 */
bool cp_dma_hal_get_next_rx_descriptor(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *eof_desc, cp_dma_descriptor_t **next_desc)
{
    cp_dma_descriptor_t *candidate = hal->next_rx_desc_to_check;

    // owner bit still belongs to DMA -> the interrupt fired spuriously,
    // nothing to hand back yet
    if (candidate->dw0.owner != MCP_DESCRIPTOR_BUFFER_OWNER_CPU) {
        *next_desc = NULL;
        return false;
    }

    hal->next_rx_desc_to_check = candidate->next;
    *next_desc = candidate;
    // continue until the EOF descriptor itself has been handed back
    return candidate != eof_desc;
}
|
||||
155
components/soc/src/esp32s2/include/hal/cp_dma_hal.h
Normal file
155
components/soc/src/esp32s2/include/hal/cp_dma_hal.h
Normal file
@@ -0,0 +1,155 @@
|
||||
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/*******************************************************************************
|
||||
* NOTICE
|
||||
* The HAL is not public api, don't use in application code.
|
||||
 * See soc/README.md
|
||||
******************************************************************************/
|
||||
|
||||
// CP DMA HAL usages:
|
||||
// 1. Initialize HAL layer by cp_dma_hal_init, pass in the allocated descriptors for TX and RX
|
||||
// 2. Enable DMA and interrupt by cp_dma_hal_start
|
||||
// 3. Prepare descriptors used for TX and RX
|
||||
// 4. Restart the DMA engine in case it is not already running
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdbool.h>
|
||||
#include "esp_attr.h"
|
||||
#include "soc/cp_dma_struct.h"
|
||||
|
||||
/**
 * DMA link-list descriptor. The layout of dw0 is mandated by the CP DMA
 * hardware, hence the size assertion below — do not reorder fields.
 */
typedef struct cp_dma_descriptor {
    struct {
        uint32_t size : 12;         /*!< buffer size */
        uint32_t length : 12;       /*!< number of valid bytes in the buffer */
        uint32_t reversed24_27 : 4; /*!< reserved (field name looks like a typo of "reserved") */
        uint32_t err : 1;           /*!< whether a received buffer contains an error */
        uint32_t reserved29 : 1;    /*!< reserved */
        uint32_t eof : 1;           /*!< if this DMA link is the last one, set this bit to 1 */
        uint32_t owner : 1;         /*!< owner of the buffer this descriptor points to, 1=DMA, 0=CPU. DMA clears it after use. */
    } dw0;                          /*!< descriptor word 0 */
    void *buffer;                   /*!< pointer to the buffer */
    struct cp_dma_descriptor *next; /*!< pointer to the next descriptor or NULL if this descriptor is the last one */
} cp_dma_descriptor_t;

_Static_assert(sizeof(cp_dma_descriptor_t) == 12, "cp_dma_descriptor_t should occupy 12 bytes in memory");
|
||||
|
||||
/**
|
||||
* @brief HAL context
|
||||
*
|
||||
* @note `tx_desc` and `rx_desc` are internal state of the HAL, will be modified during the operations.
|
||||
* Upper layer of HAL should keep the buffer address themselves and make sure the buffers are freed when the HAL is no longer used.
|
||||
*
|
||||
*/
|
||||
typedef struct {
    cp_dma_dev_t *dev;                          /*!< pointer to the CP DMA peripheral registers */
    cp_dma_descriptor_t *tx_desc;               /*!< next free TX descriptor (updated by prepare/restart) */
    cp_dma_descriptor_t *rx_desc;               /*!< next free RX descriptor (updated by prepare/restart) */
    cp_dma_descriptor_t *next_rx_desc_to_check; /*!< next RX descriptor to recycle after an EOF event */
} cp_dma_hal_context_t;
|
||||
|
||||
/**
|
||||
* @brief Initialize HAL layer context
|
||||
*
|
||||
 * @param hal HAL layer context, memory should be allocated by the driver layer
|
||||
* @param tx_descriptors out link descriptor pool
|
||||
* @param tx_desc_num number of out link descriptors
|
||||
 * @param rx_descriptors in link descriptor pool
|
||||
* @param rx_desc_num number of in link descriptors
|
||||
*/
|
||||
void cp_dma_hal_init(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *tx_descriptors[], uint32_t tx_desc_num, cp_dma_descriptor_t *rx_descriptors[], uint32_t rx_desc_num);
|
||||
|
||||
/**
|
||||
* @brief Deinitialize HAL layer context
|
||||
*/
|
||||
void cp_dma_hal_deinit(cp_dma_hal_context_t *hal);
|
||||
|
||||
/**
|
||||
* @brief Start mem2mem DMA state machine
|
||||
*/
|
||||
void cp_dma_hal_start(cp_dma_hal_context_t *hal);
|
||||
|
||||
/**
|
||||
* @brief Stop mem2mem DMA state machine
|
||||
*/
|
||||
void cp_dma_hal_stop(cp_dma_hal_context_t *hal);
|
||||
|
||||
/**
|
||||
* @brief Get interrupt status word
|
||||
*
|
||||
* @return uint32_t Interrupt status
|
||||
*/
|
||||
uint32_t cp_dma_hal_get_intr_status(cp_dma_hal_context_t *hal) IRAM_ATTR;
|
||||
|
||||
/**
|
||||
* @brief Clear interrupt mask
|
||||
*
|
||||
* @param mask interrupt mask
|
||||
*/
|
||||
void cp_dma_hal_clear_intr_status(cp_dma_hal_context_t *hal, uint32_t mask) IRAM_ATTR;
|
||||
|
||||
/**
|
||||
* @brief Get next RX descriptor that needs recycling
|
||||
*
|
||||
* @param eof_desc EOF descriptor for this iteration
|
||||
* @param[out] next_desc Next descriptor needs to check
|
||||
* @return Whether to continue
|
||||
*/
|
||||
bool cp_dma_hal_get_next_rx_descriptor(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *eof_desc, cp_dma_descriptor_t **next_desc);
|
||||
|
||||
/**
|
||||
* @brief Prepare buffer to be transmitted
|
||||
*
|
||||
* @param hal HAL layer context
|
||||
* @param buffer buffer address
|
||||
* @param len buffer size
|
||||
* @param[out] start_desc The first descriptor that carry the TX transaction
|
||||
* @param[out] end_desc The last descriptor that carry the TX transaction
|
||||
 * @return Number of bytes that have been prepared to transmit
|
||||
*/
|
||||
int cp_dma_hal_prepare_transmit(cp_dma_hal_context_t *hal, void *buffer, size_t len, cp_dma_descriptor_t **start_desc, cp_dma_descriptor_t **end_desc);
|
||||
|
||||
/**
|
||||
* @brief Prepare buffer to receive
|
||||
*
|
||||
* @param hal HAL layer context
|
||||
* @param buffer buffer address
|
||||
* @param size buffer size
|
||||
* @param[out] start_desc The first descriptor that carries the RX transaction
|
||||
* @param[out] end_desc The last descriptor that carries the RX transaction
|
||||
 * @return Number of bytes that have been prepared to receive
|
||||
*/
|
||||
int cp_dma_hal_prepare_receive(cp_dma_hal_context_t *hal, void *buffer, size_t size, cp_dma_descriptor_t **start_desc, cp_dma_descriptor_t **end_desc);
|
||||
|
||||
/**@{*/
|
||||
/**
|
||||
* @brief Give the owner of descriptors between [start_desc, end_desc] to DMA, and restart DMA HW engine
|
||||
*
|
||||
* @param hal HAL layer context
|
||||
* @param start_desc The first descriptor that carries one transaction
|
||||
* @param end_desc The last descriptor that carries one transaction
|
||||
*/
|
||||
void cp_dma_hal_restart_tx(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *start_desc, cp_dma_descriptor_t *end_desc);
|
||||
void cp_dma_hal_restart_rx(cp_dma_hal_context_t *hal, cp_dma_descriptor_t *start_desc, cp_dma_descriptor_t *end_desc);
|
||||
/**@}*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
159
components/soc/src/esp32s2/include/hal/cp_dma_ll.h
Normal file
159
components/soc/src/esp32s2/include/hal/cp_dma_ll.h
Normal file
@@ -0,0 +1,159 @@
|
||||
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
#include "soc/cp_dma_struct.h"
|
||||
|
||||
#define CP_DMA_LL_EVENT_RX_DONE (1 << 0)
|
||||
#define CP_DMA_LL_EVENT_RX_EOF (1 << 1)
|
||||
#define CP_DMA_LL_EVENT_TX_DONE (1 << 2)
|
||||
#define CP_DMA_LL_EVENT_TX_EOF (1 << 3)
|
||||
#define CP_DMA_LL_EVENT_RX_DESC_ERR (1 << 4)
|
||||
#define CP_DMA_LL_EVENT_TX_DESC_ERR (1 << 5)
|
||||
#define CP_DMA_LL_EVENT_RX_DESC_EMPTY (1 << 6)
|
||||
#define CP_DMA_LL_EVENT_TX_TOTAL_EOF (1 << 7)
|
||||
#define CP_DMA_LL_EVENT_ALL (0xFF)
|
||||
|
||||
/**
|
||||
* Copy DMA firstly reads data to be transferred from internal RAM,
|
||||
* stores the data into DMA FIFO via an outlink,
|
||||
* and then writes the data to the destination internal RAM via an inlink.
|
||||
*/
|
||||
|
||||
// Pulse the in-link (RX) reset bit: set then clear
static inline void cp_dma_ll_reset_in_link(cp_dma_dev_t *dev)
{
    dev->dma_conf.dma_in_rst = 1;
    dev->dma_conf.dma_in_rst = 0;
}
|
||||
|
||||
// Pulse the out-link (TX) reset bit: set then clear
static inline void cp_dma_ll_reset_out_link(cp_dma_dev_t *dev)
{
    dev->dma_conf.dma_out_rst = 1;
    dev->dma_conf.dma_out_rst = 0;
}
|
||||
|
||||
// Pulse the data FIFO reset bit: set then clear
static inline void cp_dma_ll_reset_fifo(cp_dma_dev_t *dev)
{
    dev->dma_conf.dma_fifo_rst = 1;
    dev->dma_conf.dma_fifo_rst = 0;
}
|
||||
|
||||
// Pulse the command FIFO reset bit: set then clear
static inline void cp_dma_ll_reset_cmd_fifo(cp_dma_dev_t *dev)
{
    dev->dma_conf.dma_cmdfifo_rst = 1;
    dev->dma_conf.dma_cmdfifo_rst = 0;
}
|
||||
|
||||
// Enable/disable descriptor owner-bit checking.
// NOTE: also enables automatic owner write-back on the out-link and fixes
// both owner polarity bits to 0 — a wider side effect than the name implies.
static inline void cp_dma_ll_enable_owner_check(cp_dma_dev_t *dev, bool enable)
{
    dev->dma_conf.dma_check_owner = enable;
    dev->dma_conf.dma_out_auto_wrback = 1;
    dev->dma_conf.dma_out_owner = 0;
    dev->dma_conf.dma_in_owner = 0;
}
|
||||
|
||||
// Gate the CP DMA peripheral clock on/off
static inline void cp_dma_ll_enable_clock(cp_dma_dev_t *dev, bool enable)
{
    dev->dma_conf.dma_clk_en = enable;
}
|
||||
|
||||
// Unmask (enable=true) or mask the interrupts selected by mask
// (CP_DMA_LL_EVENT_* bits); other bits are left untouched
static inline void cp_dma_ll_enable_intr(cp_dma_dev_t *dev, uint32_t mask, bool enable)
{
    if (enable) {
        dev->dma_int_ena.val |= mask;
    } else {
        dev->dma_int_ena.val &= ~mask;
    }
}
|
||||
|
||||
// Read the masked interrupt status word (always inlined: ISR hot path)
static inline __attribute__((always_inline)) uint32_t cp_dma_ll_get_intr_status(cp_dma_dev_t *dev)
{
    return dev->dma_int_st.val;
}
|
||||
|
||||
// Write-1-to-clear the interrupt bits in mask (always inlined: ISR hot path)
static inline __attribute__((always_inline)) void cp_dma_ll_clear_intr_status(cp_dma_dev_t *dev, uint32_t mask)
{
    dev->dma_int_clr.val = mask;
}
|
||||
|
||||
// Program the address of the first out-link (TX) descriptor
static inline void cp_dma_ll_tx_set_descriptor_base_addr(cp_dma_dev_t *dev, uint32_t address)
{
    dev->dma_out_link.dma_outlink_addr = address;
}
|
||||
|
||||
// Program the address of the first in-link (RX) descriptor
static inline void cp_dma_ll_rx_set_descriptor_base_addr(cp_dma_dev_t *dev, uint32_t address)
{
    dev->dma_in_link.dma_inlink_addr = address;
}
|
||||
|
||||
// Start (enable=true) or stop the out-link (TX) engine via self-clearing strobe bits
static inline void cp_dma_ll_start_tx(cp_dma_dev_t *dev, bool enable)
{
    if (enable) {
        dev->dma_out_link.dma_outlink_start = 1; // cleared automatically by HW
    } else {
        dev->dma_out_link.dma_outlink_stop = 1; // cleared automatically by HW
    }
}
|
||||
|
||||
// Start (enable=true) or stop the in-link (RX) engine via self-clearing strobe bits
static inline void cp_dma_ll_start_rx(cp_dma_dev_t *dev, bool enable)
{
    if (enable) {
        dev->dma_in_link.dma_inlink_start = 1; // cleared automatically by HW
    } else {
        dev->dma_in_link.dma_inlink_stop = 1; // cleared automatically by HW
    }
}
|
||||
|
||||
// Resume the out-link (TX) engine from the most recent descriptor
static inline void cp_dma_ll_restart_tx(cp_dma_dev_t *dev)
{
    dev->dma_out_link.dma_outlink_restart = 1; // cleared automatically by HW
}
|
||||
|
||||
// Resume the in-link (RX) engine from the most recent descriptor
static inline void cp_dma_ll_restart_rx(cp_dma_dev_t *dev)
{
    dev->dma_in_link.dma_inlink_restart = 1; // cleared automatically by HW
}
|
||||
|
||||
// Get the address of the RX descriptor on which the last successful EOF occurred
static inline uint32_t cp_dma_ll_get_rx_eof_descriptor_address(cp_dma_dev_t *dev)
{
    return dev->dma_in_eof_des_addr.dma_in_suc_eof_des_addr;
}
|
||||
|
||||
// Get the address of the TX descriptor on which the last EOF occurred
static inline uint32_t cp_dma_ll_get_tx_eof_descriptor_address(cp_dma_dev_t *dev)
{
    return dev->dma_out_eof_des_addr.dma_out_eof_des_addr;
}
|
||||
|
||||
// Read the raw out-link (TX) status register
static inline uint32_t cp_dma_ll_get_tx_status(cp_dma_dev_t *dev)
{
    return dev->dma_out_st.val;
}
|
||||
|
||||
// Read the raw in-link (RX) status register
static inline uint32_t cp_dma_ll_get_rx_status(cp_dma_dev_t *dev)
{
    return dev->dma_in_st.val;
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
Reference in New Issue
Block a user