|
|
|
@@ -0,0 +1,966 @@
|
|
|
|
|
/*
|
|
|
|
|
* SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD
|
|
|
|
|
*
|
|
|
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include <stdlib.h>
|
|
|
|
|
#include <string.h>
|
|
|
|
|
#include <sys/queue.h>
|
|
|
|
|
#include <sys/lock.h>
|
|
|
|
|
#include "esp_check.h"
|
|
|
|
|
#include "esp_log.h"
|
|
|
|
|
#include "freertos/portmacro.h"
|
|
|
|
|
#include "freertos/FreeRTOS.h"
|
|
|
|
|
#include "freertos/task.h"
|
|
|
|
|
#include "freertos/semphr.h"
|
|
|
|
|
#include "esp_heap_caps.h"
|
|
|
|
|
#include "esp_cache.h"
|
|
|
|
|
#include "hal/cache_hal.h"
|
|
|
|
|
#include "hal/cache_ll.h"
|
|
|
|
|
#include "driver/ppa.h"
|
|
|
|
|
#include "esp_private/dma2d.h"
|
|
|
|
|
#include "hal/dma2d_ll.h"
|
|
|
|
|
#include "soc/dma2d_channel.h"
|
|
|
|
|
#include "hal/ppa_ll.h"
|
|
|
|
|
#include "hal/ppa_types.h"
|
|
|
|
|
#include "hal/color_types.h"
|
|
|
|
|
#include "hal/color_hal.h"
|
|
|
|
|
#include "esp_private/periph_ctrl.h"
|
|
|
|
|
|
|
|
|
|
#define ALIGN_UP(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
|
|
|
|
|
|
|
|
|
|
static const char *TAG = "ppa";
|
|
|
|
|
|
|
|
|
|
typedef struct ppa_dev_t *ppa_soc_handle_t; // PPA SOC layer handle

// HAL context for the PPA peripheral; holds the register base pointer
typedef struct {
    ppa_soc_handle_t dev; // PPA peripheral register base (NULL when de-initialized)
} ppa_hal_context_t;
|
|
|
|
|
|
|
|
|
|
/**
 * @brief Initialize the PPA HAL context
 *
 * Binds the HAL context to the PPA peripheral's register base.
 */
void ppa_hal_init(ppa_hal_context_t *hal)
{
    hal->dev = PPA_LL_GET_HW;
}
|
|
|
|
|
|
|
|
|
|
/**
 * @brief De-initialize the PPA HAL context
 *
 * Clears the register base pointer; a NULL dev marks the HAL (and thus the
 * whole PPA platform) as uninitialized.
 */
void ppa_hal_deinit(ppa_hal_context_t *hal)
{
    hal->dev = NULL;
}
|
|
|
|
|
|
|
|
|
|
// PPA module contains SR engine and Blending engine
|
|
|
|
|
|
|
|
|
|
// typedef struct ppa_group_t ppa_group_t;
|
|
|
|
|
typedef struct ppa_engine_t ppa_engine_t; // forward declaration, defined below

// Descriptor of one SR (scale-rotate) transaction, as provided by the caller
typedef ppa_sr_trans_config_t ppa_sr_transaction_t;

// Descriptor of one blending transaction (fields not defined yet)
typedef struct {
} ppa_blend_transaction_t;
|
|
|
|
|
|
|
|
|
|
// One pending PPA transaction, linked into its engine's transaction queue
typedef struct ppa_trans_s {
    STAILQ_ENTRY(ppa_trans_s) entry; // link entry
    // union {
    //     const ppa_sr_transaction_t *sr_desc;
    //     const ppa_blending_transaction_t *blending_desc;
    // };
    dma2d_trans_config_t *trans_desc;     // 2D-DMA transaction config; its user_config points to a ppa_dma2d_trans_on_picked_config_t
    dma2d_trans_t *dma_trans_placeholder; // storage for the 2D-DMA transaction object while it sits in the DMA queue
    SemaphoreHandle_t sem;                // non-NULL for blocking calls: given from the done ISR; NULL means the ISR recycles the element itself
} ppa_trans_t;
|
|
|
|
|
|
|
|
|
|
// Context handed to the 2D-DMA "on picked" callback via trans_desc->user_config
typedef struct {
    union {
        ppa_sr_transaction_t *sr_desc;       // valid when trigger_periph == DMA2D_TRIG_PERIPH_PPA_SR
        ppa_blend_transaction_t *blend_desc; // valid when trigger_periph == DMA2D_TRIG_PERIPH_PPA_BLEND
    };
    ppa_engine_t *ppa_engine; // engine that processes this transaction
    ppa_trans_t *trans_elm;   // back-pointer to the transaction element in the engine queue
    dma2d_trigger_peripheral_t trigger_periph; // discriminator selecting the active union member
    // dma2d_csc_config_t
    // dma2d_strategy_config_t *dma_strategy;
    // dma2d_transfer_ability_t *dma_transfer_ability;
    // dma2d_rx_event_callbacks_t *event_cbs;
} ppa_dma2d_trans_on_picked_config_t;
|
|
|
|
|
|
|
|
|
|
// State common to the SR and Blending engines
struct ppa_engine_t {
    // ppa_group_t *group;
    ppa_engine_type_t type; // PPA_ENGINE_TYPE_SR or PPA_ENGINE_TYPE_BLEND
    portMUX_TYPE spinlock;  // protects trans_stailq and in_accepting_trans_state
    SemaphoreHandle_t sem;  // binary semaphore, available while the engine is idle
    bool in_accepting_trans_state; // set false during teardown so no new transactions are accepted
    // pending transactions queue? union ppa_sr_trans_config_t, ppa_blending_trans_config_t? handle when to free (at trans start or at trans end?)
    STAILQ_HEAD(trans, ppa_trans_s) trans_stailq; // link head of pending transactions for the PPA engine
    // callback func? Here or in the struct above?
    // dma2d_rx_event_callbacks_t event_cbs;
};
|
|
|
|
|
|
|
|
|
|
// SR (scale-rotate) engine: base engine state plus its dedicated 2D-DMA descriptors
typedef struct ppa_sr_engine_t {
    ppa_engine_t base;               // must stay the first member: __containerof() recovers it
    dma2d_descriptor_t *dma_tx_desc; // TX (input picture) DMA descriptor
    dma2d_descriptor_t *dma_rx_desc; // RX (output picture) DMA descriptor
} ppa_sr_engine_t;

// Blending engine: base engine state plus background/foreground TX and one RX descriptor
typedef struct ppa_blend_engine_t {
    ppa_engine_t base;                  // must stay the first member: __containerof() recovers it
    dma2d_descriptor_t *dma_tx_bg_desc; // TX DMA descriptor for the background layer
    dma2d_descriptor_t *dma_tx_fg_desc; // TX DMA descriptor for the foreground layer
    dma2d_descriptor_t *dma_rx_desc;    // RX (output picture) DMA descriptor
} ppa_blend_engine_t;
|
|
|
|
|
|
|
|
|
|
// how to free and how to push next trans into dma2d queue after engine is free (callback triggered)
|
|
|
|
|
//
|
|
|
|
|
|
|
|
|
|
// struct ppa_group_t {
|
|
|
|
|
// int group_id;
|
|
|
|
|
// portMUX_TYPE spinlock;
|
|
|
|
|
// ppa_hal_context_t hal;
|
|
|
|
|
// dma2d_pool_handle_t dma2d_pool_handle;
|
|
|
|
|
// ppa_sr_engine_t *sr;
|
|
|
|
|
// ppa_blend_engine_t *blending;
|
|
|
|
|
// uint32_t sr_engine_ref_count;
|
|
|
|
|
// uint32_t blend_engine_ref_count;
|
|
|
|
|
// };
|
|
|
|
|
|
|
|
|
|
// Platform-level singleton holding all shared PPA driver state
typedef struct ppa_platform_t {
    _lock_t mutex; // platform level mutex lock to protect the ppa_module_acquire/ppa_module_release process
    portMUX_TYPE spinlock; // platform level spinlock
    // ppa_group_t *group[PPA_LL_GROUPS]; // array of PPA group instances
    ppa_hal_context_t hal; // HAL context; hal.dev != NULL marks the shared resources as brought up
    dma2d_pool_handle_t dma2d_pool_handle; // shared 2D-DMA pool used by all PPA engines
    ppa_sr_engine_t *sr;          // SR engine singleton (NULL while not registered)
    ppa_blend_engine_t *blending; // Blending engine singleton (NULL while not registered)
    uint32_t sr_engine_ref_count;    // outstanding SR engine acquisitions
    uint32_t blend_engine_ref_count; // outstanding Blending engine acquisitions
    uint32_t dma_desc_mem_size; // per-descriptor allocation size (alignment-padded), computed lazily on first acquire
} ppa_platform_t;

// PPA driver platform
static ppa_platform_t s_platform = {
    .spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED,
    // .group = {},
};

#define PPA_MEM_ALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT) // TODO...
|
|
|
|
|
|
|
|
|
|
// TODO: acquire pm_lock?
|
|
|
|
|
/**
 * @brief Acquire a PPA engine handle (SR or Blending)
 *
 * Engines are platform-level singletons: the first acquisition of a given engine
 * type allocates the engine struct, its idle semaphore and its DMA descriptors;
 * later acquisitions only bump the reference count. The first engine acquired
 * overall also brings up the shared resources (PPA bus clock, HAL context,
 * 2D-DMA pool). The whole sequence is serialized by the platform mutex.
 *
 * @param config      Selects PPA_ENGINE_TYPE_SR or PPA_ENGINE_TYPE_BLEND
 * @param ret_engine  Out parameter, receives the engine handle on success
 * @return ESP_OK on success, ESP_ERR_INVALID_ARG on bad arguments,
 *         ESP_ERR_NO_MEM on allocation failure, or the error propagated from
 *         dma2d_acquire_pool
 */
esp_err_t ppa_engine_acquire(const ppa_engine_config_t *config, ppa_engine_handle_t *ret_engine)
{
    esp_err_t ret = ESP_OK;
    ESP_RETURN_ON_FALSE(config && ret_engine, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE(config->engine == PPA_ENGINE_TYPE_SR || config->engine == PPA_ENGINE_TYPE_BLEND, ESP_ERR_INVALID_ARG, TAG, "invalid engine");

    *ret_engine = NULL;

    // DMA descriptors must satisfy both the 2D-DMA descriptor alignment and the
    // internal data cache line size (so cache sync operations cover whole lines)
    uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
    size_t alignment = MAX(DMA2D_LL_DESC_ALIGNMENT, data_cache_line_size);

    _lock_acquire(&s_platform.mutex);
    if (s_platform.dma_desc_mem_size == 0) {
        // Computed once: per-descriptor allocation size, padded up to the alignment
        s_platform.dma_desc_mem_size = ALIGN_UP(sizeof(dma2d_descriptor_align8_t), alignment);
    }

    if (config->engine == PPA_ENGINE_TYPE_SR) {
        if (!s_platform.sr) {
            // First SR acquisition: allocate the engine, its semaphore, and one TX/RX descriptor each
            ppa_sr_engine_t *sr_engine = heap_caps_calloc(1, sizeof(ppa_sr_engine_t), PPA_MEM_ALLOC_CAPS);
            SemaphoreHandle_t sr_sem = xSemaphoreCreateBinary();
            dma2d_descriptor_t *sr_tx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS);
            dma2d_descriptor_t *sr_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS);
            if (sr_engine && sr_sem && sr_tx_dma_desc && sr_rx_dma_desc) {
                sr_engine->dma_tx_desc = sr_tx_dma_desc;
                sr_engine->dma_rx_desc = sr_rx_dma_desc;
                // sr_engine->base.group = s_platform.group[group_id];
                sr_engine->base.type = PPA_ENGINE_TYPE_SR;
                sr_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
                sr_engine->base.sem = sr_sem;
                // Engine starts idle: an available semaphore means the engine can take a transaction
                xSemaphoreGive(sr_engine->base.sem);
                sr_engine->base.in_accepting_trans_state = true;
                STAILQ_INIT(&sr_engine->base.trans_stailq);
                // sr_engine->base.event_cbs
                s_platform.sr = sr_engine;
                s_platform.sr_engine_ref_count++;
                *ret_engine = &sr_engine->base;
            } else {
                // Partial allocation failure: roll back everything that did get allocated
                // (free(NULL) is a no-op, so unconditional frees are safe)
                ret = ESP_ERR_NO_MEM;
                ESP_LOGE(TAG, "no mem to register PPA SR engine");
                free(sr_engine);
                if (sr_sem) {
                    vSemaphoreDelete(sr_sem);
                }
                free(sr_tx_dma_desc);
                free(sr_rx_dma_desc);
            }
        } else {
            // SR engine already registered
            s_platform.sr_engine_ref_count++;
            *ret_engine = &s_platform.sr->base;
        }
    } else if (config->engine == PPA_ENGINE_TYPE_BLEND) {
        if (!s_platform.blending) {
            // First Blending acquisition: allocate the engine, its semaphore, and BG/FG TX + RX descriptors
            ppa_blend_engine_t *blending_engine = heap_caps_calloc(1, sizeof(ppa_blend_engine_t), PPA_MEM_ALLOC_CAPS);
            SemaphoreHandle_t blending_sem = xSemaphoreCreateBinary();
            dma2d_descriptor_t *blending_tx_bg_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS);
            dma2d_descriptor_t *blending_tx_fg_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS);
            dma2d_descriptor_t *blending_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS);
            if (blending_engine && blending_sem && blending_tx_bg_dma_desc && blending_tx_fg_dma_desc && blending_rx_dma_desc) {
                blending_engine->dma_tx_bg_desc = blending_tx_bg_dma_desc;
                blending_engine->dma_tx_fg_desc = blending_tx_fg_dma_desc;
                blending_engine->dma_rx_desc = blending_rx_dma_desc;
                // blending_engine->base.group = s_platform.group[group_id];
                blending_engine->base.type = PPA_ENGINE_TYPE_BLEND;
                blending_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
                blending_engine->base.sem = blending_sem;
                // Engine starts idle: an available semaphore means the engine can take a transaction
                xSemaphoreGive(blending_engine->base.sem);
                blending_engine->base.in_accepting_trans_state = true;
                STAILQ_INIT(&blending_engine->base.trans_stailq);
                // blending_engine->base.event_cbs
                s_platform.blending = blending_engine;
                s_platform.blend_engine_ref_count++;
                *ret_engine = &blending_engine->base;
            } else {
                // Partial allocation failure: roll back everything that did get allocated
                ret = ESP_ERR_NO_MEM;
                ESP_LOGE(TAG, "no mem to register PPA Blending engine");
                free(blending_engine);
                if (blending_sem) {
                    vSemaphoreDelete(blending_sem);
                }
                free(blending_tx_bg_dma_desc);
                free(blending_tx_fg_dma_desc);
                free(blending_rx_dma_desc);
            }
        } else {
            // Blending engine already registered
            s_platform.blend_engine_ref_count++;
            *ret_engine = &s_platform.blending->base;
        }
    }

    if (ret == ESP_OK) {
        if (!s_platform.hal.dev) {
            // First engine on the platform: bring up the shared PPA resources
            assert(!s_platform.dma2d_pool_handle);

            // Enable the bus clock to access PPA registers
            PERIPH_RCC_ATOMIC() {
                ppa_ll_enable_bus_clock(true);
                ppa_ll_reset_register();
            }

            ppa_hal_init(&s_platform.hal); // initialize HAL context

            // Get 2D-DMA pool handle
            dma2d_pool_config_t dma2d_config = {
                .pool_id = 0,
            };
            ret = dma2d_acquire_pool(&dma2d_config, &s_platform.dma2d_pool_handle);
            if (ret != ESP_OK) {
                ESP_LOGE(TAG, "install 2D-DMA failed");
            }
        }
    }
    _lock_release(&s_platform.mutex);

    // If shared-resource bring-up failed after the engine got registered,
    // undo the acquisition (release drops the ref count and frees the engine)
    if (ret != ESP_OK && *ret_engine != NULL) {
        ppa_engine_release(*ret_engine);
    }

    return ret;
}
|
|
|
|
|
|
|
|
|
|
/**
 * @brief Release a PPA engine handle previously obtained from ppa_engine_acquire
 *
 * Decrements the engine's reference count; the last release tears the engine
 * down: it stops accepting new transactions, waits until the pending transaction
 * queue drains, then frees the engine resources. Once no engine remains
 * registered, the shared platform resources (2D-DMA pool, HAL context, PPA bus
 * clock) are released as well.
 *
 * Must be called from a task context — it may block waiting for in-flight
 * transactions to finish.
 *
 * @param ppa_engine Engine handle to release
 * @return ESP_OK, or ESP_ERR_INVALID_ARG if ppa_engine is NULL
 */
esp_err_t ppa_engine_release(ppa_engine_handle_t ppa_engine)
{
    esp_err_t ret = ESP_OK;
    ESP_RETURN_ON_FALSE(ppa_engine, ESP_ERR_INVALID_ARG, TAG, "invalid argument");

    _lock_acquire(&s_platform.mutex);
    if (ppa_engine->type == PPA_ENGINE_TYPE_SR) {
        ppa_sr_engine_t *sr_engine = __containerof(ppa_engine, ppa_sr_engine_t, base);
        s_platform.sr_engine_ref_count--;
        if (s_platform.sr_engine_ref_count == 0) {
            // Stop accepting new transactions to SR engine
            portENTER_CRITICAL(&sr_engine->base.spinlock);
            sr_engine->base.in_accepting_trans_state = false;
            portEXIT_CRITICAL(&sr_engine->base.spinlock);
            // Wait until all pending transactions get processed. Yield between polls:
            // a pure busy-spin here could starve the lower-priority task or deferred
            // work that completes the transactions, deadlocking the release.
            while (!STAILQ_EMPTY(&sr_engine->base.trans_stailq)) { // TODO: Think twice, looks like I am not able to use engine semaphore to decide
                vTaskDelay(1);
            }
            // Now, time to free
            s_platform.sr = NULL;
            free(sr_engine->dma_tx_desc);
            free(sr_engine->dma_rx_desc);
            vSemaphoreDelete(sr_engine->base.sem);
            free(sr_engine);
        }
    } else if (ppa_engine->type == PPA_ENGINE_TYPE_BLEND) {
        ppa_blend_engine_t *blending_engine = __containerof(ppa_engine, ppa_blend_engine_t, base);
        s_platform.blend_engine_ref_count--;
        if (s_platform.blend_engine_ref_count == 0) {
            // Stop accepting new transactions to blending engine
            portENTER_CRITICAL(&blending_engine->base.spinlock);
            blending_engine->base.in_accepting_trans_state = false;
            portEXIT_CRITICAL(&blending_engine->base.spinlock);
            // Wait until all pending transactions get processed (see SR path comment)
            while (!STAILQ_EMPTY(&blending_engine->base.trans_stailq)) { // TODO: Think twice, looks like I am not able to use engine semaphore to decide
                vTaskDelay(1);
            }
            // Now, time to free
            s_platform.blending = NULL;
            free(blending_engine->dma_tx_bg_desc);
            free(blending_engine->dma_tx_fg_desc);
            free(blending_engine->dma_rx_desc);
            vSemaphoreDelete(blending_engine->base.sem);
            free(blending_engine);
        }
    }

    if (!s_platform.sr && !s_platform.blending) {
        // No engine registered anymore: tear down the shared platform resources
        assert(s_platform.sr_engine_ref_count == 0 && s_platform.blend_engine_ref_count == 0);

        if (s_platform.dma2d_pool_handle) {
            dma2d_release_pool(s_platform.dma2d_pool_handle); // TODO: check return value. If not ESP_OK, then must be error on other 2D-DMA clients :( Give a warning log?
            s_platform.dma2d_pool_handle = NULL;
        }

        ppa_hal_deinit(&s_platform.hal); // De-initialize HAL context

        // Disable the bus clock to access PPA registers
        PERIPH_RCC_ATOMIC() {
            ppa_ll_enable_bus_clock(false);
        }
    }
    _lock_release(&s_platform.mutex);
    return ret;
}
|
|
|
|
|
|
|
|
|
|
// // TODO: pm lock?
|
|
|
|
|
// esp_err_t ppa_module_acquire(const ppa_group_alloc_config_t *config, ppa_group_handle_t *ret_group)
|
|
|
|
|
// {
|
|
|
|
|
// esp_err_t ret = ESP_OK;
|
|
|
|
|
// ESP_RETURN_ON_FALSE(config && ret_group, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
|
|
|
|
|
// ESP_RETURN_ON_FALSE(config->group_id < PPA_LL_GROUPS, ESP_ERR_INVALID_ARG, TAG, "invalid group_id");
|
|
|
|
|
|
|
|
|
|
// int group_id = config->group_id;
|
|
|
|
|
|
|
|
|
|
// // _lock_acquire(&s_platform.mutex);
|
|
|
|
|
// // if (!s_platform.group[group_id]) {
|
|
|
|
|
// // ppa_group_t *pre_alloc_group = heap_caps_calloc(1, sizeof(ppa_group_t), PPA_MEM_ALLOC_CAPS);
|
|
|
|
|
// // if (pre_alloc_group) {
|
|
|
|
|
// // ppa_hal_init(&pre_alloc_group->hal, group_id); // initialize HAL context
|
|
|
|
|
// // pre_alloc_group->group_id = group_id;
|
|
|
|
|
// // pre_alloc_group->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
|
|
|
|
|
// // s_platform.group[group_id] = pre_alloc_group; // register to platform
|
|
|
|
|
// // // Enable the bus clock to access PPA registers
|
|
|
|
|
// // PERIPH_RCC_ATOMIC() {
|
|
|
|
|
// // ppa_ll_enable_bus_clock(group_id, true);
|
|
|
|
|
// // ppa_ll_reset_register(group_id);
|
|
|
|
|
// // }
|
|
|
|
|
|
|
|
|
|
// // // Get 2D-DMA pool handle
|
|
|
|
|
// // dma2d_pool_config_t dma2d_config = {
|
|
|
|
|
// // .pool_id = 0,
|
|
|
|
|
// // };
|
|
|
|
|
// // ret = dma2d_acquire_pool(&dma2d_config, &s_platform.group[group_id]->dma2d_pool_handle);
|
|
|
|
|
// // if (ret != ESP_OK) {
|
|
|
|
|
// // ESP_LOGE(TAG, "install 2D-DMA failed");
|
|
|
|
|
// // // free(pre_alloc_group);
|
|
|
|
|
// // // s_platform.group[group_id] = NULL;
|
|
|
|
|
// // }
|
|
|
|
|
// // } else {
|
|
|
|
|
// // ret = ESP_ERR_NO_MEM;
|
|
|
|
|
// // }
|
|
|
|
|
// // }
|
|
|
|
|
|
|
|
|
|
// // // Register PPA SR engine
|
|
|
|
|
// // if (ret == ESP_OK && config->sr_engine_en && !s_platform.group[group_id]->sr) {
|
|
|
|
|
// // ppa_sr_engine_t *sr_engine = heap_caps_calloc(1, sizeof(ppa_sr_engine_t), PPA_MEM_ALLOC_CAPS);
|
|
|
|
|
// // SemaphoreHandle_t sr_sem = xSemaphoreCreateBinary();
|
|
|
|
|
// // dma2d_descriptor_t *sr_tx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); // TODO: get cache line size by API
|
|
|
|
|
// // dma2d_descriptor_t *sr_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS);
|
|
|
|
|
// // if (sr_engine && sr_sem && sr_tx_dma_desc && sr_rx_dma_desc) {
|
|
|
|
|
// // sr_engine->dma_tx_desc = sr_tx_dma_desc;
|
|
|
|
|
// // sr_engine->dma_rx_desc = sr_rx_dma_desc;
|
|
|
|
|
// // sr_engine->base.group = s_platform.group[group_id];
|
|
|
|
|
// // sr_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
|
|
|
|
|
// // sr_engine->base.sem = sr_sem;
|
|
|
|
|
// // xSemaphoreGive(sr_engine->base.sem);
|
|
|
|
|
// // sr_engine->base.in_accepting_trans_state = true;
|
|
|
|
|
// // STAILQ_INIT(&sr_engine->base.trans_stailq);
|
|
|
|
|
// // // sr_engine->base.event_cbs
|
|
|
|
|
// // s_platform.group[group_id]->sr = sr_engine;
|
|
|
|
|
// // } else {
|
|
|
|
|
// // ret = ESP_ERR_NO_MEM;
|
|
|
|
|
// // ESP_LOGE(TAG, "no mem to register PPA SR engine");
|
|
|
|
|
// // free(sr_engine);
|
|
|
|
|
// // if (sr_sem) vSemaphoreDelete(sr_sem);
|
|
|
|
|
// // free(sr_tx_dma_desc);
|
|
|
|
|
// // free(sr_rx_dma_desc);
|
|
|
|
|
// // }
|
|
|
|
|
// // }
|
|
|
|
|
|
|
|
|
|
// // // Register PPA Blending engine
|
|
|
|
|
// // if (ret == ESP_OK && config->blending_engine_en && !s_platform.group[group_id]->blending) {
|
|
|
|
|
// // ppa_blending_engine_t *blending_engine = heap_caps_calloc(1, sizeof(ppa_blending_engine_t), PPA_MEM_ALLOC_CAPS);
|
|
|
|
|
// // SemaphoreHandle_t blending_sem = xSemaphoreCreateBinary();
|
|
|
|
|
// // dma2d_descriptor_t *blending_tx_bg_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); // TODO: get cache line size by API
|
|
|
|
|
// // dma2d_descriptor_t *blending_tx_fg_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS);
|
|
|
|
|
// // dma2d_descriptor_t *blending_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS);
|
|
|
|
|
// // if (blending_engine && blending_sem && blending_tx_bg_dma_desc && blending_tx_fg_dma_desc && blending_rx_dma_desc) {
|
|
|
|
|
// // blending_engine->dma_tx_bg_desc = blending_tx_bg_dma_desc;
|
|
|
|
|
// // blending_engine->dma_tx_fg_desc = blending_tx_fg_dma_desc;
|
|
|
|
|
// // blending_engine->dma_rx_desc = blending_rx_dma_desc;
|
|
|
|
|
// // blending_engine->base.group = s_platform.group[group_id];
|
|
|
|
|
// // blending_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
|
|
|
|
|
// // blending_engine->base.sem = blending_sem;
|
|
|
|
|
// // xSemaphoreGive(blending_engine->base.sem);
|
|
|
|
|
// // blending_engine->base.in_accepting_trans_state = true;
|
|
|
|
|
// // STAILQ_INIT(&blending_engine->base.trans_stailq);
|
|
|
|
|
// // // blending_engine->base.event_cbs
|
|
|
|
|
// // s_platform.group[group_id]->blending = blending_engine;
|
|
|
|
|
// // } else {
|
|
|
|
|
// // ret = ESP_ERR_NO_MEM;
|
|
|
|
|
// // ESP_LOGE(TAG, "no mem to register PPA Blending engine");
|
|
|
|
|
// // free(blending_engine);
|
|
|
|
|
// // if (blending_sem) vSemaphoreDelete(blending_sem);
|
|
|
|
|
// // free(blending_tx_bg_dma_desc);
|
|
|
|
|
// // free(blending_tx_fg_dma_desc);
|
|
|
|
|
// // free(blending_rx_dma_desc);
|
|
|
|
|
// // }
|
|
|
|
|
// // }
|
|
|
|
|
// // _lock_release(&s_platform.mutex);
|
|
|
|
|
|
|
|
|
|
// // ppa_module_release
|
|
|
|
|
|
|
|
|
|
// bool new_group = false;
|
|
|
|
|
// bool new_sr_engine = false;
|
|
|
|
|
// bool new_blending_engine = false;
|
|
|
|
|
// ppa_group_t *pre_alloc_group = heap_caps_calloc(1, sizeof(ppa_group_t), PPA_MEM_ALLOC_CAPS);
|
|
|
|
|
// ppa_sr_engine_t *sr_engine = NULL;
|
|
|
|
|
// ppa_blend_engine_t *blending_engine = NULL;
|
|
|
|
|
// SemaphoreHandle_t sr_sem = NULL, blending_sem = NULL;
|
|
|
|
|
|
|
|
|
|
// // portENTER_CRITICAL(&s_platform.spinlock);
|
|
|
|
|
// if (!s_platform.group[group_id]) {
|
|
|
|
|
// if (pre_alloc_group) {
|
|
|
|
|
// new_group = true;
|
|
|
|
|
// ppa_hal_init(&pre_alloc_group->hal, group_id); // initialize HAL context
|
|
|
|
|
// pre_alloc_group->group_id = group_id;
|
|
|
|
|
// pre_alloc_group->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
|
|
|
|
|
// s_platform.group[group_id] = pre_alloc_group; // register to platform
|
|
|
|
|
// // Enable the bus clock to access PPA registers
|
|
|
|
|
// PERIPH_RCC_ATOMIC() {
|
|
|
|
|
// ppa_ll_enable_bus_clock(group_id, true);
|
|
|
|
|
// ppa_ll_reset_register(group_id);
|
|
|
|
|
// }
|
|
|
|
|
// } else {
|
|
|
|
|
// ret = ESP_ERR_NO_MEM;
|
|
|
|
|
// }
|
|
|
|
|
// }
|
|
|
|
|
// // portEXIT_CRITICAL(&s_platform.spinlock);
|
|
|
|
|
|
|
|
|
|
// if (new_group) {
|
|
|
|
|
// // Get 2D-DMA pool handle
|
|
|
|
|
// dma2d_pool_config_t dma2d_config = {
|
|
|
|
|
// .pool_id = 0,
|
|
|
|
|
// };
|
|
|
|
|
// ret = dma2d_acquire_pool(&dma2d_config, &s_platform.group[group_id]->dma2d_pool_handle);
|
|
|
|
|
// if (ret != ESP_OK) {
|
|
|
|
|
// ESP_LOGE(TAG, "install 2D-DMA failed");
|
|
|
|
|
// goto err;
|
|
|
|
|
// }
|
|
|
|
|
// }
|
|
|
|
|
|
|
|
|
|
// if (ret == ESP_OK && config->sr_engine_en) {
|
|
|
|
|
// sr_engine = heap_caps_calloc(1, sizeof(ppa_sr_engine_t), PPA_MEM_ALLOC_CAPS);
|
|
|
|
|
// sr_sem = xSemaphoreCreateBinary();
|
|
|
|
|
// dma2d_descriptor_t *sr_tx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); // TODO: get cache line size by API
|
|
|
|
|
// dma2d_descriptor_t *sr_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS);
|
|
|
|
|
// // Register PPA SR engine
|
|
|
|
|
// portENTER_CRITICAL(&s_platform.group[group_id]->spinlock);
|
|
|
|
|
// if (!s_platform.group[group_id]->sr) {
|
|
|
|
|
// if (sr_engine && sr_sem && sr_tx_dma_desc && sr_rx_dma_desc) {
|
|
|
|
|
// new_sr_engine = true;
|
|
|
|
|
// sr_engine->dma_tx_desc = sr_tx_dma_desc;
|
|
|
|
|
// sr_engine->dma_rx_desc = sr_rx_dma_desc;
|
|
|
|
|
// sr_engine->base.group = s_platform.group[group_id];
|
|
|
|
|
// sr_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
|
|
|
|
|
// sr_engine->base.sem = sr_sem;
|
|
|
|
|
// xSemaphoreGive(sr_engine->base.sem);
|
|
|
|
|
// sr_engine->base.in_accepting_trans_state = true;
|
|
|
|
|
// STAILQ_INIT(&sr_engine->base.trans_stailq);
|
|
|
|
|
// // sr_engine->base.event_cbs
|
|
|
|
|
// s_platform.group[group_id]->sr = sr_engine;
|
|
|
|
|
// } else {
|
|
|
|
|
// ret = ESP_ERR_NO_MEM;
|
|
|
|
|
// }
|
|
|
|
|
// }
|
|
|
|
|
// portEXIT_CRITICAL(&s_platform.group[group_id]->spinlock);
|
|
|
|
|
// if (ret == ESP_ERR_NO_MEM) {
|
|
|
|
|
// ESP_LOGE(TAG, "no mem to register PPA SR engine");
|
|
|
|
|
// }
|
|
|
|
|
// }
|
|
|
|
|
|
|
|
|
|
// if (ret == ESP_OK && config->blending_engine_en) {
|
|
|
|
|
// blending_engine = heap_caps_calloc(1, sizeof(ppa_blend_engine_t), PPA_MEM_ALLOC_CAPS);
|
|
|
|
|
// blending_sem = xSemaphoreCreateBinary();
|
|
|
|
|
// dma2d_descriptor_t *blending_tx_bg_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); // TODO: get cache line size by API
|
|
|
|
|
// dma2d_descriptor_t *blending_tx_fg_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS);
|
|
|
|
|
// dma2d_descriptor_t *blending_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS);
|
|
|
|
|
// // Register PPA Blending engine
|
|
|
|
|
// portENTER_CRITICAL(&s_platform.group[group_id]->spinlock);
|
|
|
|
|
// if (!s_platform.group[group_id]->blending) {
|
|
|
|
|
// if (blending_engine && blending_sem && blending_tx_bg_dma_desc && blending_tx_fg_dma_desc && blending_rx_dma_desc) {
|
|
|
|
|
// new_blending_engine = true;
|
|
|
|
|
// blending_engine->dma_tx_bg_desc = blending_tx_bg_dma_desc;
|
|
|
|
|
// blending_engine->dma_tx_fg_desc = blending_tx_fg_dma_desc;
|
|
|
|
|
// blending_engine->dma_rx_desc = blending_rx_dma_desc;
|
|
|
|
|
// blending_engine->base.group = s_platform.group[group_id];
|
|
|
|
|
// blending_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
|
|
|
|
|
// blending_engine->base.sem = blending_sem;
|
|
|
|
|
// xSemaphoreGive(blending_engine->base.sem);
|
|
|
|
|
// blending_engine->base.in_accepting_trans_state = true;
|
|
|
|
|
// STAILQ_INIT(&blending_engine->base.trans_stailq);
|
|
|
|
|
// // blending_engine->base.event_cbs
|
|
|
|
|
// s_platform.group[group_id]->blending = blending_engine;
|
|
|
|
|
// } else {
|
|
|
|
|
// ret = ESP_ERR_NO_MEM;
|
|
|
|
|
// }
|
|
|
|
|
// }
|
|
|
|
|
// portEXIT_CRITICAL(&s_platform.group[group_id]->spinlock);
|
|
|
|
|
// if (ret == ESP_ERR_NO_MEM) {
|
|
|
|
|
// ESP_LOGE(TAG, "no mem to register PPA Blending engine");
|
|
|
|
|
// }
|
|
|
|
|
// }
|
|
|
|
|
|
|
|
|
|
// if (!new_sr_engine) {
|
|
|
|
|
// free(sr_engine);
|
|
|
|
|
// if (sr_sem) vSemaphoreDelete(sr_sem);
|
|
|
|
|
// // TODO: free desc
|
|
|
|
|
// }
|
|
|
|
|
// if (!new_blending_engine) {
|
|
|
|
|
// free(blending_engine);
|
|
|
|
|
// if (blending_sem) vSemaphoreDelete(blending_sem);
|
|
|
|
|
// // TODO: free desc
|
|
|
|
|
// }
|
|
|
|
|
// err:
|
|
|
|
|
// if (ret != ESP_OK) {
|
|
|
|
|
// if (new_group) {
|
|
|
|
|
// ppa_module_release(s_platform.group[group_id]);
|
|
|
|
|
// }
|
|
|
|
|
// }
|
|
|
|
|
// if (!new_group) {
|
|
|
|
|
// free(pre_alloc_group);
|
|
|
|
|
// }
|
|
|
|
|
// *ret_group = s_platform.group[group_id];
|
|
|
|
|
// return ret;
|
|
|
|
|
// }
|
|
|
|
|
|
|
|
|
|
// esp_err_t ppa_module_release(ppa_group_handle_t ppa_group)
|
|
|
|
|
// {
|
|
|
|
|
// esp_err_t ret = ESP_OK;
|
|
|
|
|
// ESP_RETURN_ON_FALSE(ppa_group, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
|
|
|
|
|
|
|
|
|
|
// bool do_deinitialize = false;
|
|
|
|
|
// int group_id = ppa_group->group_id;
|
|
|
|
|
// ppa_sr_engine_t *sr_engine = ppa_group->sr;
|
|
|
|
|
// ppa_blend_engine_t *blending_engine = ppa_group->blending;
|
|
|
|
|
// bool sr_no_waiting_trans = true;
|
|
|
|
|
// bool blending_no_waiting_trans = true;
|
|
|
|
|
|
|
|
|
|
// // portENTER_CRITICAL(&s_platform.spinlock);
|
|
|
|
|
// portENTER_CRITICAL(&ppa_group->spinlock);
|
|
|
|
|
// if (sr_engine) {
|
|
|
|
|
// sr_engine->base.in_accepting_trans_state = false;
|
|
|
|
|
// portENTER_CRITICAL(&sr_engine->base.spinlock);
|
|
|
|
|
// sr_no_waiting_trans = STAILQ_EMPTY(&sr_engine->base.trans_stailq);
|
|
|
|
|
// portEXIT_CRITICAL(&sr_engine->base.spinlock);
|
|
|
|
|
// }
|
|
|
|
|
// if (blending_engine) {
|
|
|
|
|
// blending_engine->base.in_accepting_trans_state = false;
|
|
|
|
|
// portENTER_CRITICAL(&blending_engine->base.spinlock);
|
|
|
|
|
// blending_no_waiting_trans = STAILQ_EMPTY(&blending_engine->base.trans_stailq);
|
|
|
|
|
// portEXIT_CRITICAL(&blending_engine->base.spinlock);
|
|
|
|
|
// }
|
|
|
|
|
// portEXIT_CRITICAL(&ppa_group->spinlock);
|
|
|
|
|
// if (sr_no_waiting_trans && blending_no_waiting_trans) {
|
|
|
|
|
// do_deinitialize = true;
|
|
|
|
|
// ppa_group->sr = NULL;
|
|
|
|
|
// ppa_group->blending = NULL;
|
|
|
|
|
// s_platform.group[group_id] = NULL;
|
|
|
|
|
// } else {
|
|
|
|
|
// ret = ESP_FAIL;
|
|
|
|
|
// }
|
|
|
|
|
// // portEXIT_CRITICAL(&s_platform.spinlock);
|
|
|
|
|
|
|
|
|
|
// if (do_deinitialize) {
|
|
|
|
|
// if (sr_engine) {
|
|
|
|
|
// free(sr_engine->dma_tx_desc);
|
|
|
|
|
// free(sr_engine->dma_rx_desc);
|
|
|
|
|
// vSemaphoreDelete(sr_engine->base.sem);
|
|
|
|
|
// free(sr_engine);
|
|
|
|
|
// }
|
|
|
|
|
// if (blending_engine) {
|
|
|
|
|
// free(blending_engine->dma_tx_bg_desc);
|
|
|
|
|
// free(blending_engine->dma_tx_fg_desc);
|
|
|
|
|
// free(blending_engine->dma_rx_desc);
|
|
|
|
|
// vSemaphoreDelete(blending_engine->base.sem);
|
|
|
|
|
// free(blending_engine);
|
|
|
|
|
// }
|
|
|
|
|
// dma2d_release_pool(ppa_group->dma2d_pool_handle);
|
|
|
|
|
// // Disable the bus clock to access PPA registers
|
|
|
|
|
// PERIPH_RCC_ATOMIC() {
|
|
|
|
|
// ppa_ll_enable_bus_clock(group_id, false);
|
|
|
|
|
// }
|
|
|
|
|
// free(ppa_group);
|
|
|
|
|
// }
|
|
|
|
|
// return ret;
|
|
|
|
|
// }
|
|
|
|
|
|
|
|
|
|
// Each PPA engine should only have one transaction being pushed to 2D-DMA queue, the rest transactions should stay in engine's own transaction queue.
|
|
|
|
|
// This is to avoid 2D-DMA channels being hold, but not actually being used (waiting for PPA engine to be free)
|
|
|
|
|
static esp_err_t ppa_dma2d_enqueue(const ppa_trans_t *trans_elm)
|
|
|
|
|
{
|
|
|
|
|
return dma2d_enqueue(s_platform.dma2d_pool_handle, trans_elm->trans_desc, trans_elm->dma_trans_placeholder);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Free a transaction element and everything hanging off it: the trigger-specific
// descriptor (SR or blend), the on-picked config, the 2D-DMA transaction config,
// and the optional completion semaphore. Safe to call with NULL.
static void ppa_recycle_transaction(ppa_trans_t *trans_elm)
{
    if (trans_elm == NULL) {
        return;
    }
    dma2d_trans_config_t *dma_trans_desc = trans_elm->trans_desc;
    if (dma_trans_desc != NULL) {
        ppa_dma2d_trans_on_picked_config_t *on_picked_cfg = (ppa_dma2d_trans_on_picked_config_t *)dma_trans_desc->user_config;
        if (on_picked_cfg != NULL) {
            // The trigger peripheral selects which union member owns the descriptor
            switch (on_picked_cfg->trigger_periph) {
            case DMA2D_TRIG_PERIPH_PPA_SR:
                free(on_picked_cfg->sr_desc);
                break;
            case DMA2D_TRIG_PERIPH_PPA_BLEND:
                free(on_picked_cfg->blend_desc);
                break;
            default:
                break;
            }
            free(on_picked_cfg);
        }
        free(dma_trans_desc);
    }
    if (trans_elm->sem != NULL) {
        vSemaphoreDelete(trans_elm->sem);
    }
    free(trans_elm);
}
|
|
|
|
|
|
|
|
|
|
/**
 * @brief 2D-DMA completion callback for an SR transaction (runs in ISR context)
 *
 * Pops the finished transaction off the SR engine's queue. If another
 * transaction is pending, it is pushed straight into the 2D-DMA queue;
 * otherwise the engine semaphore is given back to mark the engine idle.
 * Finally the finished transaction is either recycled (non-blocking call) or
 * its semaphore is given to wake the blocked caller.
 *
 * @return true if a higher-priority task was woken and a context switch should
 *         be requested on ISR exit
 */
static bool ppa_sr_transaction_done_cb(dma2d_channel_handle_t dma2d_chan, dma2d_event_data_t *event_data, void *user_data)
{
    bool need_yield = false;
    BaseType_t HPTaskAwoken = pdFALSE;
    ppa_trans_t *trans_elm = (ppa_trans_t *)user_data;
    ppa_dma2d_trans_on_picked_config_t *trans_on_picked_desc = (ppa_dma2d_trans_on_picked_config_t *)trans_elm->trans_desc->user_config;
    assert(trans_on_picked_desc->trigger_periph == DMA2D_TRIG_PERIPH_PPA_SR);
    ppa_sr_engine_t *sr_engine = __containerof(trans_on_picked_desc->ppa_engine, ppa_sr_engine_t, base);
    // ppa_group_t *ppa_group = sr_engine->base.group;

    ppa_trans_t *next_start_trans = NULL;
    portENTER_CRITICAL_ISR(&sr_engine->base.spinlock);
    // Remove this transaction from transaction queue
    STAILQ_REMOVE(&sr_engine->base.trans_stailq, trans_elm, ppa_trans_s, entry);
    next_start_trans = STAILQ_FIRST(&sr_engine->base.trans_stailq);
    portEXIT_CRITICAL_ISR(&sr_engine->base.spinlock);

    // If there is next trans in PPA engine queue, send it to DMA queue; otherwise, release the engine semaphore
    if (next_start_trans) {
        ppa_dma2d_enqueue(next_start_trans);
    } else {
        xSemaphoreGiveFromISR(sr_engine->base.sem, &HPTaskAwoken);
        need_yield |= (HPTaskAwoken == pdTRUE);
    }

    // Recycle transaction or give transaction semaphore to wake the blocked caller
    if (trans_elm->sem != NULL) {
        xSemaphoreGiveFromISR(trans_elm->sem, &HPTaskAwoken);
        need_yield |= (HPTaskAwoken == pdTRUE);
    } else {
        ppa_recycle_transaction(trans_elm);
    }

    // TODO: how to notify non-blocking transaction

    return need_yield;
}
|
|
|
|
|
|
|
|
|
|
/**
 * @brief 2D-DMA on_job_picked callback: program the DMA channels and the PPA SR
 *        engine for one scale-and-rotate transaction, then start the hardware.
 *
 * Runs when the 2D-DMA pool has granted this transaction one TX and one RX
 * channel. Order matters here: cache write-back/invalidate and descriptor
 * sync must complete before the DMA channels are started, and the DMA
 * channels are started before the PPA SR engine is kicked off.
 *
 * @param channel_num Number of channels granted (must be 2: one TX, one RX)
 * @param dma2d_chans Granted channel handles with their directions
 * @param user_config ppa_dma2d_trans_on_picked_config_t for this transaction
 * @return false — no context switch needed from this callback
 */
static bool ppa_sr_transaction_on_picked(uint32_t channel_num, const dma2d_trans_channel_info_t *dma2d_chans, void *user_config)
{
    assert(channel_num == 2 && dma2d_chans && user_config);
    ppa_dma2d_trans_on_picked_config_t *trans_on_picked_desc = (ppa_dma2d_trans_on_picked_config_t *)user_config;
    assert(trans_on_picked_desc->trigger_periph == DMA2D_TRIG_PERIPH_PPA_SR && trans_on_picked_desc->sr_desc && trans_on_picked_desc->ppa_engine);

    ppa_sr_transaction_t *sr_trans_desc = trans_on_picked_desc->sr_desc;
    ppa_sr_engine_t *sr_engine = __containerof(trans_on_picked_desc->ppa_engine, ppa_sr_engine_t, base);
    // ppa_group_t *ppa_group = sr_engine->base.group;

    // Free the 2D-DMA transaction placeholder (the transaction has already been moved out of the 2D-DMA queue)
    free(trans_on_picked_desc->trans_elm->dma_trans_placeholder);

    // Get the required 2D-DMA channel handles (pool may hand them over in either order)
    uint32_t dma2d_tx_chan_idx = 0;
    uint32_t dma2d_rx_chan_idx = 1;
    if (dma2d_chans[0].dir == DMA2D_CHANNEL_DIRECTION_RX) {
        dma2d_tx_chan_idx = 1;
        dma2d_rx_chan_idx = 0;
    }
    dma2d_channel_handle_t dma2d_tx_chan = dma2d_chans[dma2d_tx_chan_idx].chan;
    dma2d_channel_handle_t dma2d_rx_chan = dma2d_chans[dma2d_rx_chan_idx].chan;

    // Write back and invalidate are performed on the entire picture (the window content is not continuous in the buffer)
    // Write back in_buffer so DMA reads the caller's latest data
    uint32_t in_buffer_len = sr_trans_desc->in_pic_w * sr_trans_desc->in_pic_h * color_hal_pixel_format_get_bit_depth((color_space_pixel_format_t) {
        .color_type_id = sr_trans_desc->in_color.mode
    }) / 8;
    esp_cache_msync(sr_trans_desc->in_buffer, in_buffer_len, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
    // Invalidate out_buffer so the CPU later reads what DMA wrote
    uint32_t out_buffer_len = sr_trans_desc->out_pic_w * sr_trans_desc->out_pic_h * color_hal_pixel_format_get_bit_depth((color_space_pixel_format_t) {
        .color_type_id = sr_trans_desc->out_color.mode
    }) / 8;
    esp_cache_msync(sr_trans_desc->out_buffer, out_buffer_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);

    // Fill 2D-DMA descriptors: TX descriptor describes the input window inside the input picture
    sr_engine->dma_tx_desc->vb_size = sr_trans_desc->in_block_h;
    sr_engine->dma_tx_desc->hb_length = sr_trans_desc->in_block_w;
    sr_engine->dma_tx_desc->err_eof = 0;
    sr_engine->dma_tx_desc->dma2d_en = 1;
    sr_engine->dma_tx_desc->suc_eof = 1;
    sr_engine->dma_tx_desc->owner = DMA2D_DESCRIPTOR_BUFFER_OWNER_DMA;
    sr_engine->dma_tx_desc->va_size = sr_trans_desc->in_pic_h;
    sr_engine->dma_tx_desc->ha_length = sr_trans_desc->in_pic_w;
    sr_engine->dma_tx_desc->pbyte = dma2d_desc_pixel_format_to_pbyte_value((color_space_pixel_format_t) {
        .color_type_id = sr_trans_desc->in_color.mode
    }); // check in 912 whether this field can be ignored (911 seems cannot) No! Why?
    sr_engine->dma_tx_desc->y = sr_trans_desc->in_block_offset_y;
    sr_engine->dma_tx_desc->x = sr_trans_desc->in_block_offset_x;
    sr_engine->dma_tx_desc->mode = DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE;
    sr_engine->dma_tx_desc->buffer = (void *)sr_trans_desc->in_buffer;
    sr_engine->dma_tx_desc->next = NULL;

    // RX descriptor: output window position within the output picture; the PPA
    // engine determines the actual written block size, hence vb_size/hb_length = 1
    // sr_engine->dma_rx_desc->vb_size = sr_trans_desc->in_block_h; // check in 912 whether this field can be ignored (911 seems cannot) No! Why?
    // sr_engine->dma_rx_desc->hb_length = sr_trans_desc->in_block_w; // check in 912 whether this field can be ignored (911 seems cannot) No! Why?
    sr_engine->dma_rx_desc->vb_size = 1;
    sr_engine->dma_rx_desc->hb_length = 1;
    sr_engine->dma_rx_desc->err_eof = 0;
    sr_engine->dma_rx_desc->dma2d_en = 1;
    sr_engine->dma_rx_desc->suc_eof = 1;
    sr_engine->dma_rx_desc->owner = DMA2D_DESCRIPTOR_BUFFER_OWNER_DMA;
    sr_engine->dma_rx_desc->va_size = sr_trans_desc->out_pic_h;
    sr_engine->dma_rx_desc->ha_length = sr_trans_desc->out_pic_w;
    sr_engine->dma_rx_desc->pbyte = dma2d_desc_pixel_format_to_pbyte_value((color_space_pixel_format_t) {
        .color_type_id = sr_trans_desc->out_color.mode
    }); // check in 912 whether this field can be ignored (911 seems cannot) No! Why?
    sr_engine->dma_rx_desc->y = sr_trans_desc->out_block_offset_y;
    sr_engine->dma_rx_desc->x = sr_trans_desc->out_block_offset_x;
    sr_engine->dma_rx_desc->mode = DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE;
    sr_engine->dma_rx_desc->buffer = (void *)sr_trans_desc->out_buffer;
    sr_engine->dma_rx_desc->next = NULL;

    // Push the descriptors to memory so the DMA engine sees them
    esp_cache_msync((void *)sr_engine->dma_tx_desc, s_platform.dma_desc_mem_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
    esp_cache_msync((void *)sr_engine->dma_rx_desc, s_platform.dma_desc_mem_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);

    // Configure 2D-DMA channels: connect both to the PPA SR trigger peripheral
    dma2d_trigger_t trig_periph = {
        .periph = DMA2D_TRIG_PERIPH_PPA_SR,
        .periph_sel_id = SOC_DMA2D_TRIG_PERIPH_PPA_SR_TX,
    };
    dma2d_connect(dma2d_tx_chan, &trig_periph);
    trig_periph.periph_sel_id = SOC_DMA2D_TRIG_PERIPH_PPA_SR_RX;
    dma2d_connect(dma2d_rx_chan, &trig_periph);

    dma2d_transfer_ability_t dma_transfer_ability = {
        .data_burst_length = DMA2D_DATA_BURST_LENGTH_128,
        .desc_burst_en = true,
        .mb_size = DMA2D_MACRO_BLOCK_SIZE_NONE,
    };
    dma2d_set_transfer_ability(dma2d_tx_chan, &dma_transfer_ability);
    dma2d_set_transfer_ability(dma2d_rx_chan, &dma_transfer_ability);

    // TODO: configuring this doesn't seem helping anything? Shouldn't it related to descriptor pbyte?
    // NOTE(review): strategy is applied to the TX channel only — confirm whether the RX channel needs it too
    dma2d_strategy_config_t dma_strategy = {
        .auto_update_desc = true,
    };
    dma2d_apply_strategy(dma2d_tx_chan, &dma_strategy);

    // YUV444 and YUV422 are not supported by the PPA module; utilize the 2D-DMA
    // color space conversion feature to convert them to RGB888 on the way in
    ppa_sr_color_mode_t ppa_in_color_mode = sr_trans_desc->in_color.mode;
    if (ppa_in_color_mode == PPA_SR_COLOR_MODE_YUV444) {
        ppa_in_color_mode = PPA_SR_COLOR_MODE_RGB888;
        dma2d_csc_config_t dma_tx_csc = {0};
        if (sr_trans_desc->in_color.yuv_std == COLOR_CONV_STD_RGB_YUV_BT601) {
            dma_tx_csc.tx_csc_option = DMA2D_CSC_TX_YUV444_TO_RGB888_601;
        } else {
            dma_tx_csc.tx_csc_option = DMA2D_CSC_TX_YUV444_TO_RGB888_709;
        }
        dma2d_configure_color_space_conversion(dma2d_tx_chan, &dma_tx_csc);
    } else if (ppa_in_color_mode == PPA_SR_COLOR_MODE_YUV422) {
        ppa_in_color_mode = PPA_SR_COLOR_MODE_RGB888;
        dma2d_csc_config_t dma_tx_csc = {0};
        if (sr_trans_desc->in_color.yuv_std == COLOR_CONV_STD_RGB_YUV_BT601) {
            dma_tx_csc.tx_csc_option = DMA2D_CSC_TX_YUV422_TO_RGB888_601;
        } else {
            dma_tx_csc.tx_csc_option = DMA2D_CSC_TX_YUV422_TO_RGB888_709;
        }
        dma2d_configure_color_space_conversion(dma2d_tx_chan, &dma_tx_csc);
    }

    // For YUV444 output: PPA produces YUV420, the RX channel CSC expands it to YUV444
    ppa_sr_color_mode_t ppa_out_color_mode = sr_trans_desc->out_color.mode;
    if (ppa_out_color_mode == PPA_SR_COLOR_MODE_YUV444) {
        ppa_out_color_mode = PPA_SR_COLOR_MODE_YUV420;
        dma2d_csc_config_t dma_rx_csc = {
            .rx_csc_option = DMA2D_CSC_RX_YUV420_TO_YUV444,
        };
        dma2d_configure_color_space_conversion(dma2d_rx_chan, &dma_rx_csc);
    }

    // Completion is detected via the RX channel EOF interrupt
    dma2d_rx_event_callbacks_t dma_event_cbs = {
        .on_recv_eof = ppa_sr_transaction_done_cb,
    };
    dma2d_register_rx_event_callbacks(dma2d_rx_chan, &dma_event_cbs, (void *)trans_on_picked_desc->trans_elm);

    ppa_ll_sr_reset(s_platform.hal.dev);

    // Start the DMA channels before kicking off the PPA engine
    dma2d_set_desc_addr(dma2d_tx_chan, (intptr_t)sr_engine->dma_tx_desc);
    dma2d_set_desc_addr(dma2d_rx_chan, (intptr_t)sr_engine->dma_rx_desc);
    dma2d_start(dma2d_tx_chan);
    dma2d_start(dma2d_rx_chan);

    // Configure the PPA SR engine: input (RX side of the PPA) color handling
    ppa_ll_sr_set_rx_color_mode(s_platform.hal.dev, ppa_in_color_mode);
    if (COLOR_SPACE_TYPE(ppa_in_color_mode) == COLOR_SPACE_YUV) {
        ppa_ll_sr_set_rx_yuv_range(s_platform.hal.dev, sr_trans_desc->in_color.yuv_range);
        ppa_ll_sr_set_yuv2rgb_std(s_platform.hal.dev, sr_trans_desc->in_color.yuv_std);
    }
    ppa_ll_sr_enable_rx_byte_swap(s_platform.hal.dev, sr_trans_desc->in_color.byte_swap);
    ppa_ll_sr_enable_rx_rgb_swap(s_platform.hal.dev, sr_trans_desc->in_color.rgb_swap);

    // Output (TX side of the PPA) color handling
    ppa_ll_sr_set_tx_color_mode(s_platform.hal.dev, ppa_out_color_mode);
    if (COLOR_SPACE_TYPE(ppa_out_color_mode) == COLOR_SPACE_YUV) {
        // NOTE(review): these two calls program the RX yuv-range and the yuv2rgb
        // standard with the OUTPUT color settings — looks like a copy-paste of the
        // input branch above; TX-range / rgb2yuv-std variants would be expected
        // here. Confirm against the PPA LL API before release.
        ppa_ll_sr_set_rx_yuv_range(s_platform.hal.dev, sr_trans_desc->out_color.yuv_range);
        ppa_ll_sr_set_yuv2rgb_std(s_platform.hal.dev, sr_trans_desc->out_color.yuv_std);
    }

    ppa_ll_sr_configure_rx_alpha(s_platform.hal.dev, sr_trans_desc->alpha_mode, sr_trans_desc->alpha_value);

    // TODO: sr_macro_bk_ro_bypass
    // Geometry: rotation, fixed-point scaling (integer + fractional parts), mirroring
    ppa_ll_sr_set_rotation_angle(s_platform.hal.dev, sr_trans_desc->rotation_angle);
    ppa_ll_sr_set_scaling_x(s_platform.hal.dev, (uint32_t)sr_trans_desc->scale_x, (uint32_t)(sr_trans_desc->scale_x * (PPA_LL_SR_SCALING_FRAG_MAX + 1)) & PPA_LL_SR_SCALING_FRAG_MAX);
    ppa_ll_sr_set_scaling_y(s_platform.hal.dev, (uint32_t)sr_trans_desc->scale_y, (uint32_t)(sr_trans_desc->scale_y * (PPA_LL_SR_SCALING_FRAG_MAX + 1)) & PPA_LL_SR_SCALING_FRAG_MAX);
    ppa_ll_sr_enable_mirror_x(s_platform.hal.dev, sr_trans_desc->mirror_x);
    ppa_ll_sr_enable_mirror_y(s_platform.hal.dev, sr_trans_desc->mirror_y);

    ppa_ll_sr_start(s_platform.hal.dev);

    // dma2d_start(dma2d_tx_chan);
    // dma2d_start(dma2d_rx_chan);

    // No need to yield
    return false;
}
|
|
|
|
|
|
|
|
|
|
/**
 * @brief Perform a PPA scale-and-rotate (SR) transaction
 *
 * Allocates transaction storage, takes a private copy of @p config, queues the
 * transaction on the SR engine, and hands it to the 2D-DMA pool once the engine
 * is idle. In PPA_TRANS_MODE_BLOCKING mode the call returns only after the
 * transaction completes and the storage has been recycled; in
 * PPA_TRANS_MODE_NON_BLOCKING mode the storage is recycled from the
 * done-callback ISR.
 *
 * @param ppa_engine Handle of a registered PPA SR engine
 * @param config Transaction configuration (copied internally; may live on the caller's stack)
 * @param mode PPA_TRANS_MODE_BLOCKING or PPA_TRANS_MODE_NON_BLOCKING
 * @return
 *      - ESP_OK on success
 *      - ESP_ERR_INVALID_ARG on bad handle/config/mode/scale
 *      - ESP_ERR_NO_MEM when transaction storage cannot be allocated
 *      - ESP_FAIL when the engine is not accepting transactions
 */
esp_err_t ppa_do_scale_and_rotate(ppa_engine_handle_t ppa_engine, const ppa_sr_trans_config_t *config, ppa_trans_mode_t mode)
{
    esp_err_t ret = ESP_OK;
    ESP_RETURN_ON_FALSE(ppa_engine && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE(ppa_engine->type == PPA_ENGINE_TYPE_SR, ESP_ERR_INVALID_ARG, TAG, "wrong engine handle");
    ESP_RETURN_ON_FALSE(mode <= PPA_TRANS_MODE_NON_BLOCKING, ESP_ERR_INVALID_ARG, TAG, "invalid mode");
    // TODO: in/out buffer address and size may need to be aligned to the cache line size
    // TODO: validate config->rotation_angle and in/out color modes
    // TODO: clarify output RGB range when converting from YUV420 input
    ESP_RETURN_ON_FALSE(config->scale_x < (PPA_LL_SR_SCALING_INT_MAX + 1) && config->scale_x >= (1.0 / PPA_LL_SR_SCALING_FRAG_MAX) &&
                        config->scale_y < (PPA_LL_SR_SCALING_INT_MAX + 1) && config->scale_y >= (1.0 / PPA_LL_SR_SCALING_FRAG_MAX),
                        ESP_ERR_INVALID_ARG, TAG, "invalid scale");
    // TODO: byte/rgb swap with color mode only to (A)RGB color space?

    ppa_sr_engine_t *sr_engine = __containerof(ppa_engine, ppa_sr_engine_t, base);

    // Allocate every piece of the transaction storage
    ppa_trans_t *new_trans_elm = (ppa_trans_t *)heap_caps_calloc(1, sizeof(ppa_trans_t), PPA_MEM_ALLOC_CAPS);
    dma2d_trans_t *dma_trans_elm = (dma2d_trans_t *)heap_caps_calloc(1, SIZEOF_DMA2D_TRANS_T, PPA_MEM_ALLOC_CAPS);
    dma2d_trans_config_t *dma_trans_desc = (dma2d_trans_config_t *)heap_caps_calloc(1, sizeof(dma2d_trans_config_t), PPA_MEM_ALLOC_CAPS);
    ppa_dma2d_trans_on_picked_config_t *trans_on_picked_desc = (ppa_dma2d_trans_on_picked_config_t *)heap_caps_calloc(1, sizeof(ppa_dma2d_trans_on_picked_config_t), PPA_MEM_ALLOC_CAPS);
    ppa_sr_transaction_t *ppa_trans_desc = (ppa_sr_transaction_t *)heap_caps_calloc(1, sizeof(ppa_sr_transaction_t), PPA_MEM_ALLOC_CAPS);
    if (!(new_trans_elm && dma_trans_elm && dma_trans_desc && trans_on_picked_desc && ppa_trans_desc)) {
        // The pieces are not linked into new_trans_elm yet, so ppa_recycle_transaction()
        // could not reach them — free each one individually (free(NULL) is a no-op)
        free(ppa_trans_desc);
        free(trans_on_picked_desc);
        free(dma_trans_desc);
        free(dma_trans_elm);
        free(new_trans_elm);
        ESP_LOGE(TAG, "no mem for transaction storage");
        return ESP_ERR_NO_MEM;
    }

    // Take a private copy of the caller's config so it does not need to outlive this call
    memcpy(ppa_trans_desc, config, sizeof(ppa_sr_trans_config_t));

    trans_on_picked_desc->sr_desc = ppa_trans_desc;
    trans_on_picked_desc->ppa_engine = &sr_engine->base;
    trans_on_picked_desc->trans_elm = new_trans_elm;
    trans_on_picked_desc->trigger_periph = DMA2D_TRIG_PERIPH_PPA_SR;

    dma_trans_desc->tx_channel_num = 1;
    dma_trans_desc->rx_channel_num = 1;
    // TODO: reserved channels
    dma_trans_desc->user_config = (void *)trans_on_picked_desc;
    dma_trans_desc->on_job_picked = ppa_sr_transaction_on_picked;

    // Link everything into the transaction element BEFORE any further failure point,
    // so any later error can release the whole chain with one ppa_recycle_transaction() call
    new_trans_elm->trans_desc = dma_trans_desc;
    new_trans_elm->dma_trans_placeholder = dma_trans_elm;

    if (mode == PPA_TRANS_MODE_BLOCKING) {
        // Per-transaction semaphore: given from the done-callback ISR, taken below
        new_trans_elm->sem = xSemaphoreCreateBinary();
        ESP_GOTO_ON_FALSE(new_trans_elm->sem, ESP_ERR_NO_MEM, err, TAG, "no mem for transaction storage");
    }

    portENTER_CRITICAL(&sr_engine->base.spinlock);
    if (sr_engine->base.in_accepting_trans_state) {
        // Send the transaction into the PPA SR engine queue
        STAILQ_INSERT_TAIL(&sr_engine->base.trans_stailq, new_trans_elm, entry);
    } else {
        ret = ESP_FAIL;
    }
    portEXIT_CRITICAL(&sr_engine->base.spinlock);

    if (ret != ESP_OK) {
        ESP_LOGE(TAG, "SR engine cannot accept transaction now");
        goto err;
    }

    // Try to acquire the engine; non-blocking callers do not wait — if the engine
    // is busy, the done-callback ISR will start this transaction later
    TickType_t ticks_to_wait = (mode == PPA_TRANS_MODE_NON_BLOCKING) ? 0 : portMAX_DELAY;
    if (xSemaphoreTake(sr_engine->base.sem, ticks_to_wait) == pdTRUE) {
        // Check whether the transaction has already been started from the ISR.
        // If so, it has already been removed from the queue (transaction completed).
        bool found = false;
        ppa_trans_t *temp = NULL;
        portENTER_CRITICAL(&sr_engine->base.spinlock);
        STAILQ_FOREACH(temp, &sr_engine->base.trans_stailq, entry) {
            if (temp == new_trans_elm) {
                found = true;
                break;
            }
        }
        portEXIT_CRITICAL(&sr_engine->base.spinlock);
        if (found) {
            ret = ppa_dma2d_enqueue(new_trans_elm);
            if (ret != ESP_OK) {
                // Undo the queueing and release the engine before bailing out
                portENTER_CRITICAL(&sr_engine->base.spinlock);
                STAILQ_REMOVE(&sr_engine->base.trans_stailq, new_trans_elm, ppa_trans_s, entry);
                portEXIT_CRITICAL(&sr_engine->base.spinlock);
                xSemaphoreGive(sr_engine->base.sem);
                goto err;
            }
        } else {
            // Transaction already completed; the engine is idle again
            xSemaphoreGive(sr_engine->base.sem);
        }
    }

    if (mode == PPA_TRANS_MODE_BLOCKING) {
        xSemaphoreTake(new_trans_elm->sem, portMAX_DELAY); // Given in the ISR
        // Sanity check new_trans_elm not in trans_stailq anymore? (loop takes time tho)
        ppa_recycle_transaction(new_trans_elm);
    }

err:
    if (ret != ESP_OK) {
        // NOTE(review): relies on ppa_recycle_transaction() releasing every piece
        // linked above (trans_desc, user_config, sr_desc, placeholder, sem) — confirm
        ppa_recycle_transaction(new_trans_elm);
    }
    return ret;
}
|
|
|
|
|
|
|
|
|
|
/**
 * @brief Perform a PPA blend transaction
 *
 * NOTE(review): unimplemented stub — it performs no work and unconditionally
 * reports success. TODO: implement via the PPA blending engine.
 */
esp_err_t ppa_do_blend(ppa_engine_handle_t ppa_engine, const ppa_blend_trans_config_t *config, ppa_trans_mode_t mode)
{
    return ESP_OK;
}
|
|
|
|
|
|
|
|
|
|
/**
 * @brief Perform a PPA fill transaction
 *
 * NOTE(review): unimplemented stub — it performs no work and unconditionally
 * reports success. TODO: implement via the PPA blending engine's fill function.
 */
esp_err_t ppa_do_fill(ppa_engine_handle_t ppa_engine, const ppa_fill_trans_config_t *config, ppa_trans_mode_t mode)
{
    return ESP_OK;
}
|