diff --git a/components/esp_driver_ppa/CMakeLists.txt b/components/esp_driver_ppa/CMakeLists.txt new file mode 100644 index 0000000000..5647fac3cb --- /dev/null +++ b/components/esp_driver_ppa/CMakeLists.txt @@ -0,0 +1,11 @@ +set(srcs) +set(public_include "include") +if(CONFIG_SOC_PPA_SUPPORTED) + list(APPEND srcs "src/ppa.c") +endif() + +idf_component_register(SRCS ${srcs} + INCLUDE_DIRS ${public_include} + PRIV_REQUIRES esp_mm + # LDFRAGMENTS "linker.lf" + ) diff --git a/components/esp_driver_ppa/include/driver/ppa.h b/components/esp_driver_ppa/include/driver/ppa.h new file mode 100644 index 0000000000..781ff5e07a --- /dev/null +++ b/components/esp_driver_ppa/include/driver/ppa.h @@ -0,0 +1,144 @@ +/* + * SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#pragma once + +#include +#include "esp_err.h" +#include "hal/ppa_types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Type of PPA engine handle + */ +typedef struct ppa_engine_t *ppa_engine_handle_t; + +typedef struct { + ppa_engine_type_t engine; +} ppa_engine_config_t; + +esp_err_t ppa_engine_acquire(const ppa_engine_config_t *config, ppa_engine_handle_t *ret_engine); + +esp_err_t ppa_engine_release(ppa_engine_handle_t ppa_engine); + +typedef enum { + PPA_TRANS_MODE_BLOCKING, + PPA_TRANS_MODE_NON_BLOCKING, +} ppa_trans_mode_t; + +typedef struct { + void *in_buffer; // TODO: could be a buffer list, link descriptors together, process a batch + // uint32_t batch_num; // However, is it necessary? psram can not store too many pictures + uint32_t in_pic_w; + uint32_t in_pic_h; + uint32_t in_block_w; + uint32_t in_block_h; + uint32_t in_block_offset_x; + uint32_t in_block_offset_y; + + void *out_buffer; // TODO: alignment restriction + uint32_t out_pic_w; + uint32_t out_pic_h; + uint32_t out_block_offset_x; + uint32_t out_block_offset_y; + + ppa_sr_rotation_angle_t rotation_angle; + float scale_x; + float scale_y; + bool mirror_x; + bool mirror_y; + + struct { + ppa_sr_color_mode_t mode; + color_range_t yuv_range; + color_conv_std_rgb_yuv_t yuv_std; + bool rgb_swap; + bool byte_swap; + } in_color; + + struct { + ppa_sr_color_mode_t mode; + color_range_t yuv_range; + color_conv_std_rgb_yuv_t yuv_std; + } out_color; + + ppa_alpha_mode_t alpha_mode; + uint32_t alpha_value; /*!< When PPA_ALPHA_FIX_VALUE mode is selected, alpha_value is the alpha value to be replaced with (output_alpha = alpha_value) + When PPA_ALPHA_SCALE mode is selected, alpha_value/256 is the multiplier to the input alpha value (output_alpha = input_alpha * alpha_value / 256) + When other alpha modes are selected, this field is not used*/ +} ppa_sr_trans_config_t; + +/** + * @brief Perform a scaling-and-rotating (SR) operation to a picture + * + * @param[in] ppa_engine PPA engine handle with `PPA_ENGINE_TYPE_SR` as the engine type + * @param[in] config Pointer to a collection of configurations for the SR operation, ppa_sr_trans_config_t + * @param[in] mode Select one mode from ppa_trans_mode_t + * + * @return + * - ESP_OK: + * - ESP_ERR_INVALID_ARG: + * - ESP_ERR_NO_MEM: + * - ESP_FAIL: + */ +esp_err_t ppa_do_scale_and_rotate(ppa_engine_handle_t ppa_engine, const ppa_sr_trans_config_t *config, ppa_trans_mode_t mode); + +typedef struct { + void *in_bg_buffer; + void *in_fg_buffer; + void *out_buffer; +} ppa_blend_trans_config_t; + +/** + * @brief Perform a blending operation to a picture + * + * @param[in] ppa_engine PPA engine handle with `PPA_ENGINE_TYPE_BLEND` as the engine type + * 
@param[in] config Pointer to a collection of configurations for the blending operation, ppa_blend_trans_config_t + * @param[in] mode Select one mode from ppa_trans_mode_t + * + * @return + * - ESP_OK: + * - ESP_ERR_INVALID_ARG: + * - ESP_ERR_NO_MEM: + * - ESP_FAIL: + */ +esp_err_t ppa_do_blend(ppa_engine_handle_t ppa_engine, const ppa_blend_trans_config_t *config, ppa_trans_mode_t mode); + +typedef struct { + void *out_buffer; +} ppa_fill_trans_config_t; + +/** + * @brief Perform a filling operation to a picture + * + * @param[in] ppa_engine PPA engine handle with `PPA_ENGINE_TYPE_BLEND` as the engine type + * @param[in] config Pointer to a collection of configurations for the filling operation, ppa_fill_trans_config_t + * @param[in] mode Select one mode from ppa_trans_mode_t + * + * @return + * - ESP_OK: + * - ESP_ERR_INVALID_ARG: + * - ESP_ERR_NO_MEM: + * - ESP_FAIL: + */ +esp_err_t ppa_do_fill(ppa_engine_handle_t ppa_engine, const ppa_fill_trans_config_t *config, ppa_trans_mode_t mode); + +// SR and Blending are independent, can work at the same time +// Fill is in blend, so fill and blend cannot work at the same time + +// Consider blocking and non-blocking options +// Non-blocking may require notification of process done event + +// dma2d done/eof callback, when/how to free dma2d transaction content + +// cache writeback/invalidate + +#ifdef __cplusplus +} +#endif diff --git a/components/esp_driver_ppa/src/ppa.c b/components/esp_driver_ppa/src/ppa.c new file mode 100644 index 0000000000..71b2365508 --- /dev/null +++ b/components/esp_driver_ppa/src/ppa.c @@ -0,0 +1,966 @@ +/* + * SPDX-FileCopyrightText: 2023-2024 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include "esp_check.h" +#include "esp_log.h" +#include "freertos/portmacro.h" +#include "freertos/FreeRTOS.h" +#include "freertos/task.h" +#include "freertos/semphr.h" +#include "esp_heap_caps.h" +#include "esp_cache.h" +#include "hal/cache_hal.h" +#include "hal/cache_ll.h" +#include "driver/ppa.h" +#include "esp_private/dma2d.h" +#include "hal/dma2d_ll.h" +#include "soc/dma2d_channel.h" +#include "hal/ppa_ll.h" +#include "hal/ppa_types.h" +#include "hal/color_types.h" +#include "hal/color_hal.h" +#include "esp_private/periph_ctrl.h" + +#define ALIGN_UP(num, align) (((num) + ((align) - 1)) & ~((align) - 1)) + +static const char *TAG = "ppa"; + +typedef struct ppa_dev_t *ppa_soc_handle_t; // PPA SOC layer handle + +typedef struct { + ppa_soc_handle_t dev; +} ppa_hal_context_t; + +void ppa_hal_init(ppa_hal_context_t *hal) +{ + hal->dev = PPA_LL_GET_HW; +} + +void ppa_hal_deinit(ppa_hal_context_t *hal) +{ + hal->dev = NULL; +} + +// PPA module contains SR engine and Blending engine + +// typedef struct ppa_group_t ppa_group_t; +typedef struct ppa_engine_t ppa_engine_t; + +typedef ppa_sr_trans_config_t ppa_sr_transaction_t; + +typedef struct { + +} ppa_blend_transaction_t; + +typedef struct ppa_trans_s { + STAILQ_ENTRY(ppa_trans_s) entry; // link entry + // union { + // const ppa_sr_transaction_t *sr_desc; + // const ppa_blending_transaction_t *blending_desc; + // }; + dma2d_trans_config_t *trans_desc; + dma2d_trans_t *dma_trans_placeholder; + SemaphoreHandle_t sem; +} ppa_trans_t; + +typedef struct { + union { + ppa_sr_transaction_t *sr_desc; + ppa_blend_transaction_t *blend_desc; + }; + ppa_engine_t *ppa_engine; + ppa_trans_t *trans_elm; + dma2d_trigger_peripheral_t trigger_periph; + // dma2d_csc_config_t + // dma2d_strategy_config_t *dma_strategy; + // 
dma2d_transfer_ability_t *dma_transfer_ability; + // dma2d_rx_event_callbacks_t *event_cbs; +} ppa_dma2d_trans_on_picked_config_t; + +struct ppa_engine_t { + // ppa_group_t *group; + ppa_engine_type_t type; + portMUX_TYPE spinlock; + SemaphoreHandle_t sem; + bool in_accepting_trans_state; + // pending transactions queue? union ppa_sr_trans_config_t, ppa_blending_trans_config_t? handle when to free (at trans start or at trans end?) + STAILQ_HEAD(trans, ppa_trans_s) trans_stailq; // link head of pending transactions for the PPA engine + // callback func? Here or in the struct above? + // dma2d_rx_event_callbacks_t event_cbs; +}; + +typedef struct ppa_sr_engine_t { + ppa_engine_t base; + dma2d_descriptor_t *dma_tx_desc; + dma2d_descriptor_t *dma_rx_desc; +} ppa_sr_engine_t; + +typedef struct ppa_blend_engine_t { + ppa_engine_t base; + dma2d_descriptor_t *dma_tx_bg_desc; + dma2d_descriptor_t *dma_tx_fg_desc; + dma2d_descriptor_t *dma_rx_desc; +} ppa_blend_engine_t; + +// how to free and how to push next trans into dma2d queue after engine is free (callback triggered) +// + +// struct ppa_group_t { +// int group_id; +// portMUX_TYPE spinlock; +// ppa_hal_context_t hal; +// dma2d_pool_handle_t dma2d_pool_handle; +// ppa_sr_engine_t *sr; +// ppa_blend_engine_t *blending; +// uint32_t sr_engine_ref_count; +// uint32_t blend_engine_ref_count; +// }; + +typedef struct ppa_platform_t { + _lock_t mutex; // platform level mutex lock to protect the ppa_module_acquire/ppa_module_release process + portMUX_TYPE spinlock; // platform level spinlock + // ppa_group_t *group[PPA_LL_GROUPS]; // array of PPA group instances + ppa_hal_context_t hal; + dma2d_pool_handle_t dma2d_pool_handle; + ppa_sr_engine_t *sr; + ppa_blend_engine_t *blending; + uint32_t sr_engine_ref_count; + uint32_t blend_engine_ref_count; + uint32_t dma_desc_mem_size; +} ppa_platform_t; + +// PPA driver platform +static ppa_platform_t s_platform = { + .spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED, + // .group = {}, +}; + +#define PPA_MEM_ALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT) // TODO... + +// TODO: acquire pm_lock? 
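+// Usage sketch (illustrative only): typical call flow for the API declared in driver/ppa.h.
+// `in_buf`/`out_buf` are assumed to be DMA-capable picture buffers prepared by the caller
+// (address and size aligned to the data cache line size); all field values are examples.
+//
+//     ppa_engine_config_t engine_config = { .engine = PPA_ENGINE_TYPE_SR };
+//     ppa_engine_handle_t sr_engine = NULL;
+//     ESP_ERROR_CHECK(ppa_engine_acquire(&engine_config, &sr_engine));
+//
+//     ppa_sr_trans_config_t trans = {
+//         .in_buffer = in_buf,
+//         .in_pic_w = 320, .in_pic_h = 240,
+//         .in_block_w = 320, .in_block_h = 240,
+//         .in_block_offset_x = 0, .in_block_offset_y = 0,
+//         .out_buffer = out_buf,
+//         .out_pic_w = 320, .out_pic_h = 240,
+//         .out_block_offset_x = 0, .out_block_offset_y = 0,
+//         .scale_x = 1.0, .scale_y = 1.0,
+//         .in_color = { .mode = PPA_SR_COLOR_MODE_RGB565 },
+//         .out_color = { .mode = PPA_SR_COLOR_MODE_RGB565 },
+//         .alpha_mode = PPA_ALPHA_NO_CHANGE,
+//     };
+//     ESP_ERROR_CHECK(ppa_do_scale_and_rotate(sr_engine, &trans, PPA_TRANS_MODE_BLOCKING));
+//
+//     ESP_ERROR_CHECK(ppa_engine_release(sr_engine));
+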
+esp_err_t ppa_engine_acquire(const ppa_engine_config_t *config, ppa_engine_handle_t *ret_engine) +{ + esp_err_t ret = ESP_OK; + ESP_RETURN_ON_FALSE(config && ret_engine, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + ESP_RETURN_ON_FALSE(config->engine == PPA_ENGINE_TYPE_SR || config->engine == PPA_ENGINE_TYPE_BLEND, ESP_ERR_INVALID_ARG, TAG, "invalid engine"); + + *ret_engine = NULL; + + uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA); + size_t alignment = MAX(DMA2D_LL_DESC_ALIGNMENT, data_cache_line_size); + + _lock_acquire(&s_platform.mutex); + if (s_platform.dma_desc_mem_size == 0) { + s_platform.dma_desc_mem_size = ALIGN_UP(sizeof(dma2d_descriptor_align8_t), alignment); + } + + if (config->engine == PPA_ENGINE_TYPE_SR) { + if (!s_platform.sr) { + ppa_sr_engine_t *sr_engine = heap_caps_calloc(1, sizeof(ppa_sr_engine_t), PPA_MEM_ALLOC_CAPS); + SemaphoreHandle_t sr_sem = xSemaphoreCreateBinary(); + dma2d_descriptor_t *sr_tx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS); + dma2d_descriptor_t *sr_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS); + if (sr_engine && sr_sem && sr_tx_dma_desc && sr_rx_dma_desc) { + sr_engine->dma_tx_desc = sr_tx_dma_desc; + sr_engine->dma_rx_desc = sr_rx_dma_desc; + // sr_engine->base.group = s_platform.group[group_id]; + sr_engine->base.type = PPA_ENGINE_TYPE_SR; + sr_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED; + sr_engine->base.sem = sr_sem; + xSemaphoreGive(sr_engine->base.sem); + sr_engine->base.in_accepting_trans_state = true; + STAILQ_INIT(&sr_engine->base.trans_stailq); + // sr_engine->base.event_cbs + s_platform.sr = sr_engine; + s_platform.sr_engine_ref_count++; + *ret_engine = &sr_engine->base; + } else { + ret = ESP_ERR_NO_MEM; + ESP_LOGE(TAG, "no mem to register PPA SR engine"); + free(sr_engine); + if (sr_sem) { + vSemaphoreDelete(sr_sem); + } + free(sr_tx_dma_desc); + free(sr_rx_dma_desc); + } + } else { + // SR engine already registered + s_platform.sr_engine_ref_count++; + *ret_engine = &s_platform.sr->base; + } + } else if (config->engine == PPA_ENGINE_TYPE_BLEND) { + if (!s_platform.blending) { + ppa_blend_engine_t *blending_engine = heap_caps_calloc(1, sizeof(ppa_blend_engine_t), PPA_MEM_ALLOC_CAPS); + SemaphoreHandle_t blending_sem = xSemaphoreCreateBinary(); + dma2d_descriptor_t *blending_tx_bg_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS); + dma2d_descriptor_t *blending_tx_fg_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS); + dma2d_descriptor_t *blending_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS); + if (blending_engine && blending_sem && blending_tx_bg_dma_desc && blending_tx_fg_dma_desc && blending_rx_dma_desc) { + blending_engine->dma_tx_bg_desc = blending_tx_bg_dma_desc; + blending_engine->dma_tx_fg_desc = blending_tx_fg_dma_desc; + blending_engine->dma_rx_desc = blending_rx_dma_desc; + // blending_engine->base.group = s_platform.group[group_id]; + blending_engine->base.type = PPA_ENGINE_TYPE_BLEND; + blending_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED; + 
blending_engine->base.sem = blending_sem; + xSemaphoreGive(blending_engine->base.sem); + blending_engine->base.in_accepting_trans_state = true; + STAILQ_INIT(&blending_engine->base.trans_stailq); + // blending_engine->base.event_cbs + s_platform.blending = blending_engine; + s_platform.blend_engine_ref_count++; + *ret_engine = &blending_engine->base; + } else { + ret = ESP_ERR_NO_MEM; + ESP_LOGE(TAG, "no mem to register PPA Blending engine"); + free(blending_engine); + if (blending_sem) { + vSemaphoreDelete(blending_sem); + } + free(blending_tx_bg_dma_desc); + free(blending_tx_fg_dma_desc); + free(blending_rx_dma_desc); + } + } else { + // Blending engine already registered + s_platform.blend_engine_ref_count++; + *ret_engine = &s_platform.blending->base; + } + } + + if (ret == ESP_OK) { + if (!s_platform.hal.dev) { + assert(!s_platform.dma2d_pool_handle); + + // Enable the bus clock to access PPA registers + PERIPH_RCC_ATOMIC() { + ppa_ll_enable_bus_clock(true); + ppa_ll_reset_register(); + } + + ppa_hal_init(&s_platform.hal); // initialize HAL context + + // Get 2D-DMA pool handle + dma2d_pool_config_t dma2d_config = { + .pool_id = 0, + }; + ret = dma2d_acquire_pool(&dma2d_config, &s_platform.dma2d_pool_handle); + if (ret != ESP_OK) { + ESP_LOGE(TAG, "install 2D-DMA failed"); + } + } + } + _lock_release(&s_platform.mutex); + + if (ret != ESP_OK && *ret_engine != NULL) { + ppa_engine_release(*ret_engine); + } + + return ret; +} + +esp_err_t ppa_engine_release(ppa_engine_handle_t ppa_engine) +{ + esp_err_t ret = ESP_OK; + ESP_RETURN_ON_FALSE(ppa_engine, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + + _lock_acquire(&s_platform.mutex); + if (ppa_engine->type == PPA_ENGINE_TYPE_SR) { + ppa_sr_engine_t *sr_engine = __containerof(ppa_engine, ppa_sr_engine_t, base); + s_platform.sr_engine_ref_count--; + if (s_platform.sr_engine_ref_count == 0) { + // Stop accepting new transactions to SR engine + portENTER_CRITICAL(&sr_engine->base.spinlock); + sr_engine->base.in_accepting_trans_state = false; + portEXIT_CRITICAL(&sr_engine->base.spinlock); + // Wait until all transactions get processed + while (!STAILQ_EMPTY(&sr_engine->base.trans_stailq)); // TODO: Think twice, looks like I am not able to use engine semaphore to decide + // Now, time to free + s_platform.sr = NULL; + free(sr_engine->dma_tx_desc); + free(sr_engine->dma_rx_desc); + vSemaphoreDelete(sr_engine->base.sem); + free(sr_engine); + } + } else if (ppa_engine->type == PPA_ENGINE_TYPE_BLEND) { + ppa_blend_engine_t *blending_engine = __containerof(ppa_engine, ppa_blend_engine_t, base); + s_platform.blend_engine_ref_count--; + if (s_platform.blend_engine_ref_count == 0) { + // Stop accepting new transactions to blending engine + portENTER_CRITICAL(&blending_engine->base.spinlock); + blending_engine->base.in_accepting_trans_state = false; + portEXIT_CRITICAL(&blending_engine->base.spinlock); + // Wait until all transactions get processed + while (!STAILQ_EMPTY(&blending_engine->base.trans_stailq)); // TODO: Think twice, looks like I am not able to use engine semaphore to decide + // Now, time to free + s_platform.blending = NULL; + free(blending_engine->dma_tx_bg_desc); + free(blending_engine->dma_tx_fg_desc); + free(blending_engine->dma_rx_desc); + vSemaphoreDelete(blending_engine->base.sem); + free(blending_engine); + } + } + + if (!s_platform.sr && !s_platform.blending) { + assert(s_platform.sr_engine_ref_count == 0 && s_platform.blend_engine_ref_count == 0); + + if (s_platform.dma2d_pool_handle) { + 
dma2d_release_pool(s_platform.dma2d_pool_handle); // TODO: check return value. If not ESP_OK, then must be error on other 2D-DMA clients :( Give a warning log? + s_platform.dma2d_pool_handle = NULL; + } + + ppa_hal_deinit(&s_platform.hal); // De-initialize HAL context + + // Disable the bus clock to access PPA registers + PERIPH_RCC_ATOMIC() { + ppa_ll_enable_bus_clock(false); + } + } + _lock_release(&s_platform.mutex); + return ret; +} + +// // TODO: pm lock? +// esp_err_t ppa_module_acquire(const ppa_group_alloc_config_t *config, ppa_group_handle_t *ret_group) +// { +// esp_err_t ret = ESP_OK; +// ESP_RETURN_ON_FALSE(config && ret_group, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); +// ESP_RETURN_ON_FALSE(config->group_id < PPA_LL_GROUPS, ESP_ERR_INVALID_ARG, TAG, "invalid group_id"); + +// int group_id = config->group_id; + +// // _lock_acquire(&s_platform.mutex); +// // if (!s_platform.group[group_id]) { +// // ppa_group_t *pre_alloc_group = heap_caps_calloc(1, sizeof(ppa_group_t), PPA_MEM_ALLOC_CAPS); +// // if (pre_alloc_group) { +// // ppa_hal_init(&pre_alloc_group->hal, group_id); // initialize HAL context +// // pre_alloc_group->group_id = group_id; +// // pre_alloc_group->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED; +// // s_platform.group[group_id] = pre_alloc_group; // register to platform +// // // Enable the bus clock to access PPA registers +// // PERIPH_RCC_ATOMIC() { +// // ppa_ll_enable_bus_clock(group_id, true); +// // ppa_ll_reset_register(group_id); +// // } + +// // // Get 2D-DMA pool handle +// // dma2d_pool_config_t dma2d_config = { +// // .pool_id = 0, +// // }; +// // ret = dma2d_acquire_pool(&dma2d_config, &s_platform.group[group_id]->dma2d_pool_handle); +// // if (ret != ESP_OK) { +// // ESP_LOGE(TAG, "install 2D-DMA failed"); +// // // free(pre_alloc_group); +// // // s_platform.group[group_id] = NULL; +// // } +// // } else { +// // ret = ESP_ERR_NO_MEM; +// // } +// // } + +// // // Register PPA SR engine +// // if (ret == ESP_OK && config->sr_engine_en && !s_platform.group[group_id]->sr) { +// // ppa_sr_engine_t *sr_engine = heap_caps_calloc(1, sizeof(ppa_sr_engine_t), PPA_MEM_ALLOC_CAPS); +// // SemaphoreHandle_t sr_sem = xSemaphoreCreateBinary(); +// // dma2d_descriptor_t *sr_tx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); // TODO: get cache line size by API +// // dma2d_descriptor_t *sr_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); +// // if (sr_engine && sr_sem && sr_tx_dma_desc && sr_rx_dma_desc) { +// // sr_engine->dma_tx_desc = sr_tx_dma_desc; +// // sr_engine->dma_rx_desc = sr_rx_dma_desc; +// // sr_engine->base.group = s_platform.group[group_id]; +// // sr_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED; +// // sr_engine->base.sem = sr_sem; +// // xSemaphoreGive(sr_engine->base.sem); +// // sr_engine->base.in_accepting_trans_state = true; +// // STAILQ_INIT(&sr_engine->base.trans_stailq); +// // // sr_engine->base.event_cbs +// // s_platform.group[group_id]->sr = sr_engine; +// // } else { +// // ret = ESP_ERR_NO_MEM; +// // ESP_LOGE(TAG, "no mem to register PPA SR engine"); +// // free(sr_engine); +// // if (sr_sem) vSemaphoreDelete(sr_sem); +// // free(sr_tx_dma_desc); +// // free(sr_rx_dma_desc); +// // } +// // } + +// // // Register PPA Blending engine +// // if (ret == ESP_OK && config->blending_engine_en && !s_platform.group[group_id]->blending) { +// // ppa_blending_engine_t *blending_engine = heap_caps_calloc(1, 
sizeof(ppa_blending_engine_t), PPA_MEM_ALLOC_CAPS); +// // SemaphoreHandle_t blending_sem = xSemaphoreCreateBinary(); +// // dma2d_descriptor_t *blending_tx_bg_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); // TODO: get cache line size by API +// // dma2d_descriptor_t *blending_tx_fg_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); +// // dma2d_descriptor_t *blending_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); +// // if (blending_engine && blending_sem && blending_tx_bg_dma_desc && blending_tx_fg_dma_desc && blending_rx_dma_desc) { +// // blending_engine->dma_tx_bg_desc = blending_tx_bg_dma_desc; +// // blending_engine->dma_tx_fg_desc = blending_tx_fg_dma_desc; +// // blending_engine->dma_rx_desc = blending_rx_dma_desc; +// // blending_engine->base.group = s_platform.group[group_id]; +// // blending_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED; +// // blending_engine->base.sem = blending_sem; +// // xSemaphoreGive(blending_engine->base.sem); +// // blending_engine->base.in_accepting_trans_state = true; +// // STAILQ_INIT(&blending_engine->base.trans_stailq); +// // // blending_engine->base.event_cbs +// // s_platform.group[group_id]->blending = blending_engine; +// // } else { +// // ret = ESP_ERR_NO_MEM; +// // ESP_LOGE(TAG, "no mem to register PPA Blending engine"); +// // free(blending_engine); +// // if (blending_sem) vSemaphoreDelete(blending_sem); +// // free(blending_tx_bg_dma_desc); +// // free(blending_tx_fg_dma_desc); +// // free(blending_rx_dma_desc); +// // } +// // } +// // _lock_release(&s_platform.mutex); + +// // ppa_module_release + +// bool new_group = false; +// bool new_sr_engine = false; +// bool new_blending_engine = false; +// ppa_group_t *pre_alloc_group = heap_caps_calloc(1, sizeof(ppa_group_t), PPA_MEM_ALLOC_CAPS); +// ppa_sr_engine_t *sr_engine = NULL; +// ppa_blend_engine_t *blending_engine = NULL; +// SemaphoreHandle_t sr_sem = NULL, blending_sem = NULL; + +// // portENTER_CRITICAL(&s_platform.spinlock); +// if (!s_platform.group[group_id]) { +// if (pre_alloc_group) { +// new_group = true; +// ppa_hal_init(&pre_alloc_group->hal, group_id); // initialize HAL context +// pre_alloc_group->group_id = group_id; +// pre_alloc_group->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED; +// s_platform.group[group_id] = pre_alloc_group; // register to platform +// // Enable the bus clock to access PPA registers +// PERIPH_RCC_ATOMIC() { +// ppa_ll_enable_bus_clock(group_id, true); +// ppa_ll_reset_register(group_id); +// } +// } else { +// ret = ESP_ERR_NO_MEM; +// } +// } +// // portEXIT_CRITICAL(&s_platform.spinlock); + +// if (new_group) { +// // Get 2D-DMA pool handle +// dma2d_pool_config_t dma2d_config = { +// .pool_id = 0, +// }; +// ret = dma2d_acquire_pool(&dma2d_config, &s_platform.group[group_id]->dma2d_pool_handle); +// if (ret != ESP_OK) { +// ESP_LOGE(TAG, "install 2D-DMA failed"); +// goto err; +// } +// } + +// if (ret == ESP_OK && config->sr_engine_en) { +// sr_engine = heap_caps_calloc(1, sizeof(ppa_sr_engine_t), PPA_MEM_ALLOC_CAPS); +// sr_sem = xSemaphoreCreateBinary(); +// dma2d_descriptor_t *sr_tx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); // TODO: get cache line size by API +// dma2d_descriptor_t *sr_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); +// // Register PPA SR engine +// 
portENTER_CRITICAL(&s_platform.group[group_id]->spinlock); +// if (!s_platform.group[group_id]->sr) { +// if (sr_engine && sr_sem && sr_tx_dma_desc && sr_rx_dma_desc) { +// new_sr_engine = true; +// sr_engine->dma_tx_desc = sr_tx_dma_desc; +// sr_engine->dma_rx_desc = sr_rx_dma_desc; +// sr_engine->base.group = s_platform.group[group_id]; +// sr_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED; +// sr_engine->base.sem = sr_sem; +// xSemaphoreGive(sr_engine->base.sem); +// sr_engine->base.in_accepting_trans_state = true; +// STAILQ_INIT(&sr_engine->base.trans_stailq); +// // sr_engine->base.event_cbs +// s_platform.group[group_id]->sr = sr_engine; +// } else { +// ret = ESP_ERR_NO_MEM; +// } +// } +// portEXIT_CRITICAL(&s_platform.group[group_id]->spinlock); +// if (ret == ESP_ERR_NO_MEM) { +// ESP_LOGE(TAG, "no mem to register PPA SR engine"); +// } +// } + +// if (ret == ESP_OK && config->blending_engine_en) { +// blending_engine = heap_caps_calloc(1, sizeof(ppa_blend_engine_t), PPA_MEM_ALLOC_CAPS); +// blending_sem = xSemaphoreCreateBinary(); +// dma2d_descriptor_t *blending_tx_bg_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); // TODO: get cache line size by API +// dma2d_descriptor_t *blending_tx_fg_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); +// dma2d_descriptor_t *blending_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); +// // Register PPA Blending engine +// portENTER_CRITICAL(&s_platform.group[group_id]->spinlock); +// if (!s_platform.group[group_id]->blending) { +// if (blending_engine && blending_sem && blending_tx_bg_dma_desc && blending_tx_fg_dma_desc && blending_rx_dma_desc) { +// new_blending_engine = true; +// blending_engine->dma_tx_bg_desc = blending_tx_bg_dma_desc; +// blending_engine->dma_tx_fg_desc = blending_tx_fg_dma_desc; +// blending_engine->dma_rx_desc = blending_rx_dma_desc; +// blending_engine->base.group = s_platform.group[group_id]; +// blending_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED; +// blending_engine->base.sem = blending_sem; +// xSemaphoreGive(blending_engine->base.sem); +// blending_engine->base.in_accepting_trans_state = true; +// STAILQ_INIT(&blending_engine->base.trans_stailq); +// // blending_engine->base.event_cbs +// s_platform.group[group_id]->blending = blending_engine; +// } else { +// ret = ESP_ERR_NO_MEM; +// } +// } +// portEXIT_CRITICAL(&s_platform.group[group_id]->spinlock); +// if (ret == ESP_ERR_NO_MEM) { +// ESP_LOGE(TAG, "no mem to register PPA Blending engine"); +// } +// } + +// if (!new_sr_engine) { +// free(sr_engine); +// if (sr_sem) vSemaphoreDelete(sr_sem); +// // TODO: free desc +// } +// if (!new_blending_engine) { +// free(blending_engine); +// if (blending_sem) vSemaphoreDelete(blending_sem); +// // TODO: free desc +// } +// err: +// if (ret != ESP_OK) { +// if (new_group) { +// ppa_module_release(s_platform.group[group_id]); +// } +// } +// if (!new_group) { +// free(pre_alloc_group); +// } +// *ret_group = s_platform.group[group_id]; +// return ret; +// } + +// esp_err_t ppa_module_release(ppa_group_handle_t ppa_group) +// { +// esp_err_t ret = ESP_OK; +// ESP_RETURN_ON_FALSE(ppa_group, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + +// bool do_deinitialize = false; +// int group_id = ppa_group->group_id; +// ppa_sr_engine_t *sr_engine = ppa_group->sr; +// ppa_blend_engine_t *blending_engine = ppa_group->blending; +// bool sr_no_waiting_trans = 
true; +// bool blending_no_waiting_trans = true; + +// // portENTER_CRITICAL(&s_platform.spinlock); +// portENTER_CRITICAL(&ppa_group->spinlock); +// if (sr_engine) { +// sr_engine->base.in_accepting_trans_state = false; +// portENTER_CRITICAL(&sr_engine->base.spinlock); +// sr_no_waiting_trans = STAILQ_EMPTY(&sr_engine->base.trans_stailq); +// portEXIT_CRITICAL(&sr_engine->base.spinlock); +// } +// if (blending_engine) { +// blending_engine->base.in_accepting_trans_state = false; +// portENTER_CRITICAL(&blending_engine->base.spinlock); +// blending_no_waiting_trans = STAILQ_EMPTY(&blending_engine->base.trans_stailq); +// portEXIT_CRITICAL(&blending_engine->base.spinlock); +// } +// portEXIT_CRITICAL(&ppa_group->spinlock); +// if (sr_no_waiting_trans && blending_no_waiting_trans) { +// do_deinitialize = true; +// ppa_group->sr = NULL; +// ppa_group->blending = NULL; +// s_platform.group[group_id] = NULL; +// } else { +// ret = ESP_FAIL; +// } +// // portEXIT_CRITICAL(&s_platform.spinlock); + +// if (do_deinitialize) { +// if (sr_engine) { +// free(sr_engine->dma_tx_desc); +// free(sr_engine->dma_rx_desc); +// vSemaphoreDelete(sr_engine->base.sem); +// free(sr_engine); +// } +// if (blending_engine) { +// free(blending_engine->dma_tx_bg_desc); +// free(blending_engine->dma_tx_fg_desc); +// free(blending_engine->dma_rx_desc); +// vSemaphoreDelete(blending_engine->base.sem); +// free(blending_engine); +// } +// dma2d_release_pool(ppa_group->dma2d_pool_handle); +// // Disable the bus clock to access PPA registers +// PERIPH_RCC_ATOMIC() { +// ppa_ll_enable_bus_clock(group_id, false); +// } +// free(ppa_group); +// } +// return ret; +// } + +// Each PPA engine should only have one transaction being pushed to 2D-DMA queue, the rest transactions should stay in engine's own transaction queue. 
+// This is to avoid 2D-DMA channels being hold, but not actually being used (waiting for PPA engine to be free) +static esp_err_t ppa_dma2d_enqueue(const ppa_trans_t *trans_elm) +{ + return dma2d_enqueue(s_platform.dma2d_pool_handle, trans_elm->trans_desc, trans_elm->dma_trans_placeholder); +} + +static void ppa_recycle_transaction(ppa_trans_t *trans_elm) +{ + if (trans_elm) { + if (trans_elm->trans_desc) { + ppa_dma2d_trans_on_picked_config_t *trans_on_picked_desc = (ppa_dma2d_trans_on_picked_config_t *)trans_elm->trans_desc->user_config; + if (trans_on_picked_desc) { + if (trans_on_picked_desc->trigger_periph == DMA2D_TRIG_PERIPH_PPA_SR) { + free(trans_on_picked_desc->sr_desc); + } else if (trans_on_picked_desc->trigger_periph == DMA2D_TRIG_PERIPH_PPA_BLEND) { + free(trans_on_picked_desc->blend_desc); + } + free(trans_on_picked_desc); + } + free(trans_elm->trans_desc); + } + if (trans_elm->sem) { + vSemaphoreDelete(trans_elm->sem); + } + free(trans_elm); + } +} + +static bool ppa_sr_transaction_done_cb(dma2d_channel_handle_t dma2d_chan, dma2d_event_data_t *event_data, void *user_data) +{ + bool need_yield = false; + BaseType_t HPTaskAwoken; + ppa_trans_t *trans_elm = (ppa_trans_t *)user_data; + ppa_dma2d_trans_on_picked_config_t *trans_on_picked_desc = (ppa_dma2d_trans_on_picked_config_t *)trans_elm->trans_desc->user_config; + assert(trans_on_picked_desc->trigger_periph == DMA2D_TRIG_PERIPH_PPA_SR); + ppa_sr_engine_t *sr_engine = __containerof(trans_on_picked_desc->ppa_engine, ppa_sr_engine_t, base); + // ppa_group_t *ppa_group = sr_engine->base.group; + + ppa_trans_t *next_start_trans = NULL; + portENTER_CRITICAL_ISR(&sr_engine->base.spinlock); + // Remove this transaction from transaction queue + STAILQ_REMOVE(&sr_engine->base.trans_stailq, trans_elm, ppa_trans_s, entry); + next_start_trans = STAILQ_FIRST(&sr_engine->base.trans_stailq); + portEXIT_CRITICAL_ISR(&sr_engine->base.spinlock); + + // If there is next trans in PPA engine queue, send it to DMA queue; otherwise, release the engine semaphore + if (next_start_trans) { + esp_rom_printf("from ISR -"); + ppa_dma2d_enqueue(next_start_trans); + } else { + xSemaphoreGiveFromISR(sr_engine->base.sem, &HPTaskAwoken); + need_yield |= (HPTaskAwoken == pdTRUE); + } + esp_rom_printf("trans addr: 0x%x\n", trans_elm); + // Recycle transaction or give transaction semaphore + if (trans_elm->sem != NULL) { + xSemaphoreGiveFromISR(trans_elm->sem, &HPTaskAwoken); + need_yield |= (HPTaskAwoken == pdTRUE); + } else { + ppa_recycle_transaction(trans_elm); + } + + // TODO: how to notify non-blocking transaction + + return need_yield; +} + +static bool ppa_sr_transaction_on_picked(uint32_t channel_num, const dma2d_trans_channel_info_t *dma2d_chans, void *user_config) +{ + assert(channel_num == 2 && dma2d_chans && user_config); + ppa_dma2d_trans_on_picked_config_t *trans_on_picked_desc = (ppa_dma2d_trans_on_picked_config_t *)user_config; + assert(trans_on_picked_desc->trigger_periph == DMA2D_TRIG_PERIPH_PPA_SR && trans_on_picked_desc->sr_desc && trans_on_picked_desc->ppa_engine); + + ppa_sr_transaction_t *sr_trans_desc = trans_on_picked_desc->sr_desc; + ppa_sr_engine_t *sr_engine = __containerof(trans_on_picked_desc->ppa_engine, ppa_sr_engine_t, base); + // ppa_group_t *ppa_group = sr_engine->base.group; + + // Free 2D-DMA transaction placeholder (transaction has already been moved out from 2D-DMA queue) + free(trans_on_picked_desc->trans_elm->dma_trans_placeholder); + + // Get the required 2D-DMA channel handles + uint32_t dma2d_tx_chan_idx = 0; + 
uint32_t dma2d_rx_chan_idx = 1; + if (dma2d_chans[0].dir == DMA2D_CHANNEL_DIRECTION_RX) { + dma2d_tx_chan_idx = 1; + dma2d_rx_chan_idx = 0; + } + dma2d_channel_handle_t dma2d_tx_chan = dma2d_chans[dma2d_tx_chan_idx].chan; + dma2d_channel_handle_t dma2d_rx_chan = dma2d_chans[dma2d_rx_chan_idx].chan; + + // Write back and invalidate are performed on the entire picture (the window content is not continuous in the buffer) + // Write back in_buffer + uint32_t in_buffer_len = sr_trans_desc->in_pic_w * sr_trans_desc->in_pic_h * color_hal_pixel_format_get_bit_depth((color_space_pixel_format_t) { + .color_type_id = sr_trans_desc->in_color.mode + }) / 8; + esp_cache_msync(sr_trans_desc->in_buffer, in_buffer_len, ESP_CACHE_MSYNC_FLAG_DIR_C2M); + // Invalidate out_buffer + uint32_t out_buffer_len = sr_trans_desc->out_pic_w * sr_trans_desc->out_pic_h * color_hal_pixel_format_get_bit_depth((color_space_pixel_format_t) { + .color_type_id = sr_trans_desc->out_color.mode + }) / 8; + esp_cache_msync(sr_trans_desc->out_buffer, out_buffer_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C); + + // Fill 2D-DMA descriptors + sr_engine->dma_tx_desc->vb_size = sr_trans_desc->in_block_h; + sr_engine->dma_tx_desc->hb_length = sr_trans_desc->in_block_w; + sr_engine->dma_tx_desc->err_eof = 0; + sr_engine->dma_tx_desc->dma2d_en = 1; + sr_engine->dma_tx_desc->suc_eof = 1; + sr_engine->dma_tx_desc->owner = DMA2D_DESCRIPTOR_BUFFER_OWNER_DMA; + sr_engine->dma_tx_desc->va_size = sr_trans_desc->in_pic_h; + sr_engine->dma_tx_desc->ha_length = sr_trans_desc->in_pic_w; + sr_engine->dma_tx_desc->pbyte = dma2d_desc_pixel_format_to_pbyte_value((color_space_pixel_format_t) { + .color_type_id = sr_trans_desc->in_color.mode + }); // check in 912 whether this field can be ignored (911 seems cannot) No! Why? + sr_engine->dma_tx_desc->y = sr_trans_desc->in_block_offset_y; + sr_engine->dma_tx_desc->x = sr_trans_desc->in_block_offset_x; + sr_engine->dma_tx_desc->mode = DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE; + sr_engine->dma_tx_desc->buffer = (void *)sr_trans_desc->in_buffer; + sr_engine->dma_tx_desc->next = NULL; + + // sr_engine->dma_rx_desc->vb_size = sr_trans_desc->in_block_h; // check in 912 whether this field can be ignored (911 seems cannot) No! Why? + // sr_engine->dma_rx_desc->hb_length = sr_trans_desc->in_block_w; // check in 912 whether this field can be ignored (911 seems cannot) No! Why? + sr_engine->dma_rx_desc->vb_size = 1; + sr_engine->dma_rx_desc->hb_length = 1; + sr_engine->dma_rx_desc->err_eof = 0; + sr_engine->dma_rx_desc->dma2d_en = 1; + sr_engine->dma_rx_desc->suc_eof = 1; + sr_engine->dma_rx_desc->owner = DMA2D_DESCRIPTOR_BUFFER_OWNER_DMA; + sr_engine->dma_rx_desc->va_size = sr_trans_desc->out_pic_h; + sr_engine->dma_rx_desc->ha_length = sr_trans_desc->out_pic_w; + sr_engine->dma_rx_desc->pbyte = dma2d_desc_pixel_format_to_pbyte_value((color_space_pixel_format_t) { + .color_type_id = sr_trans_desc->out_color.mode + }); // check in 912 whether this field can be ignored (911 seems cannot) No! Why? 
+ sr_engine->dma_rx_desc->y = sr_trans_desc->out_block_offset_y; + sr_engine->dma_rx_desc->x = sr_trans_desc->out_block_offset_x; + sr_engine->dma_rx_desc->mode = DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE; + sr_engine->dma_rx_desc->buffer = (void *)sr_trans_desc->out_buffer; + sr_engine->dma_rx_desc->next = NULL; + + esp_cache_msync((void *)sr_engine->dma_tx_desc, s_platform.dma_desc_mem_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M); + esp_cache_msync((void *)sr_engine->dma_rx_desc, s_platform.dma_desc_mem_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M); + + // Configure 2D-DMA channels + dma2d_trigger_t trig_periph = { + .periph = DMA2D_TRIG_PERIPH_PPA_SR, + .periph_sel_id = SOC_DMA2D_TRIG_PERIPH_PPA_SR_TX, + }; + dma2d_connect(dma2d_tx_chan, &trig_periph); + trig_periph.periph_sel_id = SOC_DMA2D_TRIG_PERIPH_PPA_SR_RX; + dma2d_connect(dma2d_rx_chan, &trig_periph); + + dma2d_transfer_ability_t dma_transfer_ability = { + .data_burst_length = DMA2D_DATA_BURST_LENGTH_128, + .desc_burst_en = true, + .mb_size = DMA2D_MACRO_BLOCK_SIZE_NONE, + }; + dma2d_set_transfer_ability(dma2d_tx_chan, &dma_transfer_ability); + dma2d_set_transfer_ability(dma2d_rx_chan, &dma_transfer_ability); + + // TODO: configuring this doesn't seem helping anything? Shouldn't it related to descriptor pbyte? + dma2d_strategy_config_t dma_strategy = { + .auto_update_desc = true, + }; + dma2d_apply_strategy(dma2d_tx_chan, &dma_strategy); + + // YUV444 and YUV422 are not supported by PPA module, need to utilize 2D-DMA color space conversion feature to do a conversion + ppa_sr_color_mode_t ppa_in_color_mode = sr_trans_desc->in_color.mode; + if (ppa_in_color_mode == PPA_SR_COLOR_MODE_YUV444) { + ppa_in_color_mode = PPA_SR_COLOR_MODE_RGB888; + dma2d_csc_config_t dma_tx_csc = {0}; + if (sr_trans_desc->in_color.yuv_std == COLOR_CONV_STD_RGB_YUV_BT601) { + dma_tx_csc.tx_csc_option = DMA2D_CSC_TX_YUV444_TO_RGB888_601; + } else { + dma_tx_csc.tx_csc_option = DMA2D_CSC_TX_YUV444_TO_RGB888_709; + } + dma2d_configure_color_space_conversion(dma2d_tx_chan, &dma_tx_csc); + } else if (ppa_in_color_mode == PPA_SR_COLOR_MODE_YUV422) { + ppa_in_color_mode = PPA_SR_COLOR_MODE_RGB888; + dma2d_csc_config_t dma_tx_csc = {0}; + if (sr_trans_desc->in_color.yuv_std == COLOR_CONV_STD_RGB_YUV_BT601) { + dma_tx_csc.tx_csc_option = DMA2D_CSC_TX_YUV422_TO_RGB888_601; + } else { + dma_tx_csc.tx_csc_option = DMA2D_CSC_TX_YUV422_TO_RGB888_709; + } + dma2d_configure_color_space_conversion(dma2d_tx_chan, &dma_tx_csc); + } + + ppa_sr_color_mode_t ppa_out_color_mode = sr_trans_desc->out_color.mode; + if (ppa_out_color_mode == PPA_SR_COLOR_MODE_YUV444) { + ppa_out_color_mode = PPA_SR_COLOR_MODE_YUV420; + dma2d_csc_config_t dma_rx_csc = { + .rx_csc_option = DMA2D_CSC_RX_YUV420_TO_YUV444, + }; + dma2d_configure_color_space_conversion(dma2d_rx_chan, &dma_rx_csc); + } + + dma2d_rx_event_callbacks_t dma_event_cbs = { + .on_recv_eof = ppa_sr_transaction_done_cb, + }; + dma2d_register_rx_event_callbacks(dma2d_rx_chan, &dma_event_cbs, (void *)trans_on_picked_desc->trans_elm); + + ppa_ll_sr_reset(s_platform.hal.dev); + + dma2d_set_desc_addr(dma2d_tx_chan, (intptr_t)sr_engine->dma_tx_desc); + dma2d_set_desc_addr(dma2d_rx_chan, (intptr_t)sr_engine->dma_rx_desc); + dma2d_start(dma2d_tx_chan); + dma2d_start(dma2d_rx_chan); + + // Configure PPA SR engine + ppa_ll_sr_set_rx_color_mode(s_platform.hal.dev, ppa_in_color_mode); + if (COLOR_SPACE_TYPE(ppa_in_color_mode) == COLOR_SPACE_YUV) { + ppa_ll_sr_set_rx_yuv_range(s_platform.hal.dev, sr_trans_desc->in_color.yuv_range); + 
ppa_ll_sr_set_yuv2rgb_std(s_platform.hal.dev, sr_trans_desc->in_color.yuv_std); + } + ppa_ll_sr_enable_rx_byte_swap(s_platform.hal.dev, sr_trans_desc->in_color.byte_swap); + ppa_ll_sr_enable_rx_rgb_swap(s_platform.hal.dev, sr_trans_desc->in_color.rgb_swap); + + ppa_ll_sr_set_tx_color_mode(s_platform.hal.dev, ppa_out_color_mode); + if (COLOR_SPACE_TYPE(ppa_out_color_mode) == COLOR_SPACE_YUV) { + ppa_ll_sr_set_rx_yuv_range(s_platform.hal.dev, sr_trans_desc->out_color.yuv_range); + ppa_ll_sr_set_yuv2rgb_std(s_platform.hal.dev, sr_trans_desc->out_color.yuv_std); + } + + ppa_ll_sr_configure_rx_alpha(s_platform.hal.dev, sr_trans_desc->alpha_mode, sr_trans_desc->alpha_value); + + // TODO: sr_macro_bk_ro_bypass + ppa_ll_sr_set_rotation_angle(s_platform.hal.dev, sr_trans_desc->rotation_angle); + ppa_ll_sr_set_scaling_x(s_platform.hal.dev, (uint32_t)sr_trans_desc->scale_x, (uint32_t)(sr_trans_desc->scale_x * (PPA_LL_SR_SCALING_FRAG_MAX + 1)) & PPA_LL_SR_SCALING_FRAG_MAX); + ppa_ll_sr_set_scaling_y(s_platform.hal.dev, (uint32_t)sr_trans_desc->scale_y, (uint32_t)(sr_trans_desc->scale_y * (PPA_LL_SR_SCALING_FRAG_MAX + 1)) & PPA_LL_SR_SCALING_FRAG_MAX); + ppa_ll_sr_enable_mirror_x(s_platform.hal.dev, sr_trans_desc->mirror_x); + ppa_ll_sr_enable_mirror_y(s_platform.hal.dev, sr_trans_desc->mirror_y); + + ppa_ll_sr_start(s_platform.hal.dev); + + // dma2d_start(dma2d_tx_chan); + // dma2d_start(dma2d_rx_chan); + + // No need to yield + return false; +} + +esp_err_t ppa_do_scale_and_rotate(ppa_engine_handle_t ppa_engine, const ppa_sr_trans_config_t *config, ppa_trans_mode_t mode) +{ + esp_err_t ret = ESP_OK; + ESP_RETURN_ON_FALSE(ppa_engine && config, ESP_ERR_INVALID_ARG, TAG, "invalid argument"); + ESP_RETURN_ON_FALSE(ppa_engine->type == PPA_ENGINE_TYPE_SR, ESP_ERR_INVALID_ARG, TAG, "wrong engine handle"); + ESP_RETURN_ON_FALSE(mode <= PPA_TRANS_MODE_NON_BLOCKING, ESP_ERR_INVALID_ARG, TAG, "invalid mode"); + // Any restrictions on in/out buffer address? alignment? alignment restriction comes from cache, its addr and size need to be aligned to cache line size on 912! + // buffer on stack/heap + // ESP_RETURN_ON_FALSE(config->rotation_angle) + // ESP_RETURN_ON_FALSE(config->in/out_color_mode) + // what if in_color is YUV420, out is RGB, what is out RGB range? Full range? + ESP_RETURN_ON_FALSE(config->scale_x < (PPA_LL_SR_SCALING_INT_MAX + 1) && config->scale_x >= (1.0 / PPA_LL_SR_SCALING_FRAG_MAX) && + config->scale_y < (PPA_LL_SR_SCALING_INT_MAX + 1) && config->scale_y >= (1.0 / PPA_LL_SR_SCALING_FRAG_MAX), + ESP_ERR_INVALID_ARG, TAG, "invalid scale"); + // byte/rgb swap with color mode only to (A)RGB color space? 
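+    // Note (illustration only): the float scale is programmed into hardware as an
+    // integer/fraction pair in ppa_sr_transaction_on_picked() above:
+    //   integer  = (uint32_t)scale
+    //   fraction = (uint32_t)(scale * (PPA_LL_SR_SCALING_FRAG_MAX + 1)) & PPA_LL_SR_SCALING_FRAG_MAX
+    // e.g. if PPA_LL_SR_SCALING_FRAG_MAX were 15 (4 fractional bits), scale_x = 1.5 would be
+    // programmed as integer 1, fraction 8. The bounds checked above keep both parts in range.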
+ + ppa_sr_engine_t *sr_engine = __containerof(ppa_engine, ppa_sr_engine_t, base); + // ESP_RETURN_ON_FALSE(sr_engine, ESP_FAIL, TAG, "SR engine not registered, please register through ppa_module_acquire first"); + + ppa_trans_t *new_trans_elm = (ppa_trans_t *)heap_caps_calloc(1, sizeof(ppa_trans_t), PPA_MEM_ALLOC_CAPS); + dma2d_trans_t *dma_trans_elm = (dma2d_trans_t *)heap_caps_calloc(1, SIZEOF_DMA2D_TRANS_T, PPA_MEM_ALLOC_CAPS); + dma2d_trans_config_t *dma_trans_desc = (dma2d_trans_config_t *)heap_caps_calloc(1, sizeof(dma2d_trans_config_t), PPA_MEM_ALLOC_CAPS); + ppa_dma2d_trans_on_picked_config_t *trans_on_picked_desc = (ppa_dma2d_trans_on_picked_config_t *)heap_caps_calloc(1, sizeof(ppa_dma2d_trans_on_picked_config_t), PPA_MEM_ALLOC_CAPS); + ppa_sr_transaction_t *ppa_trans_desc = (ppa_sr_transaction_t *)heap_caps_calloc(1, sizeof(ppa_sr_transaction_t), PPA_MEM_ALLOC_CAPS); + ESP_GOTO_ON_FALSE(new_trans_elm && dma_trans_elm && dma_trans_desc && trans_on_picked_desc && ppa_trans_desc, ESP_ERR_NO_MEM, err, TAG, "no mem for transaction storage"); + if (mode == PPA_TRANS_MODE_BLOCKING) { + new_trans_elm->sem = xSemaphoreCreateBinary(); + ESP_GOTO_ON_FALSE(new_trans_elm->sem, ESP_ERR_NO_MEM, err, TAG, "no mem for transaction storage"); + } + esp_rom_printf("new trans addr: 0x%x\n", new_trans_elm); + memcpy(ppa_trans_desc, config, sizeof(ppa_sr_trans_config_t)); + + trans_on_picked_desc->sr_desc = ppa_trans_desc; + trans_on_picked_desc->ppa_engine = &sr_engine->base; + trans_on_picked_desc->trans_elm = new_trans_elm; + trans_on_picked_desc->trigger_periph = DMA2D_TRIG_PERIPH_PPA_SR; + + dma_trans_desc->tx_channel_num = 1; + dma_trans_desc->rx_channel_num = 1; + // TODO: reserved channels + dma_trans_desc->user_config = (void *)trans_on_picked_desc; + dma_trans_desc->on_job_picked = ppa_sr_transaction_on_picked; + + new_trans_elm->trans_desc = dma_trans_desc; + new_trans_elm->dma_trans_placeholder = dma_trans_elm; + + portENTER_CRITICAL(&sr_engine->base.spinlock); + if (sr_engine->base.in_accepting_trans_state) { + // Send transaction into PPA SR engine queue + STAILQ_INSERT_TAIL(&sr_engine->base.trans_stailq, new_trans_elm, entry); + } else { + ret = ESP_FAIL; + } + portEXIT_CRITICAL(&sr_engine->base.spinlock); + + if (ret != ESP_OK) { + ESP_LOGE(TAG, "SR engine cannot accept transaction now"); + goto err; + } + + TickType_t ticks_to_wait = (mode == PPA_TRANS_MODE_NON_BLOCKING) ? 0 : portMAX_DELAY; + if (xSemaphoreTake(sr_engine->base.sem, ticks_to_wait) == pdTRUE) { + // Check if the transaction has already been started from the ISR + // If so, then the transaction should have been removed from queue at this moment (transaction completed) + bool found = false; + ppa_trans_t *temp = NULL; + portENTER_CRITICAL(&sr_engine->base.spinlock); + STAILQ_FOREACH(temp, &sr_engine->base.trans_stailq, entry) { + if (temp == new_trans_elm) { + found = true; + break; + } + } + portEXIT_CRITICAL(&sr_engine->base.spinlock); + if (found) { + ret = ppa_dma2d_enqueue(new_trans_elm); + if (ret != ESP_OK) { + portENTER_CRITICAL(&sr_engine->base.spinlock); + STAILQ_REMOVE(&sr_engine->base.trans_stailq, new_trans_elm, ppa_trans_s, entry); + portEXIT_CRITICAL(&sr_engine->base.spinlock); + xSemaphoreGive(sr_engine->base.sem); + goto err; + } + } else { + xSemaphoreGive(sr_engine->base.sem); + } + } + + if (mode == PPA_TRANS_MODE_BLOCKING) { + xSemaphoreTake(new_trans_elm->sem, portMAX_DELAY); // Given in the ISR + // Sanity check new_trans_elm not in trans_stailq anymore? 
(loop takes time tho) + ppa_recycle_transaction(new_trans_elm); + } + +err: + if (ret != ESP_OK) { + ppa_recycle_transaction(new_trans_elm); + } + return ret; +} + +esp_err_t ppa_do_blend(ppa_engine_handle_t ppa_engine, const ppa_blend_trans_config_t *config, ppa_trans_mode_t mode) +{ + return ESP_OK; +} + +esp_err_t ppa_do_fill(ppa_engine_handle_t ppa_engine, const ppa_fill_trans_config_t *config, ppa_trans_mode_t mode) +{ + return ESP_OK; +} diff --git a/components/hal/esp32p4/include/hal/ppa_ll.h b/components/hal/esp32p4/include/hal/ppa_ll.h index 7ab8249bbd..0b3e82f4d7 100644 --- a/components/hal/esp32p4/include/hal/ppa_ll.h +++ b/components/hal/esp32p4/include/hal/ppa_ll.h @@ -24,15 +24,8 @@ extern "C" { #define PPA_LL_BLEND0_CLUT_MEM_ADDR_OFFSET 0x400 #define PPA_LL_BLEND1_CLUT_MEM_ADDR_OFFSET 0x800 -/** - * @brief Enumeration of alpha value transformation mode - */ -typedef enum { - PPA_LL_RX_ALPHA_NO_CHANGE, /*!< Do not replace alpha value. If input format does not contain alpha info, alpha value 255 will be used. */ - PPA_LL_RX_ALPHA_FIX_VALUE, /*!< Replace the alpha value in received pixel with a new, fixed alpha value */ - PPA_LL_RX_ALPHA_SCALE, /*!< Scale the alpha value in received pixel to be a new alpha value */ - PPA_LL_RX_ALPHA_INVERT, /*!< Invert the alpha value in received pixel */ -} ppa_ll_rx_alpha_mode_t; +#define PPA_LL_SR_SCALING_INT_MAX PPA_SR_SCAL_X_INT_V +#define PPA_LL_SR_SCALING_FRAG_MAX PPA_SR_SCAL_X_FRAG_V /** * @brief Enumeration of PPA blending mode @@ -231,7 +224,7 @@ static inline void ppa_ll_sr_set_tx_color_mode(ppa_dev_t *dev, ppa_sr_color_mode } /** - * @brief Set YUV to RGB protocol when PPA SR pixel color space conversion from RX to TX is YUV to RGB + * @brief Set YUV to RGB protocol when PPA SR RX pixel color space is YUV * * @param dev Peripheral instance address * @param std One of the RGB-YUV conversion standards in color_conv_std_rgb_yuv_t @@ -252,7 +245,7 @@ static inline void ppa_ll_sr_set_yuv2rgb_std(ppa_dev_t *dev, color_conv_std_rgb_ } /** - * @brief Set RGB to YUV protocol when PPA SR pixel color space conversion from RX to TX is RGB to YUV + * @brief Set RGB to YUV protocol when PPA SR TX pixel color space is YUV * * @param dev Peripheral instance address * @param std One of the RGB-YUV conversion standards in color_conv_std_rgb_yuv_t @@ -342,31 +335,32 @@ static inline void ppa_ll_sr_enable_rx_byte_swap(ppa_dev_t *dev, bool enable) * @brief Configure PPA SR alpha value transformation mode * * @param dev Peripheral instance address - * @param mode Alpha value transformation mode, one of the values in ppa_ll_rx_alpha_mode_t - * @param val When PPA_LL_RX_ALPHA_FIX_VALUE mode is selected, val is the alpha value to be replaced with (output_alpha = val) - * When PPA_LL_RX_ALPHA_SCALE mode is selected, val/256 is the multiplier to the input alpha value (output_alpha = input_alpha * val / 256) + * @param mode Alpha value transformation mode, one of the values in ppa_alpha_mode_t + * @param val When PPA_ALPHA_FIX_VALUE mode is selected, val is the alpha value to be replaced with (output_alpha = val) + * When PPA_ALPHA_SCALE mode is selected, val/256 is the multiplier to the input alpha value (output_alpha = input_alpha * val / 256) * When other modes are selected, this field is not used */ -static inline void ppa_ll_sr_configure_rx_alpha(ppa_dev_t *dev, ppa_ll_rx_alpha_mode_t mode, uint32_t val) +static inline void ppa_ll_sr_configure_rx_alpha(ppa_dev_t *dev, ppa_alpha_mode_t mode, uint32_t val) { switch (mode) { - case PPA_LL_RX_ALPHA_NO_CHANGE: 
+ case PPA_ALPHA_NO_CHANGE: dev->sr_fix_alpha.sr_rx_alpha_mod = 0; dev->sr_fix_alpha.sr_rx_alpha_inv = 0; break; - case PPA_LL_RX_ALPHA_FIX_VALUE: + case PPA_ALPHA_FIX_VALUE: dev->sr_fix_alpha.sr_rx_alpha_mod = 1; HAL_FORCE_MODIFY_U32_REG_FIELD(dev->sr_fix_alpha, sr_rx_fix_alpha, val); dev->sr_fix_alpha.sr_rx_alpha_inv = 0; break; - case PPA_LL_RX_ALPHA_SCALE: + case PPA_ALPHA_SCALE: dev->sr_fix_alpha.sr_rx_alpha_mod = 2; HAL_FORCE_MODIFY_U32_REG_FIELD(dev->sr_fix_alpha, sr_rx_fix_alpha, val); dev->sr_fix_alpha.sr_rx_alpha_inv = 0; break; - case PPA_LL_RX_ALPHA_INVERT: + case PPA_ALPHA_INVERT: dev->sr_fix_alpha.sr_rx_alpha_mod = 0; dev->sr_fix_alpha.sr_rx_alpha_inv = 1; + break; default: // Unsupported alpha transformation mode abort(); @@ -570,31 +564,32 @@ static inline void ppa_ll_blend_enable_rx_fg_byte_swap(ppa_dev_t *dev, bool enab * @brief Configure PPA blending input background alpha value transformation mode * * @param dev Peripheral instance address - * @param mode Alpha value transformation mode, one of the values in ppa_ll_rx_alpha_mode_t - * @param val When PPA_LL_RX_ALPHA_FIX_VALUE mode is selected, val is the alpha value to be replaced with (output_alpha = val) - * When PPA_LL_RX_ALPHA_SCALE mode is selected, val/256 is the multiplier to the input alpha value (output_alpha = input_alpha * val / 256) + * @param mode Alpha value transformation mode, one of the values in ppa_alpha_mode_t + * @param val When PPA_ALPHA_FIX_VALUE mode is selected, val is the alpha value to be replaced with (output_alpha = val) + * When PPA_ALPHA_SCALE mode is selected, val/256 is the multiplier to the input alpha value (output_alpha = input_alpha * val / 256) * When other modes are selected, this field is not used */ -static inline void ppa_ll_blend_configure_rx_bg_alpha(ppa_dev_t *dev, ppa_ll_rx_alpha_mode_t mode, uint32_t val) +static inline void ppa_ll_blend_configure_rx_bg_alpha(ppa_dev_t *dev, ppa_alpha_mode_t mode, uint32_t val) { switch (mode) { - case PPA_LL_RX_ALPHA_NO_CHANGE: + case PPA_ALPHA_NO_CHANGE: dev->blend_fix_alpha.blend0_rx_alpha_mod = 0; dev->blend_fix_alpha.blend0_rx_alpha_inv = 0; break; - case PPA_LL_RX_ALPHA_FIX_VALUE: + case PPA_ALPHA_FIX_VALUE: dev->blend_fix_alpha.blend0_rx_alpha_mod = 1; HAL_FORCE_MODIFY_U32_REG_FIELD(dev->blend_fix_alpha, blend0_rx_fix_alpha, val); dev->blend_fix_alpha.blend0_rx_alpha_inv = 0; break; - case PPA_LL_RX_ALPHA_SCALE: + case PPA_ALPHA_SCALE: dev->blend_fix_alpha.blend0_rx_alpha_mod = 2; HAL_FORCE_MODIFY_U32_REG_FIELD(dev->blend_fix_alpha, blend0_rx_fix_alpha, val); dev->blend_fix_alpha.blend0_rx_alpha_inv = 0; break; - case PPA_LL_RX_ALPHA_INVERT: + case PPA_ALPHA_INVERT: dev->blend_fix_alpha.blend0_rx_alpha_mod = 0; dev->blend_fix_alpha.blend0_rx_alpha_inv = 1; + break; default: // Unsupported alpha transformation mode abort(); @@ -605,31 +600,32 @@ static inline void ppa_ll_blend_configure_rx_bg_alpha(ppa_dev_t *dev, ppa_ll_rx_ * @brief Configure PPA blending input foreground alpha value transformation mode * * @param dev Peripheral instance address - * @param mode Alpha value transformation mode, one of the values in ppa_ll_rx_alpha_mode_t - * @param val When PPA_LL_RX_ALPHA_FIX_VALUE mode is selected, val is the alpha value to be replaced with (output_alpha = val) - * When PPA_LL_RX_ALPHA_SCALE mode is selected, val/256 is the multiplier to the input alpha value (output_alpha = input_alpha * val / 256) + * @param mode Alpha value transformation mode, one of the values in ppa_alpha_mode_t + * @param val When PPA_ALPHA_FIX_VALUE mode is 
selected, val is the alpha value to be replaced with (output_alpha = val) + * When PPA_ALPHA_SCALE mode is selected, val/256 is the multiplier to the input alpha value (output_alpha = input_alpha * val / 256) * When other modes are selected, this field is not used */ -static inline void ppa_ll_blend_configure_rx_fg_alpha(ppa_dev_t *dev, ppa_ll_rx_alpha_mode_t mode, uint32_t val) +static inline void ppa_ll_blend_configure_rx_fg_alpha(ppa_dev_t *dev, ppa_alpha_mode_t mode, uint32_t val) { switch (mode) { - case PPA_LL_RX_ALPHA_NO_CHANGE: + case PPA_ALPHA_NO_CHANGE: dev->blend_fix_alpha.blend1_rx_alpha_mod = 0; dev->blend_fix_alpha.blend1_rx_alpha_inv = 0; break; - case PPA_LL_RX_ALPHA_FIX_VALUE: + case PPA_ALPHA_FIX_VALUE: dev->blend_fix_alpha.blend1_rx_alpha_mod = 1; HAL_FORCE_MODIFY_U32_REG_FIELD(dev->blend_fix_alpha, blend1_rx_fix_alpha, val); dev->blend_fix_alpha.blend1_rx_alpha_inv = 0; break; - case PPA_LL_RX_ALPHA_SCALE: + case PPA_ALPHA_SCALE: dev->blend_fix_alpha.blend1_rx_alpha_mod = 2; HAL_FORCE_MODIFY_U32_REG_FIELD(dev->blend_fix_alpha, blend1_rx_fix_alpha, val); dev->blend_fix_alpha.blend1_rx_alpha_inv = 0; break; - case PPA_LL_RX_ALPHA_INVERT: + case PPA_ALPHA_INVERT: dev->blend_fix_alpha.blend1_rx_alpha_mod = 0; dev->blend_fix_alpha.blend1_rx_alpha_inv = 1; + break; default: // Unsupported alpha transformation mode abort(); diff --git a/components/hal/include/hal/dma2d_types.h b/components/hal/include/hal/dma2d_types.h index 835fcf139c..f19ef2d816 100644 --- a/components/hal/include/hal/dma2d_types.h +++ b/components/hal/include/hal/dma2d_types.h @@ -174,6 +174,10 @@ typedef enum { // B = 1.164 *(Y - 16) + 2.114 *(Cb - 128) // //*********************BT709***********************************// +// R/G/B [0 ... 255] +// Y [16 ... 235] +// Cb/Cr [16 ... 
240] + // 256 * Q = A[9:0] * x + B[10:0] * y + C[9:0] * z + D[17:0] #define DMA2D_COLOR_SPACE_CONV_PARAM_RGB2YUV_BT601 \ diff --git a/components/hal/include/hal/ppa_types.h b/components/hal/include/hal/ppa_types.h index 1be3e6c9e8..2f9d2678b5 100644 --- a/components/hal/include/hal/ppa_types.h +++ b/components/hal/include/hal/ppa_types.h @@ -39,6 +39,11 @@ typedef enum { PPA_SR_COLOR_MODE_RGB888 = COLOR_TYPE_ID(COLOR_SPACE_RGB, COLOR_PIXEL_RGB888), /*!< PPA SR color mode: RGB888 */ PPA_SR_COLOR_MODE_RGB565 = COLOR_TYPE_ID(COLOR_SPACE_RGB, COLOR_PIXEL_RGB565), /*!< PPA SR color mode: RGB565 */ PPA_SR_COLOR_MODE_YUV420 = COLOR_TYPE_ID(COLOR_SPACE_YUV, COLOR_PIXEL_YUV420), /*!< PPA SR color mode: YUV420 */ + PPA_SR_COLOR_MODE_YUV444 = COLOR_TYPE_ID(COLOR_SPACE_YUV, COLOR_PIXEL_YUV444), /*!< PPA SR color mode: YUV444 (limited range only)*/ + PPA_SR_COLOR_MODE_YUV422 = COLOR_TYPE_ID(COLOR_SPACE_YUV, COLOR_PIXEL_YUV422), /*!< PPA SR color mode: YUV422 (input only, limited range only) */ + // YUV444 and YUV422 not supported by PPA hardware, but seems like we can use 2D-DMA to do conversion before sending into and after coming out from the PPA module + // If in_pic is YUV444/422, then TX DMA channnel could do DMA2D_CSC_TX_YUV444/422_TO_RGB888_601/709, so PPA in_color_mode is RGB888 + // If out_pic is YUV444, then RX DMA channel could do DMA2D_CSC_RX_YUV420_TO_YUV444, so PPA out_color_mode is YUV420 } ppa_sr_color_mode_t; /** @@ -54,6 +59,16 @@ typedef enum { PPA_BLEND_COLOR_MODE_A4 = COLOR_TYPE_ID(COLOR_SPACE_ALPHA, COLOR_PIXEL_A4), /*!< PPA Blending color mode: A4, only available on blending foreground input */ } ppa_blend_color_mode_t; +/** + * @brief Enumeration of PPA alpha compositing mode + */ +typedef enum { + PPA_ALPHA_NO_CHANGE = 0, /*!< Do not replace alpha value. If input format does not contain alpha info, alpha value 255 will be used. 
*/ + PPA_ALPHA_FIX_VALUE, /*!< Replace the alpha value in received pixel with a new, fixed alpha value */ + PPA_ALPHA_SCALE, /*!< Scale the alpha value in received pixel to be a new alpha value */ + PPA_ALPHA_INVERT, /*!< Invert the alpha value in received pixel */ +} ppa_alpha_mode_t; + #ifdef __cplusplus } #endif diff --git a/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in b/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in index caea62cf0a..d84bf8885a 100644 --- a/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in +++ b/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in @@ -267,6 +267,10 @@ config SOC_GP_LDO_SUPPORTED bool default y +config SOC_PPA_SUPPORTED + bool + default y + config SOC_LIGHT_SLEEP_SUPPORTED bool default y diff --git a/components/soc/esp32p4/include/soc/dma2d_channel.h b/components/soc/esp32p4/include/soc/dma2d_channel.h index f960fb1c4a..ed4ed0c8c5 100644 --- a/components/soc/esp32p4/include/soc/dma2d_channel.h +++ b/components/soc/esp32p4/include/soc/dma2d_channel.h @@ -14,6 +14,6 @@ #define SOC_DMA2D_TRIG_PERIPH_JPEG_TX (0) #define SOC_DMA2D_TRIG_PERIPH_PPA_SR_TX (1) -#define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_FG_TX (2) -#define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_BG_TX (3) +#define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_BG_TX (2) +#define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_FG_TX (3) #define SOC_DMA2D_TRIG_PERIPH_M2M_TX (-1) // Any value of 4 ~ 7, TX and RX do not have to use same ID value for M2M diff --git a/components/soc/esp32p4/include/soc/soc_caps.h b/components/soc/esp32p4/include/soc/soc_caps.h index 069710449f..389059740b 100644 --- a/components/soc/esp32p4/include/soc/soc_caps.h +++ b/components/soc/esp32p4/include/soc/soc_caps.h @@ -88,7 +88,7 @@ // #define SOC_TOUCH_SENSOR_SUPPORTED 1 //TODO: IDF-7477 #define SOC_RNG_SUPPORTED 1 #define SOC_GP_LDO_SUPPORTED 1 // General purpose LDO -// #define SOC_PPA_SUPPORTED 1 //TODO: IDF-6878 +#define SOC_PPA_SUPPORTED 1 #define SOC_LIGHT_SLEEP_SUPPORTED 1 #define SOC_DEEP_SLEEP_SUPPORTED 1 #define SOC_PM_SUPPORTED 1
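
For reference, below is a minimal sketch of how a caller might allocate the in/out picture buffers that the driver's whole-picture esp_cache_msync() calls expect (address and size both aligned to the data cache line size). The helper name, the local EXAMPLE_ALIGN_UP macro (the ALIGN_UP in ppa.c is file-local), and the MALLOC_CAP_DMA | MALLOC_CAP_8BIT capability choice are assumptions for illustration, not part of this patch; sizing follows the same w * h * bits_per_pixel / 8 formula used in ppa_sr_transaction_on_picked().

    #include <stdint.h>
    #include <stddef.h>
    #include "esp_heap_caps.h"
    #include "hal/cache_hal.h"
    #include "hal/cache_ll.h"

    #define EXAMPLE_ALIGN_UP(num, align)    (((num) + ((align) - 1)) & ~((align) - 1))

    // Hypothetical helper: allocate a zero-initialized picture buffer whose address and
    // size are both aligned to the internal data cache line size, so that whole-picture
    // cache writeback/invalidate operations on it are safe.
    void *ppa_example_alloc_pic_buffer(uint32_t pic_w, uint32_t pic_h, uint32_t bits_per_pixel)
    {
        uint32_t cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA);
        size_t buf_size = EXAMPLE_ALIGN_UP(pic_w * pic_h * bits_per_pixel / 8, cache_line_size);
        return heap_caps_aligned_calloc(cache_line_size, 1, buf_size, MALLOC_CAP_DMA | MALLOC_CAP_8BIT);
    }

A buffer obtained this way could be passed as in_buffer/out_buffer in the usage sketch near the top of ppa.c and freed with heap_caps_free() once the transaction has completed.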