feat(ppa): add PPA driver support for ESP32P4

Renamed SR to SRM

Author: Song Ruo Jing
Date:   2024-03-04 20:50:29 +08:00
Parent: 368ef8b472
Commit: a86e03cba3

8 changed files with 313 additions and 302 deletions

View File

@@ -25,7 +25,7 @@ typedef struct ppa_invoker_t *ppa_invoker_handle_t;
* These flags are supposed to be used to specify the PPA operations that are going to be used by the invoker, so that
* the corresponding engines can be acquired when registering the invoker with `ppa_register_invoker`.
*/
-#define PPA_OPERATION_FLAG_SR (1 << 0)
+#define PPA_OPERATION_FLAG_SRM (1 << 0)
#define PPA_OPERATION_FLAG_BLEND (1 << 1)
#define PPA_OPERATION_FLAG_FILL (1 << 2)
@@ -75,7 +75,7 @@ typedef struct {
ppa_trans_mode_t mode; /*!< Determines whether to block inside the operation functions, see `ppa_trans_mode_t` */
} ppa_trans_config_t;
-#define PPA_SR_OPERATION_CONFIG struct { \
+#define PPA_SRM_OPERATION_CONFIG struct { \
void *in_buffer; /*!< TODO: could be a buffer list, link descriptors together, process a batch
uint32_t batch_num; However, is it necessary? psram can not store too many pictures */ \
uint32_t in_pic_w; \
@@ -91,14 +91,14 @@ typedef struct {
uint32_t out_block_offset_x; \
uint32_t out_block_offset_y; \
\
-ppa_sr_rotation_angle_t rotation_angle; \
+ppa_srm_rotation_angle_t rotation_angle; \
float scale_x; \
float scale_y; \
bool mirror_x; \
bool mirror_y; \
\
struct { \
-ppa_sr_color_mode_t mode; \
+ppa_srm_color_mode_t mode; \
color_range_t yuv_range; \
color_conv_std_rgb_yuv_t yuv_std; \
bool rgb_swap; \
@@ -110,22 +110,22 @@ typedef struct {
} in_color; \
\
struct { \
-ppa_sr_color_mode_t mode; \
+ppa_srm_color_mode_t mode; \
color_range_t yuv_range; \
color_conv_std_rgb_yuv_t yuv_std; \
} out_color; \
}
/**
-* @brief A collection of configuration items to perform a PPA SR operation
+* @brief A collection of configuration items to perform a PPA SRM operation
*/
-typedef PPA_SR_OPERATION_CONFIG ppa_sr_operation_config_t;
+typedef PPA_SRM_OPERATION_CONFIG ppa_srm_operation_config_t;
/**
-* @brief Perform a scaling-and-rotating (SR) operation to a picture
+* @brief Perform a scaling-rotating-mirroring (SRM) operation to a picture
*
-* @param[in] ppa_invoker PPA invoker handle that has acquired the PPA SR engine
+* @param[in] ppa_invoker PPA invoker handle that has acquired the PPA SRM engine
-* @param[in] oper_config Pointer to a collection of configurations for the SR operation, ppa_sr_operation_config_t
+* @param[in] oper_config Pointer to a collection of configurations for the SRM operation, ppa_srm_operation_config_t
* @param[in] trans_config Pointer to a collection of configurations for the transaction, ppa_trans_config_t
*
* @return
@@ -134,7 +134,7 @@ typedef PPA_SR_OPERATION_CONFIG ppa_sr_operation_config_t;
* - ESP_ERR_NO_MEM:
* - ESP_FAIL:
*/
-esp_err_t ppa_do_scale_and_rotate(ppa_invoker_handle_t ppa_invoker, const ppa_sr_operation_config_t *oper_config, const ppa_trans_config_t *trans_config);
+esp_err_t ppa_do_scale_rotate_mirror(ppa_invoker_handle_t ppa_invoker, const ppa_srm_operation_config_t *oper_config, const ppa_trans_config_t *trans_config);
typedef struct {
void *in_bg_buffer;
@@ -234,7 +234,7 @@ esp_err_t ppa_do_fill(ppa_invoker_handle_t ppa_invoker, const ppa_fill_operation
// argb color conversion (bypass blend)
-// SR and Blending are independent, can work at the same time
+// SRM and Blending are independent, can work at the same time
// Fill is in blend, so fill and blend cannot work at the same time
// Consider blocking and non-blocking options
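For orientation, here is a minimal usage sketch of the renamed public API declared above. It is hypothetical and not part of the commit: only fields visible in this diff are set, the enumerators PPA_SRM_ROTATION_ANGLE_0 and PPA_TRANS_MODE_BLOCKING are assumed names, and the real ppa_srm_operation_config_t carries additional picture, block, and color members.

// Hypothetical sketch of calling the renamed SRM API; values are illustrative only.
ppa_invoker_config_t invoker_config = {
    .operation_flag = PPA_OPERATION_FLAG_SRM,      // acquire the SRM engine for this invoker
};
ppa_invoker_handle_t invoker = NULL;
ESP_ERROR_CHECK(ppa_register_invoker(&invoker_config, &invoker));

ppa_srm_operation_config_t oper_config = {
    .in_buffer = in_buf,                           // input picture buffer, allocated elsewhere
    .rotation_angle = PPA_SRM_ROTATION_ANGLE_0,    // assumed enumerator of ppa_srm_rotation_angle_t
    .scale_x = 2.0f,
    .scale_y = 2.0f,
    .mirror_x = false,
    .mirror_y = false,
    // ... picture/block geometry and in_color/out_color fields omitted ...
};
ppa_trans_config_t trans_config = {
    .mode = PPA_TRANS_MODE_BLOCKING,               // assumed counterpart of PPA_TRANS_MODE_NON_BLOCKING
};
ESP_ERROR_CHECK(ppa_do_scale_rotate_mirror(invoker, &oper_config, &trans_config));
ESP_ERROR_CHECK(ppa_unregister_invoker(invoker));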

View File

@@ -49,19 +49,19 @@ void ppa_hal_deinit(ppa_hal_context_t *hal)
hal->dev = NULL;
}
-// PPA module contains SR engine and Blending engine
+// PPA module contains SRM engine and Blending engine
typedef struct ppa_engine_t ppa_engine_t;
typedef struct ppa_invoker_t ppa_invoker_t;
typedef struct {
-PPA_SR_OPERATION_CONFIG;
+PPA_SRM_OPERATION_CONFIG;
uint32_t scale_x_int;
uint32_t scale_x_frag;
uint32_t scale_y_int;
uint32_t scale_y_frag;
-} ppa_sr_oper_t;
+} ppa_srm_oper_t;
typedef ppa_blend_operation_config_t ppa_blend_oper_t;
@@ -77,7 +77,7 @@ typedef struct ppa_trans_s {
typedef struct {
union {
-ppa_sr_oper_t *sr_desc;
+ppa_srm_oper_t *srm_desc;
ppa_blend_oper_t *blend_desc;
ppa_fill_oper_t *fill_desc;
void *op_desc;
@@ -97,11 +97,11 @@ struct ppa_engine_t {
// dma2d_rx_event_callbacks_t event_cbs;
};
-typedef struct ppa_sr_engine_t {
+typedef struct ppa_srm_engine_t {
ppa_engine_t base;
dma2d_descriptor_t *dma_tx_desc;
dma2d_descriptor_t *dma_rx_desc;
-} ppa_sr_engine_t;
+} ppa_srm_engine_t;
typedef struct ppa_blend_engine_t {
ppa_engine_t base;
@@ -111,9 +111,9 @@ typedef struct ppa_blend_engine_t {
} ppa_blend_engine_t;
struct ppa_invoker_t {
-ppa_engine_t *sr_engine;
+ppa_engine_t *srm_engine;
ppa_engine_t *blending_engine;
-uint32_t sr_trans_cnt;
+uint32_t srm_trans_cnt;
uint32_t blending_trans_cnt;
portMUX_TYPE spinlock;
bool in_accepting_trans_state;
@@ -122,9 +122,10 @@ struct ppa_invoker_t {
};
typedef enum {
-PPA_OPERATION_SR,
+PPA_OPERATION_SRM,
PPA_OPERATION_BLEND,
PPA_OPERATION_FILL,
+PPA_OPERATION_NUM,
} ppa_operation_t;
typedef struct ppa_platform_t {
@@ -132,9 +133,9 @@ typedef struct ppa_platform_t {
portMUX_TYPE spinlock; // platform level spinlock
ppa_hal_context_t hal;
dma2d_pool_handle_t dma2d_pool_handle;
-ppa_sr_engine_t *sr;
+ppa_srm_engine_t *srm;
ppa_blend_engine_t *blending;
-uint32_t sr_engine_ref_count;
+uint32_t srm_engine_ref_count;
uint32_t blend_engine_ref_count;
uint32_t dma_desc_mem_size;
} ppa_platform_t;
@@ -152,10 +153,16 @@ typedef struct {
static esp_err_t ppa_engine_acquire(const ppa_engine_config_t *config, ppa_engine_t **ret_engine);
static esp_err_t ppa_engine_release(ppa_engine_t *ppa_engine);
-static bool ppa_sr_transaction_on_picked(uint32_t num_chans, const dma2d_trans_channel_info_t *dma2d_chans, void *user_config);
+static bool ppa_srm_transaction_on_picked(uint32_t num_chans, const dma2d_trans_channel_info_t *dma2d_chans, void *user_config);
static bool ppa_blend_transaction_on_picked(uint32_t num_chans, const dma2d_trans_channel_info_t *dma2d_chans, void *user_config);
static bool ppa_fill_transaction_on_picked(uint32_t num_chans, const dma2d_trans_channel_info_t *dma2d_chans, void *user_config);
+const dma2d_trans_on_picked_callback_t ppa_oper_trans_on_picked_func[PPA_OPERATION_NUM] = {
+ppa_srm_transaction_on_picked,
+ppa_blend_transaction_on_picked,
+ppa_fill_transaction_on_picked,
+};
// extern uint32_t dma2d_tx_channel_reserved_mask[SOC_DMA2D_GROUPS];
// extern uint32_t dma2d_rx_channel_reserved_mask[SOC_DMA2D_GROUPS];
// static uint32_t ppa_specified_tx_channel_mask = 0;
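The lookup table added above replaces the chained ternary that previously selected the on_job_picked callback in ppa_prepare_trans_elm (see the hunk around old line 753 further below). A minimal sketch of the new dispatch, using only names that appear in this diff:

// Sketch of the dispatch pattern introduced above; not a verbatim excerpt from the driver.
ppa_operation_t op = PPA_OPERATION_SRM;
dma2d_trans_on_picked_callback_t on_picked = ppa_oper_trans_on_picked_func[op]; // selects ppa_srm_transaction_on_picked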
@@ -173,7 +180,7 @@ static esp_err_t ppa_engine_acquire(const ppa_engine_config_t *config, ppa_engin
{
esp_err_t ret = ESP_OK;
ESP_RETURN_ON_FALSE(config && ret_engine, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
-ESP_RETURN_ON_FALSE(config->engine == PPA_ENGINE_TYPE_SR || config->engine == PPA_ENGINE_TYPE_BLEND, ESP_ERR_INVALID_ARG, TAG, "invalid engine");
+ESP_RETURN_ON_FALSE(config->engine == PPA_ENGINE_TYPE_SRM || config->engine == PPA_ENGINE_TYPE_BLEND, ESP_ERR_INVALID_ARG, TAG, "invalid engine");
*ret_engine = NULL;
@@ -185,39 +192,42 @@ static esp_err_t ppa_engine_acquire(const ppa_engine_config_t *config, ppa_engin
s_platform.dma_desc_mem_size = ALIGN_UP(sizeof(dma2d_descriptor_align8_t), alignment);
}
-if (config->engine == PPA_ENGINE_TYPE_SR) {
+if (config->engine == PPA_ENGINE_TYPE_SRM) {
-if (!s_platform.sr) {
+if (!s_platform.srm) {
-ppa_sr_engine_t *sr_engine = heap_caps_calloc(1, sizeof(ppa_sr_engine_t), PPA_MEM_ALLOC_CAPS);
+ppa_srm_engine_t *srm_engine = heap_caps_calloc(1, sizeof(ppa_srm_engine_t), PPA_MEM_ALLOC_CAPS);
-SemaphoreHandle_t sr_sem = xSemaphoreCreateBinaryWithCaps(PPA_MEM_ALLOC_CAPS);
+SemaphoreHandle_t srm_sem = xSemaphoreCreateBinaryWithCaps(PPA_MEM_ALLOC_CAPS);
-dma2d_descriptor_t *sr_tx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS);
+dma2d_descriptor_t *srm_tx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS);
-dma2d_descriptor_t *sr_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS);
+dma2d_descriptor_t *srm_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(alignment, 1, s_platform.dma_desc_mem_size, MALLOC_CAP_DMA | PPA_MEM_ALLOC_CAPS);
-if (sr_engine && sr_sem && sr_tx_dma_desc && sr_rx_dma_desc) {
+if (srm_engine && srm_sem && srm_tx_dma_desc && srm_rx_dma_desc) {
-sr_engine->dma_tx_desc = sr_tx_dma_desc;
+srm_engine->dma_tx_desc = srm_tx_dma_desc;
-sr_engine->dma_rx_desc = sr_rx_dma_desc;
+srm_engine->dma_rx_desc = srm_rx_dma_desc;
-sr_engine->base.type = PPA_ENGINE_TYPE_SR;
+srm_engine->base.type = PPA_ENGINE_TYPE_SRM;
-sr_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
+srm_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
-sr_engine->base.sem = sr_sem;
+srm_engine->base.sem = srm_sem;
-xSemaphoreGive(sr_engine->base.sem);
+xSemaphoreGive(srm_engine->base.sem);
-// sr_engine->base.in_accepting_trans_state = true;
+// srm_engine->base.in_accepting_trans_state = true;
-STAILQ_INIT(&sr_engine->base.trans_stailq);
+STAILQ_INIT(&srm_engine->base.trans_stailq);
-// sr_engine->base.event_cbs
+// srm_engine->base.event_cbs
-s_platform.sr = sr_engine;
+s_platform.srm = srm_engine;
-s_platform.sr_engine_ref_count++;
+s_platform.srm_engine_ref_count++;
-*ret_engine = &sr_engine->base;
+*ret_engine = &srm_engine->base;
+// TODO: Register PPA interrupt? Useful for SRM parameter error. If SRM parameter error, blocks at 2D-DMA, transaction can never finish, stuck...
+// need a way to force end
} else {
ret = ESP_ERR_NO_MEM;
-ESP_LOGE(TAG, "no mem to register PPA SR engine");
+ESP_LOGE(TAG, "no mem to register PPA SRM engine");
-free(sr_engine);
+free(srm_engine);
-if (sr_sem) {
+if (srm_sem) {
-vSemaphoreDeleteWithCaps(sr_sem);
+vSemaphoreDeleteWithCaps(srm_sem);
}
-free(sr_tx_dma_desc);
+free(srm_tx_dma_desc);
-free(sr_rx_dma_desc);
+free(srm_rx_dma_desc);
}
} else {
-// SR engine already registered
+// SRM engine already registered
-s_platform.sr_engine_ref_count++;
+s_platform.srm_engine_ref_count++;
-*ret_engine = &s_platform.sr->base;
+*ret_engine = &s_platform.srm->base;
}
} else if (config->engine == PPA_ENGINE_TYPE_BLEND) {
if (!s_platform.blending) {
@@ -295,23 +305,23 @@ static esp_err_t ppa_engine_release(ppa_engine_t *ppa_engine)
ESP_RETURN_ON_FALSE(ppa_engine, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
_lock_acquire(&s_platform.mutex);
-if (ppa_engine->type == PPA_ENGINE_TYPE_SR) {
+if (ppa_engine->type == PPA_ENGINE_TYPE_SRM) {
-ppa_sr_engine_t *sr_engine = __containerof(ppa_engine, ppa_sr_engine_t, base);
+ppa_srm_engine_t *srm_engine = __containerof(ppa_engine, ppa_srm_engine_t, base);
-s_platform.sr_engine_ref_count--;
+s_platform.srm_engine_ref_count--;
-if (s_platform.sr_engine_ref_count == 0) {
+if (s_platform.srm_engine_ref_count == 0) {
-// // Stop accepting new transactions to SR engine
+// // Stop accepting new transactions to SRM engine
-// portENTER_CRITICAL(&sr_engine->base.spinlock);
+// portENTER_CRITICAL(&srm_engine->base.spinlock);
-// sr_engine->base.in_accepting_trans_state = false;
+// srm_engine->base.in_accepting_trans_state = false;
-// portEXIT_CRITICAL(&sr_engine->base.spinlock);
+// portEXIT_CRITICAL(&srm_engine->base.spinlock);
// // Wait until all transactions get processed
-// while (!STAILQ_EMPTY(&sr_engine->base.trans_stailq)); // TODO: Think twice, looks like I am not able to use engine semaphore to decide
+// while (!STAILQ_EMPTY(&srm_engine->base.trans_stailq)); // TODO: Think twice, looks like I am not able to use engine semaphore to decide
-assert(STAILQ_EMPTY(&sr_engine->base.trans_stailq));
+assert(STAILQ_EMPTY(&srm_engine->base.trans_stailq));
// Now, time to free
-s_platform.sr = NULL;
+s_platform.srm = NULL;
-free(sr_engine->dma_tx_desc);
+free(srm_engine->dma_tx_desc);
-free(sr_engine->dma_rx_desc);
+free(srm_engine->dma_rx_desc);
-vSemaphoreDeleteWithCaps(sr_engine->base.sem);
+vSemaphoreDeleteWithCaps(srm_engine->base.sem);
-free(sr_engine);
+free(srm_engine);
}
} else if (ppa_engine->type == PPA_ENGINE_TYPE_BLEND) {
ppa_blend_engine_t *blending_engine = __containerof(ppa_engine, ppa_blend_engine_t, base);
@@ -334,8 +344,8 @@ static esp_err_t ppa_engine_release(ppa_engine_t *ppa_engine)
}
}
-if (!s_platform.sr && !s_platform.blending) {
+if (!s_platform.srm && !s_platform.blending) {
-assert(s_platform.sr_engine_ref_count == 0 && s_platform.blend_engine_ref_count == 0);
+assert(s_platform.srm_engine_ref_count == 0 && s_platform.blend_engine_ref_count == 0);
if (s_platform.dma2d_pool_handle) {
dma2d_release_pool(s_platform.dma2d_pool_handle); // TODO: check return value. If not ESP_OK, then must be error on other 2D-DMA clients :( Give a warning log?
@@ -363,11 +373,11 @@ esp_err_t ppa_register_invoker(const ppa_invoker_config_t *config, ppa_invoker_h
invoker->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
invoker->in_accepting_trans_state = true;
-if (config->operation_flag & PPA_OPERATION_FLAG_SR) {
+if (config->operation_flag & PPA_OPERATION_FLAG_SRM) {
ppa_engine_config_t engine_config = {
-.engine = PPA_ENGINE_TYPE_SR,
+.engine = PPA_ENGINE_TYPE_SRM,
};
-ESP_GOTO_ON_ERROR(ppa_engine_acquire(&engine_config, &invoker->sr_engine), err, TAG, "unable to acquire SR engine");
+ESP_GOTO_ON_ERROR(ppa_engine_acquire(&engine_config, &invoker->srm_engine), err, TAG, "unable to acquire SRM engine");
}
if (config->operation_flag & PPA_OPERATION_FLAG_BLEND || config->operation_flag & PPA_OPERATION_FLAG_FILL) {
ppa_engine_config_t engine_config = {
@@ -390,15 +400,15 @@ esp_err_t ppa_unregister_invoker(ppa_invoker_handle_t ppa_invoker)
bool do_unregister = false;
portENTER_CRITICAL(&ppa_invoker->spinlock);
-if (ppa_invoker->sr_trans_cnt == 0 && ppa_invoker->blending_trans_cnt == 0) {
+if (ppa_invoker->srm_trans_cnt == 0 && ppa_invoker->blending_trans_cnt == 0) {
ppa_invoker->in_accepting_trans_state = false;
do_unregister = true;
}
portEXIT_CRITICAL(&ppa_invoker->spinlock);
ESP_RETURN_ON_FALSE(do_unregister, ESP_ERR_INVALID_STATE, TAG, "invoker still has unprocessed trans");
-if (ppa_invoker->sr_engine) {
+if (ppa_invoker->srm_engine) {
-ppa_engine_release(ppa_invoker->sr_engine);
+ppa_engine_release(ppa_invoker->srm_engine);
}
if (ppa_invoker->blending_engine) {
ppa_engine_release(ppa_invoker->blending_engine);
@@ -445,30 +455,30 @@ esp_err_t ppa_unregister_invoker(ppa_invoker_handle_t ppa_invoker)
// // }
// // }
-// // // Register PPA SR engine
+// // // Register PPA SRM engine
-// // if (ret == ESP_OK && config->sr_engine_en && !s_platform.group[group_id]->sr) {
+// // if (ret == ESP_OK && config->srm_engine_en && !s_platform.group[group_id]->srm) {
-// // ppa_sr_engine_t *sr_engine = heap_caps_calloc(1, sizeof(ppa_sr_engine_t), PPA_MEM_ALLOC_CAPS);
+// // ppa_srm_engine_t *srm_engine = heap_caps_calloc(1, sizeof(ppa_srm_engine_t), PPA_MEM_ALLOC_CAPS);
-// // SemaphoreHandle_t sr_sem = xSemaphoreCreateBinary();
+// // SemaphoreHandle_t srm_sem = xSemaphoreCreateBinary();
-// // dma2d_descriptor_t *sr_tx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); // TODO: get cache line size by API
+// // dma2d_descriptor_t *srm_tx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); // TODO: get cache line size by API
-// // dma2d_descriptor_t *sr_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS);
+// // dma2d_descriptor_t *srm_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS);
-// // if (sr_engine && sr_sem && sr_tx_dma_desc && sr_rx_dma_desc) {
+// // if (srm_engine && srm_sem && srm_tx_dma_desc && srm_rx_dma_desc) {
-// // sr_engine->dma_tx_desc = sr_tx_dma_desc;
+// // srm_engine->dma_tx_desc = srm_tx_dma_desc;
-// // sr_engine->dma_rx_desc = sr_rx_dma_desc;
+// // srm_engine->dma_rx_desc = srm_rx_dma_desc;
-// // sr_engine->base.group = s_platform.group[group_id];
+// // srm_engine->base.group = s_platform.group[group_id];
-// // sr_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
+// // srm_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
-// // sr_engine->base.sem = sr_sem;
+// // srm_engine->base.sem = srm_sem;
-// // xSemaphoreGive(sr_engine->base.sem);
+// // xSemaphoreGive(srm_engine->base.sem);
-// // sr_engine->base.in_accepting_trans_state = true;
+// // srm_engine->base.in_accepting_trans_state = true;
-// // STAILQ_INIT(&sr_engine->base.trans_stailq);
+// // STAILQ_INIT(&srm_engine->base.trans_stailq);
-// // // sr_engine->base.event_cbs
+// // // srm_engine->base.event_cbs
-// // s_platform.group[group_id]->sr = sr_engine;
+// // s_platform.group[group_id]->srm = srm_engine;
// // } else {
// // ret = ESP_ERR_NO_MEM;
-// // ESP_LOGE(TAG, "no mem to register PPA SR engine");
+// // ESP_LOGE(TAG, "no mem to register PPA SRM engine");
-// // free(sr_engine);
+// // free(srm_engine);
-// // if (sr_sem) vSemaphoreDelete(sr_sem);
+// // if (srm_sem) vSemaphoreDelete(srm_sem);
-// // free(sr_tx_dma_desc);
+// // free(srm_tx_dma_desc);
-// // free(sr_rx_dma_desc);
+// // free(srm_rx_dma_desc);
// // }
// // }
@@ -506,12 +516,12 @@ esp_err_t ppa_unregister_invoker(ppa_invoker_handle_t ppa_invoker)
// // ppa_module_release
// bool new_group = false;
-// bool new_sr_engine = false;
+// bool new_srm_engine = false;
// bool new_blending_engine = false;
// ppa_group_t *pre_alloc_group = heap_caps_calloc(1, sizeof(ppa_group_t), PPA_MEM_ALLOC_CAPS);
-// ppa_sr_engine_t *sr_engine = NULL;
+// ppa_srm_engine_t *srm_engine = NULL;
// ppa_blend_engine_t *blending_engine = NULL;
-// SemaphoreHandle_t sr_sem = NULL, blending_sem = NULL;
+// SemaphoreHandle_t srm_sem = NULL, blending_sem = NULL;
// // portENTER_CRITICAL(&s_platform.spinlock);
// if (!s_platform.group[group_id]) {
@@ -544,33 +554,33 @@ esp_err_t ppa_unregister_invoker(ppa_invoker_handle_t ppa_invoker)
// }
// }
-// if (ret == ESP_OK && config->sr_engine_en) {
+// if (ret == ESP_OK && config->srm_engine_en) {
-// sr_engine = heap_caps_calloc(1, sizeof(ppa_sr_engine_t), PPA_MEM_ALLOC_CAPS);
+// srm_engine = heap_caps_calloc(1, sizeof(ppa_srm_engine_t), PPA_MEM_ALLOC_CAPS);
-// sr_sem = xSemaphoreCreateBinary();
+// srm_sem = xSemaphoreCreateBinary();
-// dma2d_descriptor_t *sr_tx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); // TODO: get cache line size by API
+// dma2d_descriptor_t *srm_tx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS); // TODO: get cache line size by API
-// dma2d_descriptor_t *sr_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS);
+// dma2d_descriptor_t *srm_rx_dma_desc = (dma2d_descriptor_t *)heap_caps_aligned_calloc(64, 1, 64, PPA_MEM_ALLOC_CAPS);
-// // Register PPA SR engine
+// // Register PPA SRM engine
// portENTER_CRITICAL(&s_platform.group[group_id]->spinlock);
-// if (!s_platform.group[group_id]->sr) {
+// if (!s_platform.group[group_id]->srm) {
-// if (sr_engine && sr_sem && sr_tx_dma_desc && sr_rx_dma_desc) {
+// if (srm_engine && srm_sem && srm_tx_dma_desc && srm_rx_dma_desc) {
-// new_sr_engine = true;
+// new_srm_engine = true;
-// sr_engine->dma_tx_desc = sr_tx_dma_desc;
+// srm_engine->dma_tx_desc = srm_tx_dma_desc;
-// sr_engine->dma_rx_desc = sr_rx_dma_desc;
+// srm_engine->dma_rx_desc = srm_rx_dma_desc;
-// sr_engine->base.group = s_platform.group[group_id];
+// srm_engine->base.group = s_platform.group[group_id];
-// sr_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
+// srm_engine->base.spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
-// sr_engine->base.sem = sr_sem;
+// srm_engine->base.sem = srm_sem;
-// xSemaphoreGive(sr_engine->base.sem);
+// xSemaphoreGive(srm_engine->base.sem);
-// sr_engine->base.in_accepting_trans_state = true;
+// srm_engine->base.in_accepting_trans_state = true;
-// STAILQ_INIT(&sr_engine->base.trans_stailq);
+// STAILQ_INIT(&srm_engine->base.trans_stailq);
-// // sr_engine->base.event_cbs
+// // srm_engine->base.event_cbs
-// s_platform.group[group_id]->sr = sr_engine;
+// s_platform.group[group_id]->srm = srm_engine;
// } else {
// ret = ESP_ERR_NO_MEM;
// }
// }
// portEXIT_CRITICAL(&s_platform.group[group_id]->spinlock);
// if (ret == ESP_ERR_NO_MEM) {
-// ESP_LOGE(TAG, "no mem to register PPA SR engine");
+// ESP_LOGE(TAG, "no mem to register PPA SRM engine");
// }
// }
@@ -606,9 +616,9 @@ esp_err_t ppa_unregister_invoker(ppa_invoker_handle_t ppa_invoker)
// }
// }
-// if (!new_sr_engine) {
+// if (!new_srm_engine) {
-// free(sr_engine);
+// free(srm_engine);
-// if (sr_sem) vSemaphoreDelete(sr_sem);
+// if (srm_sem) vSemaphoreDelete(srm_sem);
// // TODO: free desc
// }
// if (!new_blending_engine) {
@@ -636,18 +646,18 @@ esp_err_t ppa_unregister_invoker(ppa_invoker_handle_t ppa_invoker)
// bool do_deinitialize = false;
// int group_id = ppa_group->group_id;
-// ppa_sr_engine_t *sr_engine = ppa_group->sr;
+// ppa_srm_engine_t *srm_engine = ppa_group->srm;
// ppa_blend_engine_t *blending_engine = ppa_group->blending;
-// bool sr_no_waiting_trans = true;
+// bool srm_no_waiting_trans = true;
// bool blending_no_waiting_trans = true;
// // portENTER_CRITICAL(&s_platform.spinlock);
// portENTER_CRITICAL(&ppa_group->spinlock);
-// if (sr_engine) {
+// if (srm_engine) {
-// sr_engine->base.in_accepting_trans_state = false;
+// srm_engine->base.in_accepting_trans_state = false;
-// portENTER_CRITICAL(&sr_engine->base.spinlock);
+// portENTER_CRITICAL(&srm_engine->base.spinlock);
-// sr_no_waiting_trans = STAILQ_EMPTY(&sr_engine->base.trans_stailq);
+// srm_no_waiting_trans = STAILQ_EMPTY(&srm_engine->base.trans_stailq);
-// portEXIT_CRITICAL(&sr_engine->base.spinlock);
+// portEXIT_CRITICAL(&srm_engine->base.spinlock);
// }
// if (blending_engine) {
// blending_engine->base.in_accepting_trans_state = false;
@@ -656,9 +666,9 @@ esp_err_t ppa_unregister_invoker(ppa_invoker_handle_t ppa_invoker)
// portEXIT_CRITICAL(&blending_engine->base.spinlock);
// }
// portEXIT_CRITICAL(&ppa_group->spinlock);
-// if (sr_no_waiting_trans && blending_no_waiting_trans) {
+// if (srm_no_waiting_trans && blending_no_waiting_trans) {
// do_deinitialize = true;
-// ppa_group->sr = NULL;
+// ppa_group->srm = NULL;
// ppa_group->blending = NULL;
// s_platform.group[group_id] = NULL;
// } else {
@@ -667,11 +677,11 @@ esp_err_t ppa_unregister_invoker(ppa_invoker_handle_t ppa_invoker)
// // portEXIT_CRITICAL(&s_platform.spinlock);
// if (do_deinitialize) {
-// if (sr_engine) {
+// if (srm_engine) {
-// free(sr_engine->dma_tx_desc);
+// free(srm_engine->dma_tx_desc);
-// free(sr_engine->dma_rx_desc);
+// free(srm_engine->dma_rx_desc);
-// vSemaphoreDelete(sr_engine->base.sem);
+// vSemaphoreDelete(srm_engine->base.sem);
-// free(sr_engine);
+// free(srm_engine);
// }
// if (blending_engine) {
// free(blending_engine->dma_tx_bg_desc);
@@ -724,7 +734,7 @@ static esp_err_t ppa_prepare_trans_elm(ppa_invoker_handle_t ppa_invoker, ppa_eng
dma2d_trans_t *dma_trans_elm = (dma2d_trans_t *)heap_caps_calloc(1, SIZEOF_DMA2D_TRANS_T, PPA_MEM_ALLOC_CAPS);
dma2d_trans_config_t *dma_trans_desc = (dma2d_trans_config_t *)heap_caps_calloc(1, sizeof(dma2d_trans_config_t), PPA_MEM_ALLOC_CAPS);
ppa_dma2d_trans_on_picked_config_t *trans_on_picked_desc = (ppa_dma2d_trans_on_picked_config_t *)heap_caps_calloc(1, sizeof(ppa_dma2d_trans_on_picked_config_t), PPA_MEM_ALLOC_CAPS);
-size_t ppa_trans_desc_size = (ppa_operation == PPA_OPERATION_SR) ? sizeof(ppa_sr_oper_t) :
+size_t ppa_trans_desc_size = (ppa_operation == PPA_OPERATION_SRM) ? sizeof(ppa_srm_oper_t) :
(ppa_operation == PPA_OPERATION_BLEND) ? sizeof(ppa_blend_oper_t) :
(ppa_operation == PPA_OPERATION_FILL) ? sizeof(ppa_fill_oper_t) : 0;
assert(ppa_trans_desc_size != 0);
@@ -735,7 +745,7 @@ static esp_err_t ppa_prepare_trans_elm(ppa_invoker_handle_t ppa_invoker, ppa_eng
ESP_GOTO_ON_FALSE(new_trans_elm->sem, ESP_ERR_NO_MEM, err, TAG, "no mem for transaction storage");
}
-size_t cpy_size = (ppa_operation == PPA_OPERATION_SR) ? sizeof(ppa_sr_operation_config_t) :
+size_t cpy_size = (ppa_operation == PPA_OPERATION_SRM) ? sizeof(ppa_srm_operation_config_t) :
(ppa_operation == PPA_OPERATION_BLEND) ? sizeof(ppa_blend_operation_config_t) :
(ppa_operation == PPA_OPERATION_FILL) ? sizeof(ppa_fill_operation_config_t) : 0;
memcpy(ppa_trans_desc, oper_config, cpy_size);
@@ -743,9 +753,9 @@ static esp_err_t ppa_prepare_trans_elm(ppa_invoker_handle_t ppa_invoker, ppa_eng
trans_on_picked_desc->op_desc = ppa_trans_desc;
trans_on_picked_desc->ppa_engine = ppa_engine_base;
trans_on_picked_desc->trans_elm = new_trans_elm;
-trans_on_picked_desc->trigger_periph = (engine_type == PPA_ENGINE_TYPE_SR) ? DMA2D_TRIG_PERIPH_PPA_SR : DMA2D_TRIG_PERIPH_PPA_BLEND;
+trans_on_picked_desc->trigger_periph = (engine_type == PPA_ENGINE_TYPE_SRM) ? DMA2D_TRIG_PERIPH_PPA_SRM : DMA2D_TRIG_PERIPH_PPA_BLEND;
-dma_trans_desc->tx_channel_num = (ppa_operation == PPA_OPERATION_SR) ? 1 :
+dma_trans_desc->tx_channel_num = (ppa_operation == PPA_OPERATION_SRM) ? 1 :
(ppa_operation == PPA_OPERATION_BLEND) ? 2 : 0; // PPA_OPERATION_FILL does not have data input
dma_trans_desc->rx_channel_num = 1;
@@ -753,9 +763,7 @@ static esp_err_t ppa_prepare_trans_elm(ppa_invoker_handle_t ppa_invoker, ppa_eng
// dma_trans_desc->specified_rx_channel_mask = ppa_specified_rx_channel_mask;
dma_trans_desc->user_config = (void *)trans_on_picked_desc;
-dma_trans_desc->on_job_picked = (ppa_operation == PPA_OPERATION_SR) ? ppa_sr_transaction_on_picked :
-(ppa_operation == PPA_OPERATION_BLEND) ? ppa_blend_transaction_on_picked :
-(ppa_operation == PPA_OPERATION_FILL) ? ppa_fill_transaction_on_picked : NULL;
+dma_trans_desc->on_job_picked = ppa_oper_trans_on_picked_func[ppa_operation];
new_trans_elm->trans_desc = dma_trans_desc;
new_trans_elm->dma_trans_placeholder = dma_trans_elm;
@@ -779,8 +787,8 @@ static esp_err_t ppa_do_operation(ppa_invoker_handle_t ppa_invoker, ppa_engine_t
if (ppa_invoker->in_accepting_trans_state) {
// Send transaction into PPA engine queue
STAILQ_INSERT_TAIL(&ppa_engine_base->trans_stailq, trans_elm, entry);
-if (engine_type == PPA_ENGINE_TYPE_SR) {
+if (engine_type == PPA_ENGINE_TYPE_SRM) {
-ppa_invoker->sr_trans_cnt++;
+ppa_invoker->srm_trans_cnt++;
} else {
ppa_invoker->blending_trans_cnt++;
}
@@ -868,8 +876,8 @@ static bool ppa_transaction_done_cb(dma2d_channel_handle_t dma2d_chan, dma2d_eve
}
portENTER_CRITICAL_ISR(&invoker->spinlock);
-if (engine_type == PPA_ENGINE_TYPE_SR) {
+if (engine_type == PPA_ENGINE_TYPE_SRM) {
-invoker->sr_trans_cnt--;
+invoker->srm_trans_cnt--;
} else {
invoker->blending_trans_cnt--;
}
@@ -880,14 +888,14 @@ static bool ppa_transaction_done_cb(dma2d_channel_handle_t dma2d_chan, dma2d_eve
return need_yield;
}
-static bool ppa_sr_transaction_on_picked(uint32_t num_chans, const dma2d_trans_channel_info_t *dma2d_chans, void *user_config)
+static bool ppa_srm_transaction_on_picked(uint32_t num_chans, const dma2d_trans_channel_info_t *dma2d_chans, void *user_config)
{
assert(num_chans == 2 && dma2d_chans && user_config);
ppa_dma2d_trans_on_picked_config_t *trans_on_picked_desc = (ppa_dma2d_trans_on_picked_config_t *)user_config;
-assert(trans_on_picked_desc->trigger_periph == DMA2D_TRIG_PERIPH_PPA_SR && trans_on_picked_desc->sr_desc && trans_on_picked_desc->ppa_engine);
+assert(trans_on_picked_desc->trigger_periph == DMA2D_TRIG_PERIPH_PPA_SRM && trans_on_picked_desc->srm_desc && trans_on_picked_desc->ppa_engine);
-ppa_sr_oper_t *sr_trans_desc = trans_on_picked_desc->sr_desc;
+ppa_srm_oper_t *srm_trans_desc = trans_on_picked_desc->srm_desc;
-ppa_sr_engine_t *sr_engine = __containerof(trans_on_picked_desc->ppa_engine, ppa_sr_engine_t, base);
+ppa_srm_engine_t *srm_engine = __containerof(trans_on_picked_desc->ppa_engine, ppa_srm_engine_t, base);
// Free 2D-DMA transaction placeholder (transaction has already been moved out from 2D-DMA queue)
free(trans_on_picked_desc->trans_elm->dma_trans_placeholder);
@@ -905,62 +913,62 @@ static bool ppa_sr_transaction_on_picked(uint32_t num_chans, const dma2d_trans_c
// Write back and invalidate are performed on the entire picture (the window content is not continuous in the buffer)
// Write back in_buffer
color_space_pixel_format_t in_pixel_format = {
-.color_type_id = sr_trans_desc->in_color.mode,
+.color_type_id = srm_trans_desc->in_color.mode,
};
-uint32_t in_buffer_len = sr_trans_desc->in_pic_w * sr_trans_desc->in_pic_h * color_hal_pixel_format_get_bit_depth(in_pixel_format) / 8;
+uint32_t in_buffer_len = srm_trans_desc->in_pic_w * srm_trans_desc->in_pic_h * color_hal_pixel_format_get_bit_depth(in_pixel_format) / 8;
-esp_cache_msync(sr_trans_desc->in_buffer, in_buffer_len, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
+esp_cache_msync(srm_trans_desc->in_buffer, in_buffer_len, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
// Invalidate out_buffer
color_space_pixel_format_t out_pixel_format = {
-.color_type_id = sr_trans_desc->out_color.mode,
+.color_type_id = srm_trans_desc->out_color.mode,
};
-uint32_t out_buffer_len = sr_trans_desc->out_pic_w * sr_trans_desc->out_pic_h * color_hal_pixel_format_get_bit_depth(out_pixel_format) / 8;
+uint32_t out_buffer_len = srm_trans_desc->out_pic_w * srm_trans_desc->out_pic_h * color_hal_pixel_format_get_bit_depth(out_pixel_format) / 8;
-esp_cache_msync(sr_trans_desc->out_buffer, out_buffer_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
+esp_cache_msync(srm_trans_desc->out_buffer, out_buffer_len, ESP_CACHE_MSYNC_FLAG_DIR_M2C);
// Fill 2D-DMA descriptors
-sr_engine->dma_tx_desc->vb_size = sr_trans_desc->in_block_h;
+srm_engine->dma_tx_desc->vb_size = srm_trans_desc->in_block_h;
-sr_engine->dma_tx_desc->hb_length = sr_trans_desc->in_block_w;
+srm_engine->dma_tx_desc->hb_length = srm_trans_desc->in_block_w;
-sr_engine->dma_tx_desc->err_eof = 0;
+srm_engine->dma_tx_desc->err_eof = 0;
-sr_engine->dma_tx_desc->dma2d_en = 1;
+srm_engine->dma_tx_desc->dma2d_en = 1;
-sr_engine->dma_tx_desc->suc_eof = 1;
+srm_engine->dma_tx_desc->suc_eof = 1;
-sr_engine->dma_tx_desc->owner = DMA2D_DESCRIPTOR_BUFFER_OWNER_DMA;
+srm_engine->dma_tx_desc->owner = DMA2D_DESCRIPTOR_BUFFER_OWNER_DMA;
-sr_engine->dma_tx_desc->va_size = sr_trans_desc->in_pic_h;
+srm_engine->dma_tx_desc->va_size = srm_trans_desc->in_pic_h;
-sr_engine->dma_tx_desc->ha_length = sr_trans_desc->in_pic_w;
+srm_engine->dma_tx_desc->ha_length = srm_trans_desc->in_pic_w;
-sr_engine->dma_tx_desc->pbyte = dma2d_desc_pixel_format_to_pbyte_value(in_pixel_format);
+srm_engine->dma_tx_desc->pbyte = dma2d_desc_pixel_format_to_pbyte_value(in_pixel_format);
-sr_engine->dma_tx_desc->y = sr_trans_desc->in_block_offset_y;
+srm_engine->dma_tx_desc->y = srm_trans_desc->in_block_offset_y;
-sr_engine->dma_tx_desc->x = sr_trans_desc->in_block_offset_x;
+srm_engine->dma_tx_desc->x = srm_trans_desc->in_block_offset_x;
-sr_engine->dma_tx_desc->mode = DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE;
+srm_engine->dma_tx_desc->mode = DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE;
-sr_engine->dma_tx_desc->buffer = (void *)sr_trans_desc->in_buffer;
+srm_engine->dma_tx_desc->buffer = (void *)srm_trans_desc->in_buffer;
-sr_engine->dma_tx_desc->next = NULL;
+srm_engine->dma_tx_desc->next = NULL;
// vb_size, hb_length can be any value (auto writeback). However, if vb_size/hb_length is 0, it triggers 2D-DMA DESC_ERROR interrupt, and dma2d driver will automatically ends the transaction. Therefore, to avoid this, we set them to 1.
-sr_engine->dma_rx_desc->vb_size = 1;
+srm_engine->dma_rx_desc->vb_size = 1;
-sr_engine->dma_rx_desc->hb_length = 1;
+srm_engine->dma_rx_desc->hb_length = 1;
-sr_engine->dma_rx_desc->err_eof = 0;
+srm_engine->dma_rx_desc->err_eof = 0;
-sr_engine->dma_rx_desc->dma2d_en = 1;
+srm_engine->dma_rx_desc->dma2d_en = 1;
-sr_engine->dma_rx_desc->suc_eof = 1;
+srm_engine->dma_rx_desc->suc_eof = 1;
-sr_engine->dma_rx_desc->owner = DMA2D_DESCRIPTOR_BUFFER_OWNER_DMA;
+srm_engine->dma_rx_desc->owner = DMA2D_DESCRIPTOR_BUFFER_OWNER_DMA;
-sr_engine->dma_rx_desc->va_size = sr_trans_desc->out_pic_h;
+srm_engine->dma_rx_desc->va_size = srm_trans_desc->out_pic_h;
-sr_engine->dma_rx_desc->ha_length = sr_trans_desc->out_pic_w;
+srm_engine->dma_rx_desc->ha_length = srm_trans_desc->out_pic_w;
// pbyte can be any value
-sr_engine->dma_rx_desc->y = sr_trans_desc->out_block_offset_y;
+srm_engine->dma_rx_desc->y = srm_trans_desc->out_block_offset_y;
-sr_engine->dma_rx_desc->x = sr_trans_desc->out_block_offset_x;
+srm_engine->dma_rx_desc->x = srm_trans_desc->out_block_offset_x;
-sr_engine->dma_rx_desc->mode = DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE;
+srm_engine->dma_rx_desc->mode = DMA2D_DESCRIPTOR_BLOCK_RW_MODE_SINGLE;
-sr_engine->dma_rx_desc->buffer = (void *)sr_trans_desc->out_buffer;
+srm_engine->dma_rx_desc->buffer = (void *)srm_trans_desc->out_buffer;
-sr_engine->dma_rx_desc->next = NULL;
+srm_engine->dma_rx_desc->next = NULL;
-esp_cache_msync((void *)sr_engine->dma_tx_desc, s_platform.dma_desc_mem_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
+esp_cache_msync((void *)srm_engine->dma_tx_desc, s_platform.dma_desc_mem_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
-esp_cache_msync((void *)sr_engine->dma_rx_desc, s_platform.dma_desc_mem_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
+esp_cache_msync((void *)srm_engine->dma_rx_desc, s_platform.dma_desc_mem_size, ESP_CACHE_MSYNC_FLAG_DIR_C2M);
-// printf("desc addr: %p\n", sr_engine->dma_rx_desc);
+// printf("desc addr: %p\n", srm_engine->dma_rx_desc);
-// printf("desc content: %08lX, %08lX, %08lX, %08lX, %08lX\n", *(uint32_t *)sr_engine->dma_rx_desc, *(uint32_t *)((uint32_t)sr_engine->dma_rx_desc + 4), *(uint32_t *)((uint32_t)sr_engine->dma_rx_desc + 8), *(uint32_t *)((uint32_t)sr_engine->dma_rx_desc + 12), *(uint32_t *)((uint32_t)sr_engine->dma_rx_desc + 16));
+// printf("desc content: %08lX, %08lX, %08lX, %08lX, %08lX\n", *(uint32_t *)srm_engine->dma_rx_desc, *(uint32_t *)((uint32_t)srm_engine->dma_rx_desc + 4), *(uint32_t *)((uint32_t)srm_engine->dma_rx_desc + 8), *(uint32_t *)((uint32_t)srm_engine->dma_rx_desc + 12), *(uint32_t *)((uint32_t)srm_engine->dma_rx_desc + 16));
// Configure 2D-DMA channels
dma2d_trigger_t trig_periph = {
-.periph = DMA2D_TRIG_PERIPH_PPA_SR,
+.periph = DMA2D_TRIG_PERIPH_PPA_SRM,
-.periph_sel_id = SOC_DMA2D_TRIG_PERIPH_PPA_SR_TX,
+.periph_sel_id = SOC_DMA2D_TRIG_PERIPH_PPA_SRM_TX,
};
dma2d_connect(dma2d_tx_chan, &trig_periph);
-trig_periph.periph_sel_id = SOC_DMA2D_TRIG_PERIPH_PPA_SR_RX;
+trig_periph.periph_sel_id = SOC_DMA2D_TRIG_PERIPH_PPA_SRM_RX;
dma2d_connect(dma2d_rx_chan, &trig_periph);
dma2d_transfer_ability_t dma_transfer_ability = {
@@ -972,20 +980,20 @@ static bool ppa_sr_transaction_on_picked(uint32_t num_chans, const dma2d_trans_c
dma2d_set_transfer_ability(dma2d_rx_chan, &dma_transfer_ability);
// YUV444 and YUV422 are not supported by PPA module, need to utilize 2D-DMA color space conversion feature to do a conversion
-ppa_sr_color_mode_t ppa_in_color_mode = sr_trans_desc->in_color.mode;
+ppa_srm_color_mode_t ppa_in_color_mode = srm_trans_desc->in_color.mode;
-if (ppa_in_color_mode == PPA_SR_COLOR_MODE_YUV444) {
+if (ppa_in_color_mode == PPA_SRM_COLOR_MODE_YUV444) {
-ppa_in_color_mode = PPA_SR_COLOR_MODE_RGB888;
+ppa_in_color_mode = PPA_SRM_COLOR_MODE_RGB888;
dma2d_csc_config_t dma_tx_csc = {0};
-if (sr_trans_desc->in_color.yuv_std == COLOR_CONV_STD_RGB_YUV_BT601) {
+if (srm_trans_desc->in_color.yuv_std == COLOR_CONV_STD_RGB_YUV_BT601) {
dma_tx_csc.tx_csc_option = DMA2D_CSC_TX_YUV444_TO_RGB888_601;
} else {
dma_tx_csc.tx_csc_option = DMA2D_CSC_TX_YUV444_TO_RGB888_709;
}
dma2d_configure_color_space_conversion(dma2d_tx_chan, &dma_tx_csc);
-} else if (ppa_in_color_mode == PPA_SR_COLOR_MODE_YUV422) {
+} else if (ppa_in_color_mode == PPA_SRM_COLOR_MODE_YUV422) {
-ppa_in_color_mode = PPA_SR_COLOR_MODE_RGB888;
+ppa_in_color_mode = PPA_SRM_COLOR_MODE_RGB888;
dma2d_csc_config_t dma_tx_csc = {0};
-if (sr_trans_desc->in_color.yuv_std == COLOR_CONV_STD_RGB_YUV_BT601) {
+if (srm_trans_desc->in_color.yuv_std == COLOR_CONV_STD_RGB_YUV_BT601) {
dma_tx_csc.tx_csc_option = DMA2D_CSC_TX_YUV422_TO_RGB888_601;
} else {
dma_tx_csc.tx_csc_option = DMA2D_CSC_TX_YUV422_TO_RGB888_709;
@@ -993,9 +1001,9 @@ static bool ppa_sr_transaction_on_picked(uint32_t num_chans, const dma2d_trans_c
dma2d_configure_color_space_conversion(dma2d_tx_chan, &dma_tx_csc);
}
-ppa_sr_color_mode_t ppa_out_color_mode = sr_trans_desc->out_color.mode;
+ppa_srm_color_mode_t ppa_out_color_mode = srm_trans_desc->out_color.mode;
-if (ppa_out_color_mode == PPA_SR_COLOR_MODE_YUV444) {
+if (ppa_out_color_mode == PPA_SRM_COLOR_MODE_YUV444) {
-ppa_out_color_mode = PPA_SR_COLOR_MODE_YUV420;
+ppa_out_color_mode = PPA_SRM_COLOR_MODE_YUV420;
dma2d_csc_config_t dma_rx_csc = {
.rx_csc_option = DMA2D_CSC_RX_YUV420_TO_YUV444,
};
@@ -1007,74 +1015,76 @@ static bool ppa_sr_transaction_on_picked(uint32_t num_chans, const dma2d_trans_c
};
dma2d_register_rx_event_callbacks(dma2d_rx_chan, &dma_event_cbs, (void *)trans_on_picked_desc->trans_elm);
-ppa_ll_sr_reset(s_platform.hal.dev);
+ppa_ll_srm_reset(s_platform.hal.dev);
-dma2d_set_desc_addr(dma2d_tx_chan, (intptr_t)sr_engine->dma_tx_desc);
+dma2d_set_desc_addr(dma2d_tx_chan, (intptr_t)srm_engine->dma_tx_desc);
-dma2d_set_desc_addr(dma2d_rx_chan, (intptr_t)sr_engine->dma_rx_desc);
+dma2d_set_desc_addr(dma2d_rx_chan, (intptr_t)srm_engine->dma_rx_desc);
dma2d_start(dma2d_tx_chan);
dma2d_start(dma2d_rx_chan);
-// Configure PPA SR engine
+// Configure PPA SRM engine
-ppa_ll_sr_set_rx_color_mode(s_platform.hal.dev, ppa_in_color_mode);
+ppa_ll_srm_set_rx_color_mode(s_platform.hal.dev, ppa_in_color_mode);
if (COLOR_SPACE_TYPE(ppa_in_color_mode) == COLOR_SPACE_YUV) {
-ppa_ll_sr_set_rx_yuv_range(s_platform.hal.dev, sr_trans_desc->in_color.yuv_range);
+ppa_ll_srm_set_rx_yuv_range(s_platform.hal.dev, srm_trans_desc->in_color.yuv_range);
-ppa_ll_sr_set_yuv2rgb_std(s_platform.hal.dev, sr_trans_desc->in_color.yuv_std);
+ppa_ll_srm_set_yuv2rgb_std(s_platform.hal.dev, srm_trans_desc->in_color.yuv_std);
}
-ppa_ll_sr_enable_rx_byte_swap(s_platform.hal.dev, sr_trans_desc->in_color.byte_swap);
+ppa_ll_srm_enable_rx_byte_swap(s_platform.hal.dev, srm_trans_desc->in_color.byte_swap);
-ppa_ll_sr_enable_rx_rgb_swap(s_platform.hal.dev, sr_trans_desc->in_color.rgb_swap);
+ppa_ll_srm_enable_rx_rgb_swap(s_platform.hal.dev, srm_trans_desc->in_color.rgb_swap);
-ppa_ll_sr_configure_rx_alpha(s_platform.hal.dev, sr_trans_desc->in_color.alpha_mode, sr_trans_desc->in_color.alpha_value);
+ppa_ll_srm_configure_rx_alpha(s_platform.hal.dev, srm_trans_desc->in_color.alpha_mode, srm_trans_desc->in_color.alpha_value);
-ppa_ll_sr_set_tx_color_mode(s_platform.hal.dev, ppa_out_color_mode);
+ppa_ll_srm_set_tx_color_mode(s_platform.hal.dev, ppa_out_color_mode);
if (COLOR_SPACE_TYPE(ppa_out_color_mode) == COLOR_SPACE_YUV) {
-ppa_ll_sr_set_rx_yuv_range(s_platform.hal.dev, sr_trans_desc->out_color.yuv_range);
+ppa_ll_srm_set_rx_yuv_range(s_platform.hal.dev, srm_trans_desc->out_color.yuv_range);
-ppa_ll_sr_set_yuv2rgb_std(s_platform.hal.dev, sr_trans_desc->out_color.yuv_std);
+ppa_ll_srm_set_yuv2rgb_std(s_platform.hal.dev, srm_trans_desc->out_color.yuv_std);
}
// TODO: sr_macro_bk_ro_bypass
// PPA.sr_byte_order.sr_macro_bk_ro_bypass = 1;
-ppa_ll_sr_set_rotation_angle(s_platform.hal.dev, sr_trans_desc->rotation_angle);
+ppa_ll_srm_set_rotation_angle(s_platform.hal.dev, srm_trans_desc->rotation_angle);
-ppa_ll_sr_set_scaling_x(s_platform.hal.dev, sr_trans_desc->scale_x_int, sr_trans_desc->scale_x_frag);
+ppa_ll_srm_set_scaling_x(s_platform.hal.dev, srm_trans_desc->scale_x_int, srm_trans_desc->scale_x_frag);
-ppa_ll_sr_set_scaling_y(s_platform.hal.dev, sr_trans_desc->scale_y_int, sr_trans_desc->scale_y_frag);
+ppa_ll_srm_set_scaling_y(s_platform.hal.dev, srm_trans_desc->scale_y_int, srm_trans_desc->scale_y_frag);
-ppa_ll_sr_enable_mirror_x(s_platform.hal.dev, sr_trans_desc->mirror_x);
+ppa_ll_srm_enable_mirror_x(s_platform.hal.dev, srm_trans_desc->mirror_x);
-ppa_ll_sr_enable_mirror_y(s_platform.hal.dev, sr_trans_desc->mirror_y);
+ppa_ll_srm_enable_mirror_y(s_platform.hal.dev, srm_trans_desc->mirror_y);
-ppa_ll_sr_start(s_platform.hal.dev);
+ppa_ll_srm_start(s_platform.hal.dev);
// No need to yield
return false;
}
-esp_err_t ppa_do_scale_and_rotate(ppa_invoker_handle_t ppa_invoker, const ppa_sr_operation_config_t *oper_config, const ppa_trans_config_t *trans_config)
+esp_err_t ppa_do_scale_rotate_mirror(ppa_invoker_handle_t ppa_invoker, const ppa_srm_operation_config_t *oper_config, const ppa_trans_config_t *trans_config)
{
ESP_RETURN_ON_FALSE(ppa_invoker && oper_config && trans_config, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
-ESP_RETURN_ON_FALSE(ppa_invoker->sr_engine, ESP_ERR_INVALID_ARG, TAG, "invoker did not register to SR engine");
+ESP_RETURN_ON_FALSE(ppa_invoker->srm_engine, ESP_ERR_INVALID_ARG, TAG, "invoker did not register to SRM engine");
ESP_RETURN_ON_FALSE(trans_config->mode <= PPA_TRANS_MODE_NON_BLOCKING, ESP_ERR_INVALID_ARG, TAG, "invalid mode");
// Any restrictions on in/out buffer address? alignment? alignment restriction comes from cache, its addr and size need to be aligned to cache line size on 912!
// buffer on stack/heap
// ESP_RETURN_ON_FALSE(config->rotation_angle)
// ESP_RETURN_ON_FALSE(config->in/out_color_mode)
// what if in_color is YUV420, out is RGB, what is out RGB range? Full range?
-ESP_RETURN_ON_FALSE(oper_config->scale_x < (PPA_LL_SR_SCALING_INT_MAX + 1) && oper_config->scale_x >= (1.0 / PPA_LL_SR_SCALING_FRAG_MAX) &&
+ESP_RETURN_ON_FALSE(oper_config->scale_x < (PPA_LL_SRM_SCALING_INT_MAX + 1) && oper_config->scale_x >= (1.0 / PPA_LL_SRM_SCALING_FRAG_MAX) &&
-oper_config->scale_y < (PPA_LL_SR_SCALING_INT_MAX + 1) && oper_config->scale_y >= (1.0 / PPA_LL_SR_SCALING_FRAG_MAX),
+oper_config->scale_y < (PPA_LL_SRM_SCALING_INT_MAX + 1) && oper_config->scale_y >= (1.0 / PPA_LL_SRM_SCALING_FRAG_MAX),
ESP_ERR_INVALID_ARG, TAG, "invalid scale");
// byte/rgb swap with color mode only to (A)RGB color space?
+// YUV420: in desc, ha/hb/va/vb/x/y must be even number
// TODO: Maybe do buffer writeback and invalidation here, instead of in on_picked?
ppa_trans_t *trans_elm = NULL;
-esp_err_t ret = ppa_prepare_trans_elm(ppa_invoker, ppa_invoker->sr_engine, PPA_OPERATION_SR, (void *)oper_config, trans_config->mode, &trans_elm);
+esp_err_t ret = ppa_prepare_trans_elm(ppa_invoker, ppa_invoker->srm_engine, PPA_OPERATION_SRM, (void *)oper_config, trans_config->mode, &trans_elm);
if (ret == ESP_OK) {
assert(trans_elm);
// Pre-process some data
ppa_dma2d_trans_on_picked_config_t *trans_on_picked_desc = trans_elm->trans_desc->user_config;
-ppa_sr_oper_t *sr_trans_desc = trans_on_picked_desc->sr_desc;
+ppa_srm_oper_t *srm_trans_desc = trans_on_picked_desc->srm_desc;
-sr_trans_desc->scale_x_int = (uint32_t)sr_trans_desc->scale_x;
+srm_trans_desc->scale_x_int = (uint32_t)srm_trans_desc->scale_x;
-sr_trans_desc->scale_x_frag = (uint32_t)(sr_trans_desc->scale_x * (PPA_LL_SR_SCALING_FRAG_MAX + 1)) & PPA_LL_SR_SCALING_FRAG_MAX;
+srm_trans_desc->scale_x_frag = (uint32_t)(srm_trans_desc->scale_x * (PPA_LL_SRM_SCALING_FRAG_MAX + 1)) & PPA_LL_SRM_SCALING_FRAG_MAX;
-sr_trans_desc->scale_y_int = (uint32_t)sr_trans_desc->scale_y;
+srm_trans_desc->scale_y_int = (uint32_t)srm_trans_desc->scale_y;
-sr_trans_desc->scale_y_frag = (uint32_t)(sr_trans_desc->scale_y * (PPA_LL_SR_SCALING_FRAG_MAX + 1)) & PPA_LL_SR_SCALING_FRAG_MAX;
+srm_trans_desc->scale_y_frag = (uint32_t)(srm_trans_desc->scale_y * (PPA_LL_SRM_SCALING_FRAG_MAX + 1)) & PPA_LL_SRM_SCALING_FRAG_MAX;
-ret = ppa_do_operation(ppa_invoker, ppa_invoker->sr_engine, trans_elm, trans_config->mode);
+ret = ppa_do_operation(ppa_invoker, ppa_invoker->srm_engine, trans_elm, trans_config->mode);
if (ret != ESP_OK) {
ppa_recycle_transaction(trans_elm);
}
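The scale pre-processing in the hunk above splits each floating-point scale factor into an integer part and a fractional part quantized in steps of 1/(PPA_LL_SRM_SCALING_FRAG_MAX + 1). A small worked sketch of that arithmetic, assuming PPA_LL_SRM_SCALING_FRAG_MAX is 15 purely for illustration (the real value comes from PPA_SR_SCAL_X_FRAG_V):

// Illustrative arithmetic only; mirrors the decomposition done in ppa_do_scale_rotate_mirror above.
float scale_x = 2.5f;
uint32_t scale_x_int  = (uint32_t)scale_x;                    // 2
uint32_t scale_x_frag = (uint32_t)(scale_x * (15 + 1)) & 15;  // 2.5 * 16 = 40, 40 & 15 = 8, i.e. 8/16 = 0.5
// Effective hardware scale = scale_x_int + scale_x_frag / 16 = 2.5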
@@ -1247,6 +1257,7 @@ esp_err_t ppa_do_blend(ppa_invoker_handle_t ppa_invoker, const ppa_blend_operati
ESP_RETURN_ON_FALSE(ppa_invoker->blending_engine, ESP_ERR_INVALID_ARG, TAG, "invoker did not register to Blending engine");
ESP_RETURN_ON_FALSE(trans_config->mode <= PPA_TRANS_MODE_NON_BLOCKING, ESP_ERR_INVALID_ARG, TAG, "invalid mode");
// TODO: ARG CHECK
+// When the input type is L4 or A4, the block size hb and its offset x within the picture must be even numbers
// TODO: Maybe do buffer writeback and invalidation here, instead of in on_picked?

View File

@@ -571,7 +571,7 @@ esp_err_t dma2d_connect(dma2d_channel_handle_t dma2d_chan, const dma2d_trigger_t
// Configure reorder functionality
dma2d_ll_tx_enable_reorder(group->hal.dev, channel_id, dma2d_chan->status.reorder_en);
// Assume dscr_port enable or not can be directly derived from trig_periph
-dma2d_ll_tx_enable_dscr_port(group->hal.dev, channel_id, trig_periph->periph == DMA2D_TRIG_PERIPH_PPA_SR);
+dma2d_ll_tx_enable_dscr_port(group->hal.dev, channel_id, trig_periph->periph == DMA2D_TRIG_PERIPH_PPA_SRM);
// Reset to certain settings
dma2d_ll_tx_enable_owner_check(group->hal.dev, channel_id, false);
@@ -596,7 +596,7 @@ esp_err_t dma2d_connect(dma2d_channel_handle_t dma2d_chan, const dma2d_trigger_t
// Configure reorder functionality
dma2d_ll_rx_enable_reorder(group->hal.dev, channel_id, dma2d_chan->status.reorder_en);
// Assume dscr_port enable or not can be directly derived from trig_periph
-dma2d_ll_rx_enable_dscr_port(group->hal.dev, channel_id, trig_periph->periph == DMA2D_TRIG_PERIPH_PPA_SR);
+dma2d_ll_rx_enable_dscr_port(group->hal.dev, channel_id, trig_periph->periph == DMA2D_TRIG_PERIPH_PPA_SRM);
// Reset to certain settings
dma2d_ll_rx_enable_owner_check(group->hal.dev, channel_id, false);
View File
@@ -104,7 +104,7 @@ typedef struct {
* @return Whether a task switch is needed after the callback function returns, * @return Whether a task switch is needed after the callback function returns,
* this is usually because the callback wakes up a high-priority task. * this is usually because the callback wakes up a high-priority task.
*/ */
typedef bool (*dma2d_trans_callback_t)(uint32_t num_chans, const dma2d_trans_channel_info_t *dma2d_chans, void *user_config); typedef bool (*dma2d_trans_on_picked_callback_t)(uint32_t num_chans, const dma2d_trans_channel_info_t *dma2d_chans, void *user_config);
/** /**
* @brief 2D-DMA channel special function flags * @brief 2D-DMA channel special function flags
@@ -131,7 +131,7 @@ typedef struct {
uint32_t specified_tx_channel_mask; /*!< Bit mask of the specific TX channels to be used, the specified TX channels should have been reserved */ uint32_t specified_tx_channel_mask; /*!< Bit mask of the specific TX channels to be used, the specified TX channels should have been reserved */
uint32_t specified_rx_channel_mask; /*!< Bit mask of the specific RX channels to be used, the specified RX channels should have been reserved */ uint32_t specified_rx_channel_mask; /*!< Bit mask of the specific RX channels to be used, the specified RX channels should have been reserved */
dma2d_trans_callback_t on_job_picked; /*!< Callback function to be called when all necessary channels to do the transaction have been acquired */ dma2d_trans_on_picked_callback_t on_job_picked; /*!< Callback function to be called when all necessary channels to do the transaction have been acquired */
void *user_config; /*!< User registered data to be passed into `on_job_picked` callback */ void *user_config; /*!< User registered data to be passed into `on_job_picked` callback */
} dma2d_trans_config_t; } dma2d_trans_config_t;
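For reference, the renamed dma2d_trans_on_picked_callback_t is the type of the on_job_picked member above. A minimal sketch of such a callback (hypothetical name, assumes the 2D-DMA driver header is included; no channel programming is shown):

/* Hypothetical callback matching dma2d_trans_on_picked_callback_t. A real user
 * would configure and start the acquired TX/RX channels here; this sketch only
 * demonstrates the signature and the return convention. */
static bool example_on_job_picked(uint32_t num_chans,
                                  const dma2d_trans_channel_info_t *dma2d_chans,
                                  void *user_config)
{
    (void)num_chans;
    (void)dma2d_chans;
    (void)user_config;
    /* Return true only if the callback woke a higher-priority task and a task
     * switch should happen right after it returns; nothing was woken here. */
    return false;
}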
View File
@@ -24,8 +24,8 @@ extern "C" {
#define PPA_LL_BLEND0_CLUT_MEM_ADDR_OFFSET 0x400 #define PPA_LL_BLEND0_CLUT_MEM_ADDR_OFFSET 0x400
#define PPA_LL_BLEND1_CLUT_MEM_ADDR_OFFSET 0x800 #define PPA_LL_BLEND1_CLUT_MEM_ADDR_OFFSET 0x800
#define PPA_LL_SR_SCALING_INT_MAX PPA_SR_SCAL_X_INT_V #define PPA_LL_SRM_SCALING_INT_MAX PPA_SR_SCAL_X_INT_V
#define PPA_LL_SR_SCALING_FRAG_MAX PPA_SR_SCAL_X_FRAG_V #define PPA_LL_SRM_SCALING_FRAG_MAX PPA_SR_SCAL_X_FRAG_V
/** /**
* @brief Enumeration of PPA blending mode * @brief Enumeration of PPA blending mode
@@ -64,13 +64,13 @@ static inline void ppa_ll_reset_register(void)
/// the critical section needs to declare the __DECLARE_RCC_ATOMIC_ENV variable in advance /// the critical section needs to declare the __DECLARE_RCC_ATOMIC_ENV variable in advance
#define ppa_ll_reset_register(...) (void)__DECLARE_RCC_ATOMIC_ENV; ppa_ll_reset_register(__VA_ARGS__) #define ppa_ll_reset_register(...) (void)__DECLARE_RCC_ATOMIC_ENV; ppa_ll_reset_register(__VA_ARGS__)
/////////////////////////// Scaling and Rotating (SR) //////////////////////////////// ///////////////////////// Scaling, Rotating, Mirroring (SRM) //////////////////////////////
/** /**
* @brief Reset PPA scaling and rotating engine * @brief Reset PPA scaling-rotating-mirroring engine
* *
* @param dev Peripheral instance address * @param dev Peripheral instance address
*/ */
static inline void ppa_ll_sr_reset(ppa_dev_t *dev) static inline void ppa_ll_srm_reset(ppa_dev_t *dev)
{ {
dev->sr_scal_rotate.scal_rotate_rst = 1; dev->sr_scal_rotate.scal_rotate_rst = 1;
dev->sr_scal_rotate.scal_rotate_rst = 0; dev->sr_scal_rotate.scal_rotate_rst = 0;
@@ -83,7 +83,7 @@ static inline void ppa_ll_sr_reset(ppa_dev_t *dev)
* @param x_int The integer part of the scaling coefficient in X direction, 0 - 255 * @param x_int The integer part of the scaling coefficient in X direction, 0 - 255
* @param x_frag The fractional part of the scaling coefficient in X direction, 0 - 15. The corresponding fractional value is x_frag/16. * @param x_frag The fractional part of the scaling coefficient in X direction, 0 - 15. The corresponding fractional value is x_frag/16.
*/ */
static inline void ppa_ll_sr_set_scaling_x(ppa_dev_t *dev, uint32_t x_int, uint32_t x_frag) static inline void ppa_ll_srm_set_scaling_x(ppa_dev_t *dev, uint32_t x_int, uint32_t x_frag)
{ {
HAL_ASSERT(x_int <= PPA_SR_SCAL_X_INT_V && x_frag <= PPA_SR_SCAL_X_FRAG_V); HAL_ASSERT(x_int <= PPA_SR_SCAL_X_INT_V && x_frag <= PPA_SR_SCAL_X_FRAG_V);
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->sr_scal_rotate, sr_scal_x_int, x_int); HAL_FORCE_MODIFY_U32_REG_FIELD(dev->sr_scal_rotate, sr_scal_x_int, x_int);
@@ -97,7 +97,7 @@ static inline void ppa_ll_sr_set_scaling_x(ppa_dev_t *dev, uint32_t x_int, uint3
* @param y_int The integer part of the scaling coefficient in Y direction, 0 - 255 * @param y_int The integer part of the scaling coefficient in Y direction, 0 - 255
* @param y_frag The fractional part of the scaling coefficient in Y direction, 0 - 15. The corresponding fractional value is y_frag/16. * @param y_frag The fractional part of the scaling coefficient in Y direction, 0 - 15. The corresponding fractional value is y_frag/16.
*/ */
static inline void ppa_ll_sr_set_scaling_y(ppa_dev_t *dev, uint32_t y_int, uint32_t y_frag) static inline void ppa_ll_srm_set_scaling_y(ppa_dev_t *dev, uint32_t y_int, uint32_t y_frag)
{ {
HAL_ASSERT(y_int <= PPA_SR_SCAL_Y_INT_V && y_frag <= PPA_SR_SCAL_Y_FRAG_V); HAL_ASSERT(y_int <= PPA_SR_SCAL_Y_INT_V && y_frag <= PPA_SR_SCAL_Y_FRAG_V);
HAL_FORCE_MODIFY_U32_REG_FIELD(dev->sr_scal_rotate, sr_scal_y_int, y_int); HAL_FORCE_MODIFY_U32_REG_FIELD(dev->sr_scal_rotate, sr_scal_y_int, y_int);
@@ -108,22 +108,22 @@ static inline void ppa_ll_sr_set_scaling_y(ppa_dev_t *dev, uint32_t y_int, uint3
* @brief Set PPA rotation angle (in the counterclockwise direction) * @brief Set PPA rotation angle (in the counterclockwise direction)
* *
* @param dev Peripheral instance address * @param dev Peripheral instance address
* @param angle One of the values in ppa_sr_rotation_angle_t * @param angle One of the values in ppa_srm_rotation_angle_t
*/ */
static inline void ppa_ll_sr_set_rotation_angle(ppa_dev_t *dev, ppa_sr_rotation_angle_t angle) static inline void ppa_ll_srm_set_rotation_angle(ppa_dev_t *dev, ppa_srm_rotation_angle_t angle)
{ {
uint32_t val = 0; uint32_t val = 0;
switch (angle) { switch (angle) {
case PPA_SR_ROTATION_ANGLE_0: case PPA_SRM_ROTATION_ANGLE_0:
val = 0; val = 0;
break; break;
case PPA_SR_ROTATION_ANGLE_90: case PPA_SRM_ROTATION_ANGLE_90:
val = 1; val = 1;
break; break;
case PPA_SR_ROTATION_ANGLE_180: case PPA_SRM_ROTATION_ANGLE_180:
val = 2; val = 2;
break; break;
case PPA_SR_ROTATION_ANGLE_270: case PPA_SRM_ROTATION_ANGLE_270:
val = 3; val = 3;
break; break;
default: default:
@@ -139,7 +139,7 @@ static inline void ppa_ll_sr_set_rotation_angle(ppa_dev_t *dev, ppa_sr_rotation_
* @param dev Peripheral instance address * @param dev Peripheral instance address
* @param enable True to enable; False to disable * @param enable True to enable; False to disable
*/ */
static inline void ppa_ll_sr_enable_mirror_x(ppa_dev_t *dev, bool enable) static inline void ppa_ll_srm_enable_mirror_x(ppa_dev_t *dev, bool enable)
{ {
dev->sr_scal_rotate.sr_mirror_x = enable; dev->sr_scal_rotate.sr_mirror_x = enable;
} }
@@ -150,86 +150,86 @@ static inline void ppa_ll_sr_enable_mirror_x(ppa_dev_t *dev, bool enable)
* @param dev Peripheral instance address * @param dev Peripheral instance address
* @param enable True to enable; False to disable * @param enable True to enable; False to disable
*/ */
static inline void ppa_ll_sr_enable_mirror_y(ppa_dev_t *dev, bool enable) static inline void ppa_ll_srm_enable_mirror_y(ppa_dev_t *dev, bool enable)
{ {
dev->sr_scal_rotate.sr_mirror_y = enable; dev->sr_scal_rotate.sr_mirror_y = enable;
} }
/** /**
* @brief Start PPA scaling and rotating engine to perform PPA SR * @brief Start PPA scaling-rotating-mirroring engine to perform PPA SRM
* *
* @param dev Peripheral instance address * @param dev Peripheral instance address
*/ */
static inline void ppa_ll_sr_start(ppa_dev_t *dev) static inline void ppa_ll_srm_start(ppa_dev_t *dev)
{ {
dev->sr_scal_rotate.scal_rotate_start = 1; dev->sr_scal_rotate.scal_rotate_start = 1;
} }
/** /**
* @brief Set the source image color mode for PPA Scaling and Rotating engine RX * @brief Set the source image color mode for PPA Scaling-Rotating-Mirroring engine RX
* *
* @param dev Peripheral instance address * @param dev Peripheral instance address
* @param color_mode One of the values in ppa_sr_color_mode_t * @param color_mode One of the values in ppa_srm_color_mode_t
*/ */
static inline void ppa_ll_sr_set_rx_color_mode(ppa_dev_t *dev, ppa_sr_color_mode_t color_mode) static inline void ppa_ll_srm_set_rx_color_mode(ppa_dev_t *dev, ppa_srm_color_mode_t color_mode)
{ {
uint32_t val = 0; uint32_t val = 0;
switch (color_mode) { switch (color_mode) {
case PPA_SR_COLOR_MODE_ARGB8888: case PPA_SRM_COLOR_MODE_ARGB8888:
val = 0; val = 0;
break; break;
case PPA_SR_COLOR_MODE_RGB888: case PPA_SRM_COLOR_MODE_RGB888:
val = 1; val = 1;
break; break;
case PPA_SR_COLOR_MODE_RGB565: case PPA_SRM_COLOR_MODE_RGB565:
val = 2; val = 2;
break; break;
case PPA_SR_COLOR_MODE_YUV420: case PPA_SRM_COLOR_MODE_YUV420:
val = 8; val = 8;
break; break;
default: default:
// Unsupported SR rx color mode // Unsupported SRM rx color mode
abort(); abort();
} }
dev->sr_color_mode.sr_rx_cm = val; dev->sr_color_mode.sr_rx_cm = val;
} }
/** /**
* @brief Set the destination image color mode for PPA Scaling and Rotating engine TX * @brief Set the destination image color mode for PPA Scaling-Rotating-Mirroring engine TX
* *
* @param dev Peripheral instance address * @param dev Peripheral instance address
* @param color_mode One of the values in ppa_sr_color_mode_t * @param color_mode One of the values in ppa_srm_color_mode_t
*/ */
static inline void ppa_ll_sr_set_tx_color_mode(ppa_dev_t *dev, ppa_sr_color_mode_t color_mode) static inline void ppa_ll_srm_set_tx_color_mode(ppa_dev_t *dev, ppa_srm_color_mode_t color_mode)
{ {
uint32_t val = 0; uint32_t val = 0;
switch (color_mode) { switch (color_mode) {
case PPA_SR_COLOR_MODE_ARGB8888: case PPA_SRM_COLOR_MODE_ARGB8888:
val = 0; val = 0;
break; break;
case PPA_SR_COLOR_MODE_RGB888: case PPA_SRM_COLOR_MODE_RGB888:
val = 1; val = 1;
break; break;
case PPA_SR_COLOR_MODE_RGB565: case PPA_SRM_COLOR_MODE_RGB565:
val = 2; val = 2;
break; break;
case PPA_SR_COLOR_MODE_YUV420: case PPA_SRM_COLOR_MODE_YUV420:
val = 8; val = 8;
break; break;
default: default:
// Unsupported SR tx color mode // Unsupported SRM tx color mode
abort(); abort();
} }
dev->sr_color_mode.sr_tx_cm = val; dev->sr_color_mode.sr_tx_cm = val;
} }
/** /**
* @brief Set YUV to RGB protocol when PPA SR RX pixel color space is YUV * @brief Set YUV to RGB protocol when PPA SRM RX pixel color space is YUV
* *
* @param dev Peripheral instance address * @param dev Peripheral instance address
* @param std One of the RGB-YUV conversion standards in color_conv_std_rgb_yuv_t * @param std One of the RGB-YUV conversion standards in color_conv_std_rgb_yuv_t
*/ */
static inline void ppa_ll_sr_set_yuv2rgb_std(ppa_dev_t *dev, color_conv_std_rgb_yuv_t std) static inline void ppa_ll_srm_set_yuv2rgb_std(ppa_dev_t *dev, color_conv_std_rgb_yuv_t std)
{ {
switch (std) { switch (std) {
case COLOR_CONV_STD_RGB_YUV_BT601: case COLOR_CONV_STD_RGB_YUV_BT601:
@@ -245,12 +245,12 @@ static inline void ppa_ll_sr_set_yuv2rgb_std(ppa_dev_t *dev, color_conv_std_rgb_
} }
/** /**
* @brief Set RGB to YUV protocol when PPA SR TX pixel color space is YUV * @brief Set RGB to YUV protocol when PPA SRM TX pixel color space is YUV
* *
* @param dev Peripheral instance address * @param dev Peripheral instance address
* @param std One of the RGB-YUV conversion standards in color_conv_std_rgb_yuv_t * @param std One of the RGB-YUV conversion standards in color_conv_std_rgb_yuv_t
*/ */
static inline void ppa_ll_sr_set_rgb2yuv_std(ppa_dev_t *dev, color_conv_std_rgb_yuv_t std) static inline void ppa_ll_srm_set_rgb2yuv_std(ppa_dev_t *dev, color_conv_std_rgb_yuv_t std)
{ {
switch (std) { switch (std) {
case COLOR_CONV_STD_RGB_YUV_BT601: case COLOR_CONV_STD_RGB_YUV_BT601:
@@ -266,12 +266,12 @@ static inline void ppa_ll_sr_set_rgb2yuv_std(ppa_dev_t *dev, color_conv_std_rgb_
} }
/** /**
* @brief Set PPA SR YUV input range * @brief Set PPA SRM YUV input range
* *
* @param dev Peripheral instance address * @param dev Peripheral instance address
* @param range One of color range options in color_range_t * @param range One of color range options in color_range_t
*/ */
static inline void ppa_ll_sr_set_rx_yuv_range(ppa_dev_t *dev, color_range_t range) static inline void ppa_ll_srm_set_rx_yuv_range(ppa_dev_t *dev, color_range_t range)
{ {
switch (range) { switch (range) {
case COLOR_RANGE_LIMIT: case COLOR_RANGE_LIMIT:
@@ -287,12 +287,12 @@ static inline void ppa_ll_sr_set_rx_yuv_range(ppa_dev_t *dev, color_range_t rang
} }
/** /**
* @brief Set PPA SR YUV output range * @brief Set PPA SRM YUV output range
* *
* @param dev Peripheral instance address * @param dev Peripheral instance address
* @param range One of color range options in color_range_t * @param range One of color range options in color_range_t
*/ */
static inline void ppa_ll_sr_set_tx_yuv_range(ppa_dev_t *dev, color_range_t range) static inline void ppa_ll_srm_set_tx_yuv_range(ppa_dev_t *dev, color_range_t range)
{ {
switch (range) { switch (range) {
case COLOR_RANGE_LIMIT: case COLOR_RANGE_LIMIT:
@@ -308,31 +308,31 @@ static inline void ppa_ll_sr_set_tx_yuv_range(ppa_dev_t *dev, color_range_t rang
} }
/** /**
* @brief Enable PPA SR input data swap in RGB (e.g. ARGB becomes BGRA, RGB becomes BGR) * @brief Enable PPA SRM input data swap in RGB (e.g. ARGB becomes BGRA, RGB becomes BGR)
* *
* @param dev Peripheral instance address * @param dev Peripheral instance address
* @param enable True to enable; False to disable * @param enable True to enable; False to disable
*/ */
static inline void ppa_ll_sr_enable_rx_rgb_swap(ppa_dev_t *dev, bool enable) static inline void ppa_ll_srm_enable_rx_rgb_swap(ppa_dev_t *dev, bool enable)
{ {
dev->sr_byte_order.sr_rx_rgb_swap_en = enable; dev->sr_byte_order.sr_rx_rgb_swap_en = enable;
} }
/** /**
* @brief Enable PPA SR input data swap in byte (Byte0 and Byte1 would be swapped, while Byte2 and Byte3 would be swapped) * @brief Enable PPA SRM input data swap in byte (Byte0 and Byte1 would be swapped, while Byte2 and Byte3 would be swapped)
* *
* Only supported when input color mode is ARGB8888 or RGB565. * Only supported when input color mode is ARGB8888 or RGB565.
* *
* @param dev Peripheral instance address * @param dev Peripheral instance address
* @param enable True to enable; False to disable * @param enable True to enable; False to disable
*/ */
static inline void ppa_ll_sr_enable_rx_byte_swap(ppa_dev_t *dev, bool enable) static inline void ppa_ll_srm_enable_rx_byte_swap(ppa_dev_t *dev, bool enable)
{ {
dev->sr_byte_order.sr_rx_byte_swap_en = enable; dev->sr_byte_order.sr_rx_byte_swap_en = enable;
} }
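To make the two swap options above concrete, the following is a plain-C illustration (not a PPA API) of the effect described in the doc comments, applied to a single ARGB8888 pixel held in one 32-bit word, assuming A is the most significant byte:

#include <stdint.h>
#include <stdio.h>

/* RGB swap: component order is reversed, ARGB becomes BGRA. */
static uint32_t rgb_swap_argb8888(uint32_t px)
{
    return ((px & 0x000000FFu) << 24) | ((px & 0x0000FF00u) << 8) |
           ((px & 0x00FF0000u) >> 8)  | ((px & 0xFF000000u) >> 24);
}

/* Byte swap: Byte0 <-> Byte1 and Byte2 <-> Byte3. */
static uint32_t byte_swap_argb8888(uint32_t px)
{
    return ((px & 0x00FF00FFu) << 8) | ((px & 0xFF00FF00u) >> 8);
}

int main(void)
{
    uint32_t px = 0x11223344u;  /* A=0x11 R=0x22 G=0x33 B=0x44 */
    printf("rgb swap : 0x%08X\n", (unsigned)rgb_swap_argb8888(px));   /* 0x44332211 */
    printf("byte swap: 0x%08X\n", (unsigned)byte_swap_argb8888(px));  /* 0x22114433 */
    return 0;
}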
/** /**
* @brief Configure PPA SR alpha value transformation mode * @brief Configure PPA SRM alpha value transformation mode
* *
* @param dev Peripheral instance address * @param dev Peripheral instance address
* @param mode Alpha value transformation mode, one of the values in ppa_alpha_mode_t * @param mode Alpha value transformation mode, one of the values in ppa_alpha_mode_t
@@ -340,7 +340,7 @@ static inline void ppa_ll_sr_enable_rx_byte_swap(ppa_dev_t *dev, bool enable)
* When PPA_ALPHA_SCALE mode is selected, val/256 is the multiplier to the input alpha value (output_alpha = input_alpha * val / 256) * When PPA_ALPHA_SCALE mode is selected, val/256 is the multiplier to the input alpha value (output_alpha = input_alpha * val / 256)
* When other modes are selected, this field is not used * When other modes are selected, this field is not used
*/ */
static inline void ppa_ll_sr_configure_rx_alpha(ppa_dev_t *dev, ppa_alpha_mode_t mode, uint32_t val) static inline void ppa_ll_srm_configure_rx_alpha(ppa_dev_t *dev, ppa_alpha_mode_t mode, uint32_t val)
{ {
switch (mode) { switch (mode) {
case PPA_ALPHA_NO_CHANGE: case PPA_ALPHA_NO_CHANGE:
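As a quick worked example of the PPA_ALPHA_SCALE formula quoted in the parameter description above (output_alpha = input_alpha * val / 256), in plain integer arithmetic rather than a driver call:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t input_alpha = 200;
    uint32_t val = 128;                               /* multiplier 128/256 = 0.5 */
    uint32_t output_alpha = input_alpha * val / 256;  /* 200 * 128 / 256 = 100 */
    printf("output_alpha = %u\n", (unsigned)output_alpha);
    return 0;
}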
View File
@@ -104,7 +104,7 @@ typedef enum {
DMA2D_TRIG_PERIPH_M2M, /*!< 2D-DMA trigger peripheral: M2M */ DMA2D_TRIG_PERIPH_M2M, /*!< 2D-DMA trigger peripheral: M2M */
DMA2D_TRIG_PERIPH_JPEG_ENCODER, /*!< 2D-DMA trigger peripheral: JPEG Encoder */ DMA2D_TRIG_PERIPH_JPEG_ENCODER, /*!< 2D-DMA trigger peripheral: JPEG Encoder */
DMA2D_TRIG_PERIPH_JPEG_DECODER, /*!< 2D-DMA trigger peripheral: JPEG Decoder */ DMA2D_TRIG_PERIPH_JPEG_DECODER, /*!< 2D-DMA trigger peripheral: JPEG Decoder */
DMA2D_TRIG_PERIPH_PPA_SR, /*!< 2D-DMA trigger peripheral: PPA SR engine */ DMA2D_TRIG_PERIPH_PPA_SRM, /*!< 2D-DMA trigger peripheral: PPA SRM engine */
DMA2D_TRIG_PERIPH_PPA_BLEND, /*!< 2D-DMA trigger peripheral: PPA Blending engine */ DMA2D_TRIG_PERIPH_PPA_BLEND, /*!< 2D-DMA trigger peripheral: PPA Blending engine */
} dma2d_trigger_peripheral_t; } dma2d_trigger_peripheral_t;
View File
@@ -17,34 +17,34 @@ extern "C" {
* @brief Enumeration of engines in PPA modules * @brief Enumeration of engines in PPA modules
*/ */
typedef enum { typedef enum {
PPA_ENGINE_TYPE_SR, /*!< PPA Scaling and Rotating (SR) engine, used to perform scale_and_rotate */ PPA_ENGINE_TYPE_SRM, /*!< PPA Scaling-Rotating-Mirroring (SRM) engine, used to perform scale, rotate, mirror */
PPA_ENGINE_TYPE_BLEND, /*!< PPA Blending engine, used to perform blend or fill */ PPA_ENGINE_TYPE_BLEND, /*!< PPA Blending engine, used to perform blend or fill */
} ppa_engine_type_t; } ppa_engine_type_t;
/** /**
* @brief Enumeration of PPA Scaling and Rotating available rotation angle (in the counterclockwise direction) * @brief Enumeration of PPA Scaling-Rotating-Mirroring available rotation angle (in the counterclockwise direction)
*/ */
typedef enum { typedef enum {
PPA_SR_ROTATION_ANGLE_0, /*!< Picture does no rotation */ PPA_SRM_ROTATION_ANGLE_0, /*!< Picture does no rotation */
PPA_SR_ROTATION_ANGLE_90, /*!< Picture rotates 90 degrees CCW */ PPA_SRM_ROTATION_ANGLE_90, /*!< Picture rotates 90 degrees CCW */
PPA_SR_ROTATION_ANGLE_180, /*!< Picture rotates 180 degrees CCW */ PPA_SRM_ROTATION_ANGLE_180, /*!< Picture rotates 180 degrees CCW */
PPA_SR_ROTATION_ANGLE_270, /*!< Picture rotates 270 degrees CCW */ PPA_SRM_ROTATION_ANGLE_270, /*!< Picture rotates 270 degrees CCW */
} ppa_sr_rotation_angle_t; } ppa_srm_rotation_angle_t;
/** /**
* @brief Enumeration of PPA Scaling and Rotating available color mode * @brief Enumeration of PPA Scaling-Rotating-Mirroring available color mode
*/ */
typedef enum { typedef enum {
PPA_SR_COLOR_MODE_ARGB8888 = COLOR_TYPE_ID(COLOR_SPACE_ARGB, COLOR_PIXEL_ARGB8888), /*!< PPA SR color mode: ARGB8888 */ PPA_SRM_COLOR_MODE_ARGB8888 = COLOR_TYPE_ID(COLOR_SPACE_ARGB, COLOR_PIXEL_ARGB8888), /*!< PPA SRM color mode: ARGB8888 */
PPA_SR_COLOR_MODE_RGB888 = COLOR_TYPE_ID(COLOR_SPACE_RGB, COLOR_PIXEL_RGB888), /*!< PPA SR color mode: RGB888 */ PPA_SRM_COLOR_MODE_RGB888 = COLOR_TYPE_ID(COLOR_SPACE_RGB, COLOR_PIXEL_RGB888), /*!< PPA SRM color mode: RGB888 */
PPA_SR_COLOR_MODE_RGB565 = COLOR_TYPE_ID(COLOR_SPACE_RGB, COLOR_PIXEL_RGB565), /*!< PPA SR color mode: RGB565 */ PPA_SRM_COLOR_MODE_RGB565 = COLOR_TYPE_ID(COLOR_SPACE_RGB, COLOR_PIXEL_RGB565), /*!< PPA SRM color mode: RGB565 */
PPA_SR_COLOR_MODE_YUV420 = COLOR_TYPE_ID(COLOR_SPACE_YUV, COLOR_PIXEL_YUV420), /*!< PPA SR color mode: YUV420 */ PPA_SRM_COLOR_MODE_YUV420 = COLOR_TYPE_ID(COLOR_SPACE_YUV, COLOR_PIXEL_YUV420), /*!< PPA SRM color mode: YUV420 */
PPA_SR_COLOR_MODE_YUV444 = COLOR_TYPE_ID(COLOR_SPACE_YUV, COLOR_PIXEL_YUV444), /*!< PPA SR color mode: YUV444 (limited range only)*/ PPA_SRM_COLOR_MODE_YUV444 = COLOR_TYPE_ID(COLOR_SPACE_YUV, COLOR_PIXEL_YUV444), /*!< PPA SRM color mode: YUV444 (limited range only)*/
PPA_SR_COLOR_MODE_YUV422 = COLOR_TYPE_ID(COLOR_SPACE_YUV, COLOR_PIXEL_YUV422), /*!< PPA SR color mode: YUV422 (input only, limited range only) */ PPA_SRM_COLOR_MODE_YUV422 = COLOR_TYPE_ID(COLOR_SPACE_YUV, COLOR_PIXEL_YUV422), /*!< PPA SRM color mode: YUV422 (input only, limited range only) */
// YUV444 and YUV422 are not supported by the PPA hardware, but it seems 2D-DMA can do the conversion before data enters and after it leaves the PPA module // YUV444 and YUV422 are not supported by the PPA hardware, but it seems 2D-DMA can do the conversion before data enters and after it leaves the PPA module
// If in_pic is YUV444/422, then the TX DMA channel could do DMA2D_CSC_TX_YUV444/422_TO_RGB888_601/709, so PPA in_color_mode is RGB888 // If in_pic is YUV444/422, then the TX DMA channel could do DMA2D_CSC_TX_YUV444/422_TO_RGB888_601/709, so PPA in_color_mode is RGB888
// If out_pic is YUV444, then RX DMA channel could do DMA2D_CSC_RX_YUV420_TO_YUV444, so PPA out_color_mode is YUV420 // If out_pic is YUV444, then RX DMA channel could do DMA2D_CSC_RX_YUV420_TO_YUV444, so PPA out_color_mode is YUV420
} ppa_sr_color_mode_t; } ppa_srm_color_mode_t;
/** /**
* @brief Enumeration of PPA Blending available color mode * @brief Enumeration of PPA Blending available color mode
View File
@@ -8,12 +8,12 @@
// The following macros are matched with the 2D-DMA peri_sel field peripheral selection ID // The following macros are matched with the 2D-DMA peri_sel field peripheral selection ID
#define SOC_DMA2D_TRIG_PERIPH_JPEG_RX (0) #define SOC_DMA2D_TRIG_PERIPH_JPEG_RX (0)
#define SOC_DMA2D_TRIG_PERIPH_PPA_SR_RX (1) #define SOC_DMA2D_TRIG_PERIPH_PPA_SRM_RX (1)
#define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_RX (2) #define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_RX (2)
#define SOC_DMA2D_TRIG_PERIPH_M2M_RX (-1) // Any value of 3 ~ 7, TX and RX do not have to use the same ID value for M2M #define SOC_DMA2D_TRIG_PERIPH_M2M_RX (-1) // Any value of 3 ~ 7, TX and RX do not have to use the same ID value for M2M
#define SOC_DMA2D_TRIG_PERIPH_JPEG_TX (0) #define SOC_DMA2D_TRIG_PERIPH_JPEG_TX (0)
#define SOC_DMA2D_TRIG_PERIPH_PPA_SR_TX (1) #define SOC_DMA2D_TRIG_PERIPH_PPA_SRM_TX (1)
#define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_BG_TX (2) #define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_BG_TX (2)
#define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_FG_TX (3) #define SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_FG_TX (3)
#define SOC_DMA2D_TRIG_PERIPH_M2M_TX (-1) // Any value of 4 ~ 7, TX and RX do not have to use the same ID value for M2M #define SOC_DMA2D_TRIG_PERIPH_M2M_TX (-1) // Any value of 4 ~ 7, TX and RX do not have to use the same ID value for M2M
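The macros above are the peri_sel IDs matching each 2D-DMA trigger peripheral. As an illustration only (a hypothetical helper, not part of the driver), the PPA-related RX-side selection could be expressed as:

/* Hypothetical sketch: pick the RX peri_sel ID for a given trigger peripheral.
 * The JPEG cases are omitted; the real driver keeps this mapping internally. */
static int example_rx_periph_sel_id(dma2d_trigger_peripheral_t periph)
{
    switch (periph) {
    case DMA2D_TRIG_PERIPH_PPA_SRM:
        return SOC_DMA2D_TRIG_PERIPH_PPA_SRM_RX;    /* 1 */
    case DMA2D_TRIG_PERIPH_PPA_BLEND:
        return SOC_DMA2D_TRIG_PERIPH_PPA_BLEND_RX;  /* 2 */
    case DMA2D_TRIG_PERIPH_M2M:
        return SOC_DMA2D_TRIG_PERIPH_M2M_RX;        /* any of 3 ~ 7 */
    default:
        return -1;
    }
}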