Add camera support

First automated CMake build
This commit is contained in:
me-no-dev
2020-10-12 23:56:00 +03:00
parent f8b72db3c6
commit 57c96aa4e3
201 changed files with 9180 additions and 10 deletions

View File

@@ -455,6 +455,16 @@
#define CONFIG_WIFI_PROV_SCAN_MAX_ENTRIES 16
#define CONFIG_WIFI_PROV_AUTOSTOP_TIMEOUT 30
#define CONFIG_WPA_MBEDTLS_CRYPTO 1
#define CONFIG_XTENSA_IMPL 1
#define CONFIG_MTMN_LITE_QUANT 1
#define CONFIG_MFN56_1X 1
#define CONFIG_HD_NANO1 1
#define CONFIG_HP_NANO1 1
#define CONFIG_OV2640_SUPPORT 1
#define CONFIG_OV3660_SUPPORT 1
#define CONFIG_OV5640_SUPPORT 1
#define CONFIG_SCCB_HARDWARE_I2C_PORT1 1
#define CONFIG_CAMERA_CORE0 1
/* List of deprecated options */
#define CONFIG_A2DP_ENABLE CONFIG_BT_A2DP_ENABLE
@@ -568,5 +578,5 @@
#define CONFIG_ULP_COPROC_ENABLED CONFIG_ESP32_ULP_COPROC_ENABLED
#define CONFIG_ULP_COPROC_RESERVE_MEM CONFIG_ESP32_ULP_COPROC_RESERVE_MEM
#define CONFIG_WARN_WRITE_STRINGS CONFIG_COMPILER_WARN_WRITE_STRINGS
#define CONFIG_ARDUINO_IDF_COMMIT ""
#define CONFIG_ARDUINO_IDF_BRANCH "release/v4.0"
#define CONFIG_ARDUINO_IDF_COMMIT "6c17e3a64"
#define CONFIG_ARDUINO_IDF_BRANCH "master"
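The added defines above select the esp-face model variants (MTMN lite-quant, MFN56, HD/HP nano1), enable the OV2640/OV3660/OV5640 sensor drivers, run the camera driver on core 0, and record the IDF commit/branch the core was built against. A minimal sketch of how application code might gate camera-specific paths on these flags (the guard itself is illustrative, not part of this commit):

#include "sdkconfig.h"

#if CONFIG_OV2640_SUPPORT || CONFIG_OV3660_SUPPORT || CONFIG_OV5640_SUPPORT
// Camera-dependent code (e.g. sensor probing and frame capture) compiles only
// when at least one supported sensor driver is enabled in sdkconfig.
#endif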

View File

@@ -0,0 +1,103 @@
/*
* ESPRESSIF MIT License
*
* Copyright (c) 2018 <ESPRESSIF SYSTEMS (SHANGHAI) PTE LTD>
*
* Permission is hereby granted for use on ESPRESSIF SYSTEMS products only, in which case,
* it is free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#pragma once
#if __cplusplus
extern "C"
{
#endif
#include "image_util.h"
#include "dl_lib_matrix3d.h"
#include "mtmn.h"
typedef enum
{
FAST = 0, /*!< fast resize type */
NORMAL = 1, /*!< normal resize type */
} mtmn_resize_type;
typedef struct
{
float score; /*!< score threshold for filter candidates by score */
float nms; /*!< nms threshold for nms process */
int candidate_number; /*!< candidate number limitation for each net */
} threshold_config_t;
typedef struct
{
int w; /*!< net width */
int h; /*!< net height */
threshold_config_t threshold; /*!< threshold of net */
} net_config_t;
typedef struct
{
float min_face; /*!< The minimum size of a detectable face */
float pyramid; /*!< The scale factor between adjacent levels of the input image pyramid */
int pyramid_times; /*!< The pyramid resizing times */
threshold_config_t p_threshold; /*!< The thresholds for P-Net. For details, see the definition of threshold_config_t */
threshold_config_t r_threshold; /*!< The thresholds for R-Net. For details, see the definition of threshold_config_t */
threshold_config_t o_threshold; /*!< The thresholds for O-Net. For details, see the definition of threshold_config_t */
mtmn_resize_type type; /*!< The image resize type. 'pyramid' has no effect when 'type' == FAST. */
} mtmn_config_t;
/**
* @brief Get the initial MTMN model configuration
*
* @return mtmn_config_t MTMN configuration
*/
static inline mtmn_config_t mtmn_init_config()
{
mtmn_config_t mtmn_config;
mtmn_config.type = FAST;
mtmn_config.min_face = 80;
mtmn_config.pyramid = 0.707;
mtmn_config.pyramid_times = 4;
mtmn_config.p_threshold.score = 0.6;
mtmn_config.p_threshold.nms = 0.7;
mtmn_config.p_threshold.candidate_number = 20;
mtmn_config.r_threshold.score = 0.7;
mtmn_config.r_threshold.nms = 0.7;
mtmn_config.r_threshold.candidate_number = 10;
mtmn_config.o_threshold.score = 0.7;
mtmn_config.o_threshold.nms = 0.7;
mtmn_config.o_threshold.candidate_number = 1;
return mtmn_config;
}
/**
* @brief Run MTMN face detection and return box and landmark information.
*
* @param image_matrix Image matrix, rgb888 format
* @param config MTMN configuration: score thresholds, NMS thresholds, candidate number limits, pyramid settings and minimum face size
* @return box_array_t* A list of detected boxes with scores and landmarks.
*/
box_array_t *face_detect(dl_matrix3du_t *image_matrix,
mtmn_config_t *config);
#if __cplusplus
}
#endif
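A minimal usage sketch for the API above (not part of this commit): it assumes `frame` already holds an RGB888 image in a dl_matrix3du_t, e.g. converted from a camera frame buffer, and it leaves out freeing of the returned box list.

#include <stdio.h>
#include "fd_forward.h"

static void detect_example(dl_matrix3du_t *frame)
{
    mtmn_config_t cfg = mtmn_init_config();          // defaults shown above
    box_array_t *boxes = face_detect(frame, &cfg);   // NULL when nothing is found
    if (boxes) {
        printf("detected %d face(s), first score %f\n", boxes->len, boxes->score[0]);
        // boxes->box[i] / boxes->landmark[i] hold the box corners and 5-point landmarks
    }
}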

View File

@@ -0,0 +1,82 @@
#pragma once
#if __cplusplus
extern "C"
{
#endif
#include "fr_forward.h"
#define FR_FLASH_TYPE 32
#define FR_FLASH_SUBTYPE 32
#define FR_FLASH_PARTITION_NAME "fr"
#define FR_FLASH_INFO_FLAG 12138
/**
* @brief Produce a face ID from the input aligned face, and save it to the face ID list and to flash.
*
* @param l Face id list
* @param aligned_face An aligned face
* @return -2 Flash partition not found
* @return 0 Enrollment finished
* @return >=1 Number of face samples still required to finish this enrollment
*/
int8_t enroll_face_id_to_flash(face_id_list *l,
dl_matrix3du_t *aligned_face);
/**
* @brief Enroll the given face ID under a name, and save the ID-name pair to the list and to flash.
*
* @param l Face id list
* @param new_id The face ID to be enrolled
* @param name Name corresponding to the face ID
* @return -2 Flash partition not found
* @return 0 Enrollment finished
* @return >=1 Number of face samples still required to finish this enrollment
*/
int8_t enroll_face_id_to_flash_with_name(face_id_name_list *l,
dl_matrix3d_t *new_id,
char *name);
/**
* @brief Read the enrolled face IDs from the flash.
*
* @param l Face id list
* @return int8_t The number of IDs remaining in flash
*/
int8_t read_face_id_from_flash(face_id_list *l);
/**
* @brief Read the enrolled face IDs and their corresponding names from the flash.
*
* @param l Face id list
* @return int8_t The number of IDs remaining in flash
*/
int8_t read_face_id_from_flash_with_name(face_id_name_list *l);
/**
* @brief Delete the enrolled face IDs in the flash.
*
* @param l Face id list
* @return int8_t The number of IDs remaining in flash
*/
int8_t delete_face_id_in_flash(face_id_list *l);
/**
* @brief Delete the enrolled face ID corresponding to the name in the flash.
*
* @param l Face id list
* @param name The name that needs to be deleted
* @return int8_t The number of IDs remaining in flash
*/
int8_t delete_face_id_in_flash_with_name(face_id_name_list *l, char *name);
/**
* @brief Delete all the enrolled face IDs and name pairs in the flash.
*
* @param l Face id list
*/
void delete_face_all_in_flash_with_name(face_id_name_list *l);
#if __cplusplus
}
#endif
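A hedged sketch of restoring enrolled IDs at startup with the functions above; face_id_name_list and face_id_name_init() come from fr_forward.h (included by this header), and the list size and confirm count used here are illustrative.

#include <stdio.h>
#include "fr_flash.h"

static face_id_name_list id_list;

static void restore_enrolled_ids(void)
{
    face_id_name_init(&id_list, 7, 5);                       // up to 7 IDs, 5 samples per enrollment
    int8_t count = read_face_id_from_flash_with_name(&id_list);
    printf("face IDs restored from flash: %d\n", count);
}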

View File

@@ -0,0 +1,194 @@
#pragma once
#if __cplusplus
extern "C"
{
#endif
#include "image_util.h"
#include "dl_lib_matrix3d.h"
#include "frmn.h"
#define FACE_WIDTH 56
#define FACE_HEIGHT 56
#define FACE_ID_SIZE 512
#define FACE_REC_THRESHOLD 0.55
#define LEFT_EYE_X 0
#define LEFT_EYE_Y 1
#define RIGHT_EYE_X 6
#define RIGHT_EYE_Y 7
#define NOSE_X 4
#define NOSE_Y 5
#define LEFT_MOUTH_X 2
#define LEFT_MOUTH_Y 3
#define RIGHT_MOUTH_X 8
#define RIGHT_MOUTH_Y 9
#define EYE_DIST_SET 16.5f
#define NOSE_EYE_RATIO_THRES_MIN 0.49f
#define NOSE_EYE_RATIO_THRES_MAX 2.04f
#define ENROLL_NAME_LEN 16
typedef struct tag_face_id_node
{
struct tag_face_id_node *next; /*!< next face id node */
char id_name[ENROLL_NAME_LEN]; /*!< name corresponding to the face id */
dl_matrix3d_t *id_vec; /*!< face id */
} face_id_node;
typedef struct
{
face_id_node *head; /*!< head pointer of the id list */
face_id_node *tail; /*!< tail pointer of the id list */
uint8_t count; /*!< number of enrolled ids */
uint8_t confirm_times; /*!< images needed for one enrolling */
} face_id_name_list;
typedef struct
{
uint8_t head; /*!< head index of the id list */
uint8_t tail; /*!< tail index of the id list */
uint8_t count; /*!< number of enrolled ids */
uint8_t size; /*!< max len of id list */
uint8_t confirm_times; /*!< images needed for one enrolling */
dl_matrix3d_t **id_list; /*!< stores face id vectors */
} face_id_list;
/**
* @brief Initialize face id list.
*
* @param l Face id list
* @param size Maximum number of IDs in the list (each entry holds one ID vector)
* @param confirm_times Number of face samples required to enroll one ID
*/
void face_id_init(face_id_list *l, uint8_t size, uint8_t confirm_times);
/**
* @brief Initialize face id list with name.
*
* @param l Face id list
* @param size Maximum number of IDs in the list (each entry holds one ID vector)
* @param confirm_times Number of face samples required to enroll one ID
*/
void face_id_name_init(face_id_name_list *l, uint8_t size, uint8_t confirm_times);
/**
* @brief Alloc memory for aligned face.
*
* @return dl_matrix3du_t* Size: 1xFACE_WIDTHxFACE_HEIGHTx3
*/
dl_matrix3du_t *aligned_face_alloc();
/**@{*/
/**
* @brief Align detected face to average face according to landmark.
*
* @param onet_boxes Output of MTMN with box and landmark
* @param src Image matrix, rgb888 format
* @param dest Output image
* @return ESP_OK Input face is good for recognition
* @return ESP_FAIL Input face is not good for recognition
*/
int8_t align_face_rot(box_array_t *onet_boxes,
dl_matrix3du_t *src,
dl_matrix3du_t *dest);
int8_t align_face_sim(box_array_t *onet_boxes,
dl_matrix3du_t *src,
dl_matrix3du_t *dest);
inline int8_t align_face(box_array_t *onet_boxes,
dl_matrix3du_t *src,
dl_matrix3du_t *dest)
{
return align_face_sim(onet_boxes, src, dest);
}
/**@}*/
/**
* @brief Run the face recognition model to get the face feature
*
* @param aligned_face A 56x56x3 image that has already been processed by align_face
* @return face_id A face ID vector of size (1, 1, 1, 512)
*/
dl_matrix3d_t *get_face_id(dl_matrix3du_t *aligned_face);
/**
* @brief Add src_id to dest_id
*
* @param dest_id Face id after accumulation
* @param src_id Face id to be added
*/
void add_face_id(dl_matrix3d_t *dest_id,
dl_matrix3d_t *src_id);
/**
* @brief Match face with the id_list, and return matched_id.
*
* @param l An ID list
* @param aligned_face An aligned face
* @return int8_t Matched face id
*/
int8_t recognize_face(face_id_list *l, dl_matrix3du_t *aligned_face);
/**
* @brief Match face id with the id_list, and return matched face id node.
*
* @param l Face ID list with names
* @param face_id Face ID to be matched
* @return face_id_node* The matched face ID node
*/
face_id_node *recognize_face_with_name(face_id_name_list *l, dl_matrix3d_t *face_id);
/**
* @brief Produce a face ID from the input aligned face, and save it to the face ID list.
*
* @param l Face id list
* @param aligned_face An aligned face
* @return -1 Invalid confirm_times in the face ID list
* @return 0 Enrollment finished
* @return >=1 Number of face samples still required to finish this enrollment
*/
int8_t enroll_face(face_id_list *l, dl_matrix3du_t *aligned_face);
/**
* @brief Enroll the given face ID under a name, and append the ID-name pair to the face ID list
*
* @param l Face id list with name
* @param new_id The face ID to be enrolled
* @param name Name corresponding to the face ID
* @return int8_t Number of face samples still required to finish this enrollment
*/
int8_t enroll_face_with_name(face_id_name_list *l,
dl_matrix3d_t *new_id,
char *name);
/**
* @brief Delete the enrolled face IDs
*
* @param l Face id list
* @return uint8_t The number of IDs remaining in face id list
*/
uint8_t delete_face(face_id_list *l);
/**
* @brief Delete the enrolled face IDs and associated names
*
* @param l Face id list
* @param name The name that needs to be deleted
* @return int8_t The number of IDs remaining in face id list
*/
int8_t delete_face_with_name(face_id_name_list *l, char *name);
/**
* @brief Delete all the enrolled face IDs and name pairs
*
* @param l Face id list with names
*/
void delete_face_all_with_name(face_id_name_list *l);
#if __cplusplus
}
#endif
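An end-to-end recognition sketch tying the declarations above together (illustrative only): `frame` is an RGB888 frame in a dl_matrix3du_t, `net_boxes` is the box list returned by face_detect() for that frame, and ESP_OK comes from esp_err.h.

#include "esp_err.h"
#include "fr_forward.h"

static int8_t recognize_example(face_id_list *l, dl_matrix3du_t *frame, box_array_t *net_boxes)
{
    int8_t matched_id = -1;
    dl_matrix3du_t *aligned = aligned_face_alloc();              // 1 x FACE_WIDTH x FACE_HEIGHT x 3
    if (aligned && align_face(net_boxes, frame, aligned) == ESP_OK) {
        matched_id = recognize_face(l, aligned);                 // matched ID index, negative if none
    }
    dl_matrix3du_free(aligned);
    return matched_id;
}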

View File

@@ -0,0 +1,344 @@
/*
* ESPRESSIF MIT License
*
* Copyright (c) 2018 <ESPRESSIF SYSTEMS (SHANGHAI) PTE LTD>
*
* Permission is hereby granted for use on ESPRESSIF SYSTEMS products only, in which case,
* it is free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
#include <stdint.h>
#include <math.h>
#include <assert.h>
#ifdef __cplusplus
}
#endif
typedef enum
{
IMAGE_RESIZE_BILINEAR = 0, /*<! Resize image by taking bilinear of four pixels */
IMAGE_RESIZE_MEAN = 1, /*<! Resize image by taking mean of four pixels */
IMAGE_RESIZE_NEAREST = 2 /*<! Resize image by taking the nearest pixel */
} image_resize_t;
template <class T>
class Image
{
public:
/**
* @brief Convert a RGB565 pixel to RGB888
*
* @param input Pixel value in RGB565
* @param output Pixel value in RGB888
*/
static inline void pixel_rgb565_to_rgb888(uint16_t input, T *output)
{
output[2] = (input & 0x1F00) >> 5; //blue
output[1] = ((input & 0x7) << 5) | ((input & 0xE000) >> 11); //green
output[0] = input & 0xF8; //red
};
/**
* @brief Resize an RGB565 image into an RGB888 image
*
* @param dst_image The destination image
* @param y_start The start y index of the destination region
* @param y_end The end y index of the destination region
* @param x_start The start x index of the destination region
* @param x_end The end x index of the destination region
* @param channel The channel number of image
* @param src_image The source image
* @param src_h The height of source image
* @param src_w The width of source image
* @param dst_w The width of destination image
* @param shift_left The bit number of left shifting
* @param type The resize type
*/
static void resize_to_rgb888(T *dst_image, int y_start, int y_end, int x_start, int x_end, int channel, uint16_t *src_image, int src_h, int src_w, int dst_w, int shift_left, image_resize_t type);
/**
* @brief Resize an RGB888 image into another RGB888 image
*
* @param dst_image The destination image
* @param y_start The start y index of the destination region
* @param y_end The end y index of the destination region
* @param x_start The start x index of the destination region
* @param x_end The end x index of the destination region
* @param channel The channel number of image
* @param src_image The source image
* @param src_h The height of source image
* @param src_w The width of source image
* @param dst_w The width of destination image
* @param shift_left The bit number of left shifting
* @param type The resize type
*/
static void resize_to_rgb888(T *dst_image, int y_start, int y_end, int x_start, int x_end, int channel, uint8_t *src_image, int src_h, int src_w, int dst_w, int shift_left, image_resize_t type);
// static void resize_to_rgb565(uint16_t *dst_image, int y_start, int y_end, int x_start, int x_end, int channel, uint16_t *src_image, int src_h, int src_w, int dst_w, int shift_left, image_resize_t type);
// static void resize_to_rgb565(uint16_t *dst_image, int y_start, int y_end, int x_start, int x_end, int channel, uint8_t *src_image, int src_h, int src_w, int dst_w, int shift_left, image_resize_t type);
};
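A short sketch of calling the RGB565 overload declared above: shrink a camera frame into a 96x96 RGB888 buffer (buffer names and sizes are illustrative; shift_left = 0 keeps the unpacked 8-bit values unchanged).

static void resize_example(uint16_t *rgb565_frame, int src_w, int src_h)
{
    static uint8_t dst[96 * 96 * 3];                 // illustrative destination buffer
    Image<uint8_t>::resize_to_rgb888(dst, 0, 96, 0, 96, 3,
                                     rgb565_frame, src_h, src_w, 96,
                                     0, IMAGE_RESIZE_BILINEAR);
}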
template <class T>
void Image<T>::resize_to_rgb888(T *dst_image, int y_start, int y_end, int x_start, int x_end, int channel, uint16_t *src_image, int src_h, int src_w, int dst_w, int shift_left, image_resize_t type)
{
assert(channel == 3);
float scale_y = (float)src_h / (y_end - y_start);
float scale_x = (float)src_w / (x_end - x_start);
int temp[13];
switch (type)
{
case IMAGE_RESIZE_BILINEAR:
for (size_t y = y_start; y < y_end; y++)
{
float ratio_y[2];
ratio_y[0] = (float)((y + 0.5) * scale_y - 0.5); // y
int src_y = (int)ratio_y[0]; // y1
ratio_y[0] -= src_y; // y - y1
if (src_y < 0)
{
ratio_y[0] = 0;
src_y = 0;
}
if (src_y > src_h - 2)
{
ratio_y[0] = 0;
src_y = src_h - 2;
}
ratio_y[1] = 1 - ratio_y[0]; // y2 - y
int _dst_i = y * dst_w;
int _src_row_0 = src_y * src_w;
int _src_row_1 = _src_row_0 + src_w;
for (size_t x = x_start; x < x_end; x++)
{
float ratio_x[2];
ratio_x[0] = (float)((x + 0.5) * scale_x - 0.5); // x
int src_x = (int)ratio_x[0]; // x1
ratio_x[0] -= src_x; // x - x1
if (src_x < 0)
{
ratio_x[0] = 0;
src_x = 0;
}
if (src_x > src_w - 2)
{
ratio_x[0] = 0;
src_x = src_w - 2;
}
ratio_x[1] = 1 - ratio_x[0]; // x2 - x
int dst_i = (_dst_i + x) * channel;
int src_row_0 = _src_row_0 + src_x;
int src_row_1 = _src_row_1 + src_x;
Image<int>::pixel_rgb565_to_rgb888(src_image[src_row_0], temp);
Image<int>::pixel_rgb565_to_rgb888(src_image[src_row_0 + 1], temp + 3);
Image<int>::pixel_rgb565_to_rgb888(src_image[src_row_1], temp + 6);
Image<int>::pixel_rgb565_to_rgb888(src_image[src_row_1 + 1], temp + 9);
for (int c = 0; c < channel; c++)
{
temp[12] = round(temp[c] * ratio_x[1] * ratio_y[1] + temp[channel + c] * ratio_x[0] * ratio_y[1] + temp[channel + channel + c] * ratio_x[1] * ratio_y[0] + temp[channel + channel + channel + c] * ratio_x[0] * ratio_y[0]); // use the fourth unpacked pixel (temp + 9), not the raw src_image buffer
dst_image[dst_i + c] = (shift_left > 0) ? (temp[12] << shift_left) : (temp[12] >> -shift_left);
}
}
}
break;
case IMAGE_RESIZE_MEAN:
shift_left -= 2;
for (int y = y_start; y < y_end; y++)
{
int _dst_i = y * dst_w;
float _src_row_0 = rintf(y * scale_y) * src_w;
float _src_row_1 = _src_row_0 + src_w;
for (int x = x_start; x < x_end; x++)
{
int dst_i = (_dst_i + x) * channel;
int src_row_0 = (_src_row_0 + rintf(x * scale_x));
int src_row_1 = (_src_row_1 + rintf(x * scale_x));
Image<int>::pixel_rgb565_to_rgb888(src_image[src_row_0], temp);
Image<int>::pixel_rgb565_to_rgb888(src_image[src_row_0 + 1], temp + 3);
Image<int>::pixel_rgb565_to_rgb888(src_image[src_row_1], temp + 6);
Image<int>::pixel_rgb565_to_rgb888(src_image[src_row_1 + 1], temp + 9);
dst_image[dst_i] = (shift_left > 0) ? ((temp[0] + temp[3] + temp[6] + temp[9]) << shift_left) : ((temp[0] + temp[3] + temp[6] + temp[9]) >> -shift_left);
dst_image[dst_i + 1] = (shift_left > 0) ? ((temp[1] + temp[4] + temp[7] + temp[10]) << shift_left) : ((temp[1] + temp[4] + temp[7] + temp[10]) >> -shift_left);
dst_image[dst_i + 2] = (shift_left > 0) ? ((temp[2] + temp[5] + temp[8] + temp[11]) << shift_left) : ((temp[2] + temp[5] + temp[8] + temp[11]) >> -shift_left);
}
}
break;
case IMAGE_RESIZE_NEAREST:
for (size_t y = y_start; y < y_end; y++)
{
int _dst_i = y * dst_w;
float _src_i = rintf(y * scale_y) * src_w;
for (size_t x = x_start; x < x_end; x++)
{
int dst_i = (_dst_i + x) * channel;
int src_i = _src_i + rintf(x * scale_x);
Image<int>::pixel_rgb565_to_rgb888(src_image[src_i], temp);
dst_image[dst_i] = (shift_left > 0) ? (temp[0] << shift_left) : (temp[0] >> -shift_left);
dst_image[dst_i + 1] = (shift_left > 0) ? (temp[1] << shift_left) : (temp[1] >> -shift_left);
dst_image[dst_i + 2] = (shift_left > 0) ? (temp[2] << shift_left) : (temp[2] >> -shift_left);
}
}
break;
default:
break;
}
}
template <class T>
void Image<T>::resize_to_rgb888(T *dst_image, int y_start, int y_end, int x_start, int x_end, int channel, uint8_t *src_image, int src_h, int src_w, int dst_w, int shift_left, image_resize_t type)
{
float scale_y = (float)src_h / (y_end - y_start);
float scale_x = (float)src_w / (x_end - x_start);
int temp;
switch (type)
{
case IMAGE_RESIZE_BILINEAR:
for (size_t y = y_start; y < y_end; y++)
{
float ratio_y[2];
ratio_y[0] = (float)((y + 0.5) * scale_y - 0.5); // y
int src_y = (int)ratio_y[0]; // y1
ratio_y[0] -= src_y; // y - y1
if (src_y < 0)
{
ratio_y[0] = 0;
src_y = 0;
}
if (src_y > src_h - 2)
{
ratio_y[0] = 0;
src_y = src_h - 2;
}
ratio_y[1] = 1 - ratio_y[0]; // y2 - y
int _dst_i = y * dst_w;
int _src_row_0 = src_y * src_w;
int _src_row_1 = _src_row_0 + src_w;
for (size_t x = x_start; x < x_end; x++)
{
float ratio_x[2];
ratio_x[0] = (float)((x + 0.5) * scale_x - 0.5); // x
int src_x = (int)ratio_x[0]; // x1
ratio_x[0] -= src_x; // x - x1
if (src_x < 0)
{
ratio_x[0] = 0;
src_x = 0;
}
if (src_x > src_w - 2)
{
ratio_x[0] = 0;
src_x = src_w - 2;
}
ratio_x[1] = 1 - ratio_x[0]; // x2 - x
int dst_i = (_dst_i + x) * channel;
int src_row_0 = (_src_row_0 + src_x) * channel;
int src_row_1 = (_src_row_1 + src_x) * channel;
for (int c = 0; c < channel; c++)
{
temp = round(src_image[src_row_0 + c] * ratio_x[1] * ratio_y[1] + src_image[src_row_0 + channel + c] * ratio_x[0] * ratio_y[1] + src_image[src_row_1 + c] * ratio_x[1] * ratio_y[0] + src_image[src_row_1 + channel + c] * ratio_x[0] * ratio_y[0]);
dst_image[dst_i + c] = (shift_left > 0) ? (temp << shift_left) : (temp >> -shift_left);
}
}
}
break;
case IMAGE_RESIZE_MEAN:
shift_left -= 2;
for (size_t y = y_start; y < y_end; y++)
{
int _dst_i = y * dst_w;
float _src_row_0 = rintf(y * scale_y) * src_w;
float _src_row_1 = _src_row_0 + src_w;
for (size_t x = x_start; x < x_end; x++)
{
int dst_i = (_dst_i + x) * channel;
int src_row_0 = (_src_row_0 + rintf(x * scale_x)) * channel;
int src_row_1 = (_src_row_1 + rintf(x * scale_x)) * channel;
for (size_t c = 0; c < channel; c++)
{
temp = (int)src_image[src_row_0 + c] + (int)src_image[src_row_0 + channel + c] + (int)src_image[src_row_1 + c] + (int)src_image[src_row_1 + channel + c];
dst_image[dst_i + c] = (shift_left > 0) ? (temp << shift_left) : (temp >> -shift_left);
}
}
}
break;
case IMAGE_RESIZE_NEAREST:
for (size_t y = y_start; y < y_end; y++)
{
int _dst_i = y * dst_w;
float _src_i = rintf(y * scale_y) * src_w;
for (size_t x = x_start; x < x_end; x++)
{
int dst_i = (_dst_i + x) * channel;
int src_i = (_src_i + rintf(x * scale_x)) * channel;
for (size_t c = 0; c < channel; c++)
{
dst_image[dst_i + c] = (shift_left > 0) ? ((T)src_image[src_i + c] << shift_left) : ((T)src_image[src_i + c] >> -shift_left);
}
}
}
break;
default:
break;
}
}

View File

@@ -0,0 +1,548 @@
/*
* ESPRESSIF MIT License
*
* Copyright (c) 2018 <ESPRESSIF SYSTEMS (SHANGHAI) PTE LTD>
*
* Permission is hereby granted for use on ESPRESSIF SYSTEMS products only, in which case,
* it is free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
#include <stdint.h>
#include <math.h>
#include "mtmn.h"
#define LANDMARKS_NUM (10)
#define MAX_VALID_COUNT_PER_IMAGE (30)
#define DL_IMAGE_MIN(A, B) ((A) < (B) ? (A) : (B))
#define DL_IMAGE_MAX(A, B) ((A) < (B) ? (B) : (A))
#define RGB565_MASK_RED 0xF800
#define RGB565_MASK_GREEN 0x07E0
#define RGB565_MASK_BLUE 0x001F
typedef enum
{
BINARY, /*!< binary */
} en_threshold_mode;
typedef struct
{
fptp_t landmark_p[LANDMARKS_NUM]; /*!< landmark struct */
} landmark_t;
typedef struct
{
fptp_t box_p[4]; /*!< box struct */
} box_t;
typedef struct tag_box_list
{
uint8_t *category; /*!< The category of the corresponding box */
fptp_t *score; /*!< The confidence score of the class corresponding to the box */
box_t *box; /*!< Anchor boxes or predicted boxes*/
landmark_t *landmark; /*!< The landmarks corresponding to the box */
int len; /*!< The num of the boxes */
} box_array_t;
typedef struct tag_image_box
{
struct tag_image_box *next; /*!< Next image_box_t */
uint8_t category;
fptp_t score; /*!< The confidence score of the class corresponding to the box */
box_t box; /*!< Anchor boxes or predicted boxes */
box_t offset; /*!< The predicted anchor-based offset */
landmark_t landmark; /*!< The landmarks corresponding to the box */
} image_box_t;
typedef struct tag_image_list
{
image_box_t *head; /*!< The current head of the image_list */
image_box_t *origin_head; /*!< The original head of the image_list */
int len; /*!< Length of the image_list */
} image_list_t;
/**
* @brief Get the width and height of the box.
*
* @param box Input box
* @param w Resulting width of the box
* @param h Resulting height of the box
*/
static inline void image_get_width_and_height(box_t *box, float *w, float *h)
{
*w = box->box_p[2] - box->box_p[0] + 1;
*h = box->box_p[3] - box->box_p[1] + 1;
}
/**
* @brief Get the area of the box.
*
* @param box Input box
* @param area Resulting area of the box
*/
static inline void image_get_area(box_t *box, float *area)
{
float w, h;
image_get_width_and_height(box, &w, &h);
*area = w * h;
}
/**
* @brief calibrate the boxes by offset
*
* @param image_list Input boxes
* @param image_height Height of the original image
* @param image_width Width of the original image
*/
static inline void image_calibrate_by_offset(image_list_t *image_list, int image_height, int image_width)
{
for (image_box_t *head = image_list->head; head; head = head->next)
{
float w, h;
image_get_width_and_height(&(head->box), &w, &h);
head->box.box_p[0] = DL_IMAGE_MAX(0, head->box.box_p[0] + head->offset.box_p[0] * w);
head->box.box_p[1] = DL_IMAGE_MAX(0, head->box.box_p[1] + head->offset.box_p[1] * w);
head->box.box_p[2] += head->offset.box_p[2] * w;
if (head->box.box_p[2] > image_width)
{
head->box.box_p[2] = image_width - 1;
head->box.box_p[0] = image_width - w;
}
head->box.box_p[3] += head->offset.box_p[3] * h;
if (head->box.box_p[3] > image_height)
{
head->box.box_p[3] = image_height - 1;
head->box.box_p[1] = image_height - h;
}
}
}
/**
* @brief calibrate the landmarks
*
* @param image_list Input landmarks
*/
static inline void image_landmark_calibrate(image_list_t *image_list)
{
for (image_box_t *head = image_list->head; head; head = head->next)
{
float w, h;
image_get_width_and_height(&(head->box), &w, &h);
head->landmark.landmark_p[0] = head->box.box_p[0] + head->landmark.landmark_p[0] * w;
head->landmark.landmark_p[1] = head->box.box_p[1] + head->landmark.landmark_p[1] * h;
head->landmark.landmark_p[2] = head->box.box_p[0] + head->landmark.landmark_p[2] * w;
head->landmark.landmark_p[3] = head->box.box_p[1] + head->landmark.landmark_p[3] * h;
head->landmark.landmark_p[4] = head->box.box_p[0] + head->landmark.landmark_p[4] * w;
head->landmark.landmark_p[5] = head->box.box_p[1] + head->landmark.landmark_p[5] * h;
head->landmark.landmark_p[6] = head->box.box_p[0] + head->landmark.landmark_p[6] * w;
head->landmark.landmark_p[7] = head->box.box_p[1] + head->landmark.landmark_p[7] * h;
head->landmark.landmark_p[8] = head->box.box_p[0] + head->landmark.landmark_p[8] * w;
head->landmark.landmark_p[9] = head->box.box_p[1] + head->landmark.landmark_p[9] * h;
}
}
/**
* @brief Convert a rectangular box into a square box
*
* @param boxes Input box
* @param width Width of the original image
* @param height Height of the original image
*/
static inline void image_rect2sqr(box_array_t *boxes, int width, int height)
{
for (int i = 0; i < boxes->len; i++)
{
box_t *box = &(boxes->box[i]);
int x1 = round(box->box_p[0]);
int y1 = round(box->box_p[1]);
int x2 = round(box->box_p[2]);
int y2 = round(box->box_p[3]);
int w = x2 - x1 + 1;
int h = y2 - y1 + 1;
int l = DL_IMAGE_MAX(w, h);
box->box_p[0] = DL_IMAGE_MAX(round(DL_IMAGE_MAX(0, x1) + 0.5 * (w - l)), 0);
box->box_p[1] = DL_IMAGE_MAX(round(DL_IMAGE_MAX(0, y1) + 0.5 * (h - l)), 0);
box->box_p[2] = box->box_p[0] + l - 1;
if (box->box_p[2] > width)
{
box->box_p[2] = width - 1;
box->box_p[0] = width - l;
}
box->box_p[3] = box->box_p[1] + l - 1;
if (box->box_p[3] > height)
{
box->box_p[3] = height - 1;
box->box_p[1] = height - l;
}
}
}
/**@{*/
/**
* @brief Convert an RGB565 pixel to RGB888
*
* @param in Input RGB565 pixel
* @param dst Resulting RGB888 pixel (3 bytes)
*/
static inline void rgb565_to_888(uint16_t in, uint8_t *dst)
{ /*{{{*/
in = (in & 0xFF) << 8 | (in & 0xFF00) >> 8;
dst[2] = (in & RGB565_MASK_BLUE) << 3; // blue
dst[1] = (in & RGB565_MASK_GREEN) >> 3; // green
dst[0] = (in & RGB565_MASK_RED) >> 8; // red
// dst[0] = (in & 0x1F00) >> 5;
// dst[1] = ((in & 0x7) << 5) | ((in & 0xE000) >> 11);
// dst[2] = in & 0xF8;
} /*}}}*/
static inline void rgb565_to_888_q16(uint16_t in, int16_t *dst)
{ /*{{{*/
in = (in & 0xFF) << 8 | (in & 0xFF00) >> 8;
dst[2] = (in & RGB565_MASK_BLUE) << 3; // blue
dst[1] = (in & RGB565_MASK_GREEN) >> 3; // green
dst[0] = (in & RGB565_MASK_RED) >> 8; // red
// dst[0] = (in & 0x1F00) >> 5;
// dst[1] = ((in & 0x7) << 5) | ((in & 0xE000) >> 11);
// dst[2] = in & 0xF8;
} /*}}}*/
/**@}*/
/**
* @brief Convert an RGB888 pixel to RGB565
*
* @param in Resulting RGB565 pixel
* @param r The red channel of the input RGB888 pixel
* @param g The green channel of the input RGB888 pixel
* @param b The blue channel of the input RGB888 pixel
*/
static inline void rgb888_to_565(uint16_t *in, uint8_t r, uint8_t g, uint8_t b)
{ /*{{{*/
uint16_t rgb565 = 0;
rgb565 = ((r >> 3) << 11);
rgb565 |= ((g >> 2) << 5);
rgb565 |= (b >> 3);
rgb565 = (rgb565 & 0xFF) << 8 | (rgb565 & 0xFF00) >> 8;
*in = rgb565;
} /*}}}*/
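A tiny round-trip check for the two helpers above (illustrative only); note that both byte-swap the 16-bit value, presumably to match the byte order the camera sensor delivers.

static inline void pixel_roundtrip_example(void)
{
    uint16_t px565;
    uint8_t rgb[3];
    rgb888_to_565(&px565, 0xFF, 0x80, 0x00);   // pack an orange pixel
    rgb565_to_888(px565, rgb);                 // unpack: rgb == {0xF8, 0x80, 0x00}
}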
/**
* @brief Filter out boxes whose confidence score is below the threshold, and convert the remaining boxes to absolute coordinates on the original image ((x, y, w, h) -> (x1, y1, x2, y2)).
*
* @param score Confidence score of the boxes
* @param offset The predicted anchor-based offset
* @param landmark The landmarks corresponding to the box
* @param width Width of the original image
* @param height Height of the original image
* @param anchor_number Anchor number of the detection output feature map
* @param anchors_size The anchor size
* @param score_threshold Threshold of the confidence score
* @param stride Stride of the detection output feature map
* @param resized_height_scale Height scale between the resized image and the original image
* @param resized_width_scale Width scale between the resized image and the original image
* @param do_regression Whether to apply the predicted offsets to the boxes
* @return image_list_t*
*/
image_list_t *image_get_valid_boxes(fptp_t *score,
fptp_t *offset,
fptp_t *landmark,
int width,
int height,
int anchor_number,
int *anchors_size,
fptp_t score_threshold,
int stride,
fptp_t resized_height_scale,
fptp_t resized_width_scale,
bool do_regression);
/**
* @brief Sort the resulting box lists by their confidence score.
*
* @param image_sorted_list The sorted box list.
* @param insert_list The box list that has not been sorted.
*/
void image_sort_insert_by_score(image_list_t *image_sorted_list, const image_list_t *insert_list);
/**
* @brief Run NMS algorithm
*
* @param image_list The input boxes list
* @param nms_threshold NMS threshold
* @param same_area The flag of boxes with same area
*/
void image_nms_process(image_list_t *image_list, fptp_t nms_threshold, int same_area);
/**
* @brief Resize an image to half size
*
* @param dimage The output image
* @param dw Width of the output image
* @param dh Height of the output image
* @param dc Channel of the output image
* @param simage Source image
* @param sw Width of the source image
* @param sc Channel of the source image
*/
void image_zoom_in_twice(uint8_t *dimage,
int dw,
int dh,
int dc,
uint8_t *simage,
int sw,
int sc);
/**
* @brief Resize the image in RGB888 format via bilinear interpolation
*
* @param dst_image The output image
* @param src_image Source image
* @param dst_w Width of the output image
* @param dst_h Height of the output image
* @param dst_c Channel of the output image
* @param src_w Width of the source image
* @param src_h Height of the source image
*/
void image_resize_linear(uint8_t *dst_image, uint8_t *src_image, int dst_w, int dst_h, int dst_c, int src_w, int src_h);
/**
* @brief Crop, rotate and zoom the image in RGB888 format.
*
* @param corp_image The output image
* @param src_image Source image
* @param rotate_angle Rotate angle
* @param ratio scaling ratio
* @param center Center of rotation
*/
void image_cropper(uint8_t *corp_image, uint8_t *src_image, int dst_w, int dst_h, int dst_c, int src_w, int src_h, float rotate_angle, float ratio, float *center);
/**
* @brief Convert the rgb565 image to the rgb888 image
*
* @param m The output rgb888 image
* @param bmp The input rgb565 image
* @param count Total pixels of the rgb565 image
*/
void image_rgb565_to_888(uint8_t *m, uint16_t *bmp, int count);
/**
* @brief Convert the rgb888 image to the rgb565 image
*
* @param bmp The output rgb565 image
* @param m The input rgb888 image
* @param count Total pixels of the rgb565 image
*/
void image_rgb888_to_565(uint16_t *bmp, uint8_t *m, int count);
/**
* @brief draw rectangle on the rgb565 image
*
* @param buf Input image
* @param boxes Rectangle Boxes
* @param width Width of the input image
*/
void draw_rectangle_rgb565(uint16_t *buf, box_array_t *boxes, int width);
/**
* @brief draw rectangle on the rgb888 image
*
* @param buf Input image
* @param boxes Rectangle Boxes
* @param width Width of the input image
*/
void draw_rectangle_rgb888(uint8_t *buf, box_array_t *boxes, int width);
/**
* @brief Get the pixel difference of two images
*
* @param dst The output pixel difference
* @param src1 Input image 1
* @param src2 Input image 2
* @param count Total pixels of the input image
*/
void image_abs_diff(uint8_t *dst, uint8_t *src1, uint8_t *src2, int count);
/**
* @brief Binarize an image to 0 and value.
*
* @param dst The output image
* @param src Source image
* @param threshold Threshold of binarization
* @param value The value of binarization
* @param count Total pixels of the input image
* @param mode Threshold mode
*/
void image_threshold(uint8_t *dst, uint8_t *src, int threshold, int value, int count, en_threshold_mode mode);
/**
* @brief Erode the image
*
* @param dst The output image
* @param src Source image
* @param src_w Width of the source image
* @param src_h Height of the source image
* @param src_c Channel of the source image
*/
void image_erode(uint8_t *dst, uint8_t *src, int src_w, int src_h, int src_c);
typedef float matrixType;
typedef struct
{
int w; /*!< width */
int h; /*!< height */
matrixType **array; /*!< array */
} Matrix;
/**
* @brief Allocate a 2d matrix
*
* @param h Height of matrix
* @param w Width of matrix
* @return Matrix* 2d matrix
*/
Matrix *matrix_alloc(int h, int w);
/**
* @brief Free a 2d matrix
*
* @param m 2d matrix
*/
void matrix_free(Matrix *m);
/**
* @brief Get the similarity matrix of similarity transformation
*
* @param srcx Source x coordinates
* @param srcy Source y coordinates
* @param dstx Destination x coordinates
* @param dsty Destination y coordinates
* @param num The number of the coordinates
* @return Matrix* The resulting transformation matrix
*/
Matrix *get_similarity_matrix(float *srcx, float *srcy, float *dstx, float *dsty, int num);
/**
* @brief Get the affine transformation matrix
*
* @param srcx Source x coordinates
* @param srcy Source y coordinates
* @param dstx Destination x coordinates
* @param dsty Destination y coordinates
* @return Matrix* The resulting transformation matrix
*/
Matrix *get_affine_transform(float *srcx, float *srcy, float *dstx, float *dsty);
/**
* @brief Applies an affine transformation to an image
*
* @param img Input image
* @param crop Dst output image that has the size dsize and the same type as src
* @param M Affine transformation matrix
*/
void warp_affine(dl_matrix3du_t *img, dl_matrix3du_t *crop, Matrix *M);
/**
* @brief Resize the image in RGB888 format via bilinear interpolation, and quantize the output image
*
* @param dst_image Quantized output image
* @param src_image Input image
* @param dst_w Width of the output image
* @param dst_h Height of the output image
* @param dst_c Channel of the output image
* @param src_w Width of the input image
* @param src_h Height of the input image
* @param shift Shift parameter of quantization.
*/
void image_resize_linear_q(qtp_t *dst_image, uint8_t *src_image, int dst_w, int dst_h, int dst_c, int src_w, int src_h, int shift);
/**
* @brief Preprocess the input image of the object detection model: resize -> normalize -> quantize
*
* @param image Input image, RGB888 format.
* @param input_w Width of the input image.
* @param input_h Height of the input image.
* @param target_size Target size of the model input image.
* @param exponent Exponent of the quantized model input image.
* @param process_mode Process mode. 0: resize with padding to keep height == width. 1: resize without padding, height != width.
* @return dl_matrix3dq_t* The resulting preprocessed image.
*/
dl_matrix3dq_t *image_resize_normalize_quantize(uint8_t *image, int input_w, int input_h, int target_size, int exponent, int process_mode);
/**
* @brief Resize the image in RGB565 format via mean neighbour interpolation, and quantize the output image
*
* @param dimage Quantized output image.
* @param simage Input image.
* @param dw Width of the allocated output image memory.
* @param dc Channel of the allocated output image memory.
* @param sw Width of the input image.
* @param sh Height of the input image.
* @param tw Target width of the output image.
* @param th Target height of the output image.
* @param shift Shift parameter of quantization.
*/
void image_resize_shift_fast(qtp_t *dimage, uint16_t *simage, int dw, int dc, int sw, int sh, int tw, int th, int shift);
/**
* @brief Resize the image in RGB565 format via nearest neighbour interpolation, and quantize the output image
*
* @param dimage Quantized output image.
* @param simage Input image.
* @param dw Width of the allocated output image memory.
* @param dc Channel of the allocated output image memory.
* @param sw Width of the input image.
* @param sh Height of the input image.
* @param tw Target width of the output image.
* @param th Target height of the output image.
* @param shift Shift parameter of quantization.
*/
void image_resize_nearest_shift(qtp_t *dimage, uint16_t *simage, int dw, int dc, int sw, int sh, int tw, int th, int shift);
/**
* @brief Crop the image in RGB565 format, resize it to the target size, then quantize the output image
*
* @param dimage Quantized output image.
* @param simage Input image.
* @param dw Target size of the output image.
* @param sw Width of the input image.
* @param sh Height of the input image.
* @param x1 The x coordinate of the upper left corner of the cropped area
* @param y1 The y coordinate of the upper left corner of the cropped area
* @param x2 The x coordinate of the lower right corner of the cropped area
* @param y2 The y coordinate of the lower right corner of the cropped area
* @param shift Shift parameter of quantization.
*/
void image_crop_shift_fast(qtp_t *dimage, uint16_t *simage, int dw, int sw, int sh, int x1, int y1, int x2, int y2, int shift);
#ifdef __cplusplus
}
#endif
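A sketch combining two of the helpers declared above: convert an RGB565 frame to RGB888 and then resize it (output buffers are assumed to be preallocated by the caller; sizes are illustrative).

static void frame_prepare_example(uint16_t *fb565, int w, int h,
                                  uint8_t *rgb888, uint8_t *rgb888_96x96)
{
    image_rgb565_to_888(rgb888, fb565, w * h);                   // full-resolution RGB888 copy
    image_resize_linear(rgb888_96x96, rgb888, 96, 96, 3, w, h);  // bilinear resize to 96x96x3
}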

View File

@@ -0,0 +1,40 @@
/*
* ESPRESSIF MIT License
*
* Copyright (c) 2018 <ESPRESSIF SYSTEMS (SHANGHAI) PTE LTD>
*
* Permission is hereby granted for use on ESPRESSIF SYSTEMS products only, in which case,
* it is free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
#include "dl_lib_matrix3d.h"
#include "dl_lib_matrix3dq.h"
#include "freertos/FreeRTOS.h"
#include "detection.h"
extern detection_model_t cat_face_3_model;
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,87 @@
/*
* ESPRESSIF MIT License
*
* Copyright (c) 2018 <ESPRESSIF SYSTEMS (SHANGHAI) PTE LTD>
*
* Permission is hereby granted for use on ESPRESSIF SYSTEMS products only, in which case,
* it is free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
#include "dl_lib_matrix3d.h"
#include "dl_lib_matrix3dq.h"
#include "freertos/FreeRTOS.h"
typedef enum
{
Anchor_Point, /*<! Anchor point detection model*/
Anchor_Box /*<! Anchor box detection model */
} detection_model_type_t;
typedef struct
{
int **anchors_shape; /*<! Anchor shape of this stage */
int stride; /*<! Zoom in stride of this stage */
int boundary; /*<! Detection image low-limit of this stage */
int project_offset; /*<! Project offset of this stage */
} detection_stage_config_t;
typedef struct
{
dl_matrix3dq_t *score; /*<! score feature map of this stage*/
dl_matrix3dq_t *box_offset; /*<! box_offset feature map of this stage*/
dl_matrix3dq_t *landmark_offset; /*<! landmark_offset feature map of this stage */
} detection_stage_result_t;
typedef struct
{
int resized_height; /*<! The height after resized */
int resized_width; /*<! The width after resized */
fptp_t y_resize_scale; /*<! resized_height / input_height */
fptp_t x_resize_scale; /*<! resized_width / input_width */
qtp_t score_threshold; /*<! Score threshold of detection model */
fptp_t nms_threshold; /*<! NMS threshold of detection model */
bool with_landmark; /*<! Whether to detect landmarks: true = with, false = without */
bool free_image; /*<! Whether to free the resized image */
int enabled_top_k; /*<! The number of enabled stages */
} detection_model_config_t;
typedef struct
{
detection_stage_config_t *stage_config; /*<! Configuration of each stage */
int stage_number; /*<! The number of stages */
detection_model_type_t model_type; /*<! The type of detection model */
detection_model_config_t model_config; /*<! Configuration of detection model */
detection_stage_result_t *(*op)(dl_matrix3dq_t *, detection_model_config_t *); /*<! The function of detection inference */
void *(*get_boxes)(detection_stage_result_t *, detection_model_config_t *, detection_stage_config_t *, int); /*<! The function of how to get real boxes */
} detection_model_t;
/**
* @brief Free a 'detection_stage_result_t' value
*
* @param value A 'detection_stage_result_t' value
*/
void free_detection_stage_result(detection_stage_result_t value);
#ifdef __cplusplus
}
#endif
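A sketch of the runtime knobs in detection_model_config_t that an application would typically adjust before running inference; the model handle could be, for example, the extern cat_face_3_model from the previous header, and the chosen values are illustrative.

static void tune_detection_model(detection_model_t *model)
{
    model->model_config.nms_threshold = 0.4f;   // discard heavily overlapping boxes
    model->model_config.free_image = true;      // release the resized input after inference
    model->model_config.enabled_top_k = 1;      // run only the first stage
}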

View File

@@ -0,0 +1,819 @@
#pragma once
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#if CONFIG_SPIRAM_SUPPORT || CONFIG_ESP32_SPIRAM_SUPPORT
#include "freertos/FreeRTOS.h"
#define DL_SPIRAM_SUPPORT 1
#else
#define DL_SPIRAM_SUPPORT 0
#endif
#ifndef max
#define max(x, y) (((x) < (y)) ? (y) : (x))
#endif
#ifndef min
#define min(x, y) (((x) < (y)) ? (x) : (y))
#endif
typedef float fptp_t;
typedef uint8_t uc_t;
typedef enum
{
DL_SUCCESS = 0,
DL_FAIL = 1,
} dl_error_type;
typedef enum
{
PADDING_VALID = 0, /*!< Valid padding */
PADDING_SAME = 1, /*!< Same padding, from right to left, free input */
PADDING_SAME_DONT_FREE_INPUT = 2, /*!< Same padding, from right to left, do not free input */
PADDING_SAME_MXNET = 3, /*!< Same padding, from left to right */
} dl_padding_type;
typedef enum
{
DL_POOLING_MAX = 0, /*!< Max pooling */
DL_POOLING_AVG = 1, /*!< Average pooling */
} dl_pooling_type;
/*
* Matrix for 3d
* @Warning: the sequence of variables is fixed, cannot be modified, otherwise there will be errors in esp_dsp_dot_float
*/
typedef struct
{
int w; /*!< Width */
int h; /*!< Height */
int c; /*!< Channel */
int n; /*!< Number of filters; must be 1 for input and output matrices */
int stride; /*!< Step between lines */
fptp_t *item; /*!< Data */
} dl_matrix3d_t;
typedef struct
{
int w; /*!< Width */
int h; /*!< Height */
int c; /*!< Channel */
int n; /*!< Number of filters; must be 1 for input and output matrices */
int stride; /*!< Step between lines */
uc_t *item; /*!< Data */
} dl_matrix3du_t;
typedef enum
{
UPSAMPLE_NEAREST_NEIGHBOR = 0, /*!< Use nearest neighbor interpolation as the upsample method*/
UPSAMPLE_BILINEAR = 1, /*!< Use bilinear interpolation as the upsample method*/
} dl_upsample_type;
typedef struct
{
int stride_x; /*!< Strides of width */
int stride_y; /*!< Strides of height */
dl_padding_type padding; /*!< Padding type */
} dl_matrix3d_mobilenet_config_t;
/*
* @brief Allocate a zero-initialized space. Must use 'dl_lib_free' to free the memory.
*
* @param cnt Count of units.
* @param size Size of unit.
* @param align Align of memory. If not required, set 0.
* @return Pointer of allocated memory. Null for failed.
*/
static void *dl_lib_calloc(int cnt, int size, int align)
{
int total_size = cnt * size + align + sizeof(void *);
void *res = malloc(total_size);
if (NULL == res)
{
#if DL_SPIRAM_SUPPORT
res = heap_caps_malloc(total_size, MALLOC_CAP_8BIT | MALLOC_CAP_SPIRAM);
}
if (NULL == res)
{
printf("Item psram alloc failed. Size: %d x %d\n", cnt, size);
#else
printf("Item alloc failed. Size: %d x %d, SPIRAM_FLAG: %d\n", cnt, size, DL_SPIRAM_SUPPORT);
#endif
return NULL;
}
bzero(res, total_size);
void **data = (void **)res + 1;
void **aligned;
if (align)
aligned = (void **)(((size_t)data + (align - 1)) & -align);
else
aligned = data;
aligned[-1] = res;
return (void *)aligned;
}
/**
* @brief Free the memory space allocated by 'dl_lib_calloc'
*
*/
static inline void dl_lib_free(void *d)
{
if (NULL == d)
return;
free(((void **)d)[-1]);
}
/*
* @brief Allocate a 3D matrix with float items, the access sequence is NHWC
*
* @param n Number of matrix3d, for filters it is out channels, for others it is 1
* @param w Width of matrix3d
* @param h Height of matrix3d
* @param c Channel of matrix3d
* @return 3d matrix
*/
static inline dl_matrix3d_t *dl_matrix3d_alloc(int n, int w, int h, int c)
{
dl_matrix3d_t *r = (dl_matrix3d_t *)dl_lib_calloc(1, sizeof(dl_matrix3d_t), 0);
if (NULL == r)
{
printf("internal r failed.\n");
return NULL;
}
fptp_t *items = (fptp_t *)dl_lib_calloc(n * w * h * c, sizeof(fptp_t), 0);
if (NULL == items)
{
printf("matrix3d item alloc failed.\n");
dl_lib_free(r);
return NULL;
}
r->w = w;
r->h = h;
r->c = c;
r->n = n;
r->stride = w * c;
r->item = items;
return r;
}
/*
* @brief Allocate a 3D matrix with 8-bits items, the access sequence is NHWC
*
* @param n Number of matrix3d, for filters it is out channels, for others it is 1
* @param w Width of matrix3d
* @param h Height of matrix3d
* @param c Channel of matrix3d
* @return 3d matrix
*/
static inline dl_matrix3du_t *dl_matrix3du_alloc(int n, int w, int h, int c)
{
dl_matrix3du_t *r = (dl_matrix3du_t *)dl_lib_calloc(1, sizeof(dl_matrix3du_t), 0);
if (NULL == r)
{
printf("internal r failed.\n");
return NULL;
}
uc_t *items = (uc_t *)dl_lib_calloc(n * w * h * c, sizeof(uc_t), 0);
if (NULL == items)
{
printf("matrix3du item alloc failed.\n");
dl_lib_free(r);
return NULL;
}
r->w = w;
r->h = h;
r->c = c;
r->n = n;
r->stride = w * c;
r->item = items;
return r;
}
/*
* @brief Free a matrix3d
*
* @param m matrix3d with float items
*/
static inline void dl_matrix3d_free(dl_matrix3d_t *m)
{
if (NULL == m)
return;
if (NULL == m->item)
{
dl_lib_free(m);
return;
}
dl_lib_free(m->item);
dl_lib_free(m);
}
/*
* @brief Free a matrix3d
*
* @param m matrix3d with 8-bits items
*/
static inline void dl_matrix3du_free(dl_matrix3du_t *m)
{
if (NULL == m)
return;
if (NULL == m->item)
{
dl_lib_free(m);
return;
}
dl_lib_free(m->item);
dl_lib_free(m);
}
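A minimal allocation sketch for the helpers above: a single 56x56x3 float matrix (the aligned-face size used elsewhere in this commit), written to and freed again.

static void matrix_alloc_example(void)
{
    dl_matrix3d_t *m = dl_matrix3d_alloc(1, 56, 56, 3);   // zero-initialized, NHWC layout
    if (m) {
        m->item[0] = 1.0f;                                // first element of row 0 (stride = w * c)
        dl_matrix3d_free(m);                              // frees both the items and the header
    }
}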
/*
* @brief Dot product with a vector and matrix
*
* @param out Space to put the result
* @param in input vector
* @param f filter matrix
*/
void dl_matrix3dff_dot_product(dl_matrix3d_t *out, dl_matrix3d_t *in, dl_matrix3d_t *f);
/**
* @brief Do a softmax operation on a matrix3d
*
* @param in Input matrix3d
*/
void dl_matrix3d_softmax(dl_matrix3d_t *m);
/**
* @brief Copy a range of float items from an existing matrix to a preallocated matrix
*
* @param dst The destination slice matrix
* @param src The source matrix to slice
* @param x X-offset of the origin of the returned matrix within the sliced matrix
* @param y Y-offset of the origin of the returned matrix within the sliced matrix
* @param w Width of the resulting matrix
* @param h Height of the resulting matrix
*/
void dl_matrix3d_slice_copy(dl_matrix3d_t *dst,
dl_matrix3d_t *src,
int x,
int y,
int w,
int h);
/**
* @brief Copy a range of 8-bits items from an existing matrix to a preallocated matrix
*
* @param dst The destination slice matrix
* @param src The source matrix to slice
* @param x X-offset of the origin of the returned matrix within the sliced matrix
* @param y Y-offset of the origin of the returned matrix within the sliced matrix
* @param w Width of the resulting matrix
* @param h Height of the resulting matrix
*/
void dl_matrix3du_slice_copy(dl_matrix3du_t *dst,
dl_matrix3du_t *src,
int x,
int y,
int w,
int h);
/**
* @brief Transform a sliced matrix block from NHWC to NCHW; the block must be contiguous in memory.
*
* @param out The destination sliced matrix in nchw
* @param in The source sliced matrix in nhwc
*/
void dl_matrix3d_sliced_transform_nchw(dl_matrix3d_t *out,
dl_matrix3d_t *in);
/**
* @brief Do a general CNN layer pass, dimension is (number, width, height, channel)
*
* @param in Input matrix3d
* @param filter Weights of the neurons
* @param bias Bias for the CNN layer
* @param stride_x The step length of the convolution window in x(width) direction
* @param stride_y The step length of the convolution window in y(height) direction
* @param padding One of VALID or SAME
* @param mode Convolution implementation: 0 for the C implementation, 1 for the Xtensa implementation.
* If ESP_PLATFORM is not defined, this value is not used; the default is 0
* @return dl_matrix3d_t* The result of CNN layer
*/
dl_matrix3d_t *dl_matrix3d_conv(dl_matrix3d_t *in,
dl_matrix3d_t *filter,
dl_matrix3d_t *bias,
int stride_x,
int stride_y,
int padding,
int mode);
/**
* @brief Do a global average pooling layer pass, dimension is (number, width, height, channel)
*
* @param in Input matrix3d
*
* @return The result of global average pooling layer
*/
dl_matrix3d_t *dl_matrix3d_global_pool(dl_matrix3d_t *in);
/**
* @brief Calculate pooling layer of a feature map
*
* @param in Input matrix, size (1, w, h, c)
* @param f_w Window width
* @param f_h Window height
* @param stride_x Stride in horizontal direction
* @param stride_y Stride in vertical direction
* @param padding Padding type: PADDING_VALID or PADDING_SAME
* @param pooling_type Pooling type: DL_POOLING_MAX or DL_POOLING_AVG
* @return dl_matrix3d_t* Resulting matrix, size (1, w', h', c)
*/
dl_matrix3d_t *dl_matrix3d_pooling(dl_matrix3d_t *in,
int f_w,
int f_h,
int stride_x,
int stride_y,
dl_padding_type padding,
dl_pooling_type pooling_type);
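A one-line sketch of the pooling call above: a 2x2 max-pool with stride 2 over a feature map `in` (the window and stride choice is illustrative).

static dl_matrix3d_t *maxpool_example(dl_matrix3d_t *in)
{
    return dl_matrix3d_pooling(in, 2, 2, 2, 2, PADDING_VALID, DL_POOLING_MAX);
}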
/**
* @brief Do a batch normalization operation, update the input matrix3d: input = input * scale + offset
*
* @param m Input matrix3d
* @param scale scale matrix3d, scale = gamma/((moving_variance+sigma)^(1/2))
* @param offset Offset matrix3d, offset = beta-(moving_mean*gamma/((moving_variance+sigma)^(1/2)))
*/
void dl_matrix3d_batch_normalize(dl_matrix3d_t *m,
dl_matrix3d_t *scale,
dl_matrix3d_t *offset);
/**
* @brief Add a pair of matrix3d item-by-item: res=in_1+in_2
*
* @param in_1 First Floating point input matrix3d
* @param in_2 Second Floating point input matrix3d
*
* @return dl_matrix3d_t* Added data
*/
dl_matrix3d_t *dl_matrix3d_add(dl_matrix3d_t *in_1, dl_matrix3d_t *in_2);
/**
* @brief Concatenate the channels of two matrix3ds into a new matrix3d
*
* @param in_1 First Floating point input matrix3d
* @param in_2 Second Floating point input matrix3d
*
* @return dl_matrix3d_t* A newly allocated matrix3d with values in_1|in_2
*/
dl_matrix3d_t *dl_matrix3d_concat(dl_matrix3d_t *in_1, dl_matrix3d_t *in_2);
/**
* @brief Concatenate the channels of four matrix3ds into a new matrix3d
*
* @param in_1 First Floating point input matrix3d
* @param in_2 Second Floating point input matrix3d
* @param in_3 Third Floating point input matrix3d
* @param in_4 Fourth Floating point input matrix3d
*
* @return A newly allocated matrix3d with values in_1|in_2|in_3|in_4
*/
dl_matrix3d_t *dl_matrix3d_concat_4(dl_matrix3d_t *in_1,
dl_matrix3d_t *in_2,
dl_matrix3d_t *in_3,
dl_matrix3d_t *in_4);
/**
* @brief Concatenate the channels of eight matrix3ds into a new matrix3d
*
* @param in_1 First Floating point input matrix3d
* @param in_2 Second Floating point input matrix3d
* @param in_3 Third Floating point input matrix3d
* @param in_4 Fourth Floating point input matrix3d
* @param in_5 Fifth Floating point input matrix3d
* @param in_6 Sixth Floating point input matrix3d
* @param in_7 Seventh Floating point input matrix3d
* @param in_8 Eighth Floating point input matrix3d
*
* @return A newly allocated matrix3d with values in_1|in_2|in_3|in_4|in_5|in_6|in_7|in_8
*/
dl_matrix3d_t *dl_matrix3d_concat_8(dl_matrix3d_t *in_1,
dl_matrix3d_t *in_2,
dl_matrix3d_t *in_3,
dl_matrix3d_t *in_4,
dl_matrix3d_t *in_5,
dl_matrix3d_t *in_6,
dl_matrix3d_t *in_7,
dl_matrix3d_t *in_8);
/**
* @brief Do a mobilefacenet block forward, dimension is (number, width, height, channel)
*
* @param in Input matrix3d
* @param pw Weights of the pointwise conv layer
* @param pw_bn_scale The scale params of the batch_normalize layer after the pointwise conv layer
* @param pw_bn_offset The offset params of the batch_normalize layer after the pointwise conv layer
* @param dw Weights of the depthwise conv layer
* @param dw_bn_scale The scale params of the batch_normalize layer after the depthwise conv layer
* @param dw_bn_offset The offset params of the batch_normalize layer after the depthwise conv layer
* @param pw_linear Weights of the pointwise linear conv layer
* @param pw_linear_bn_scale The scale params of the batch_normalize layer after the pointwise linear conv layer
* @param pw_linear_bn_offset The offset params of the batch_normalize layer after the pointwise linear conv layer
* @param stride_x The step length of the convolution window in x(width) direction
* @param stride_y The step length of the convolution window in y(height) direction
* @param padding One of VALID or SAME
* @param mode Convolution implementation: 0 for the C implementation, 1 for the Xtensa implementation.
* If ESP_PLATFORM is not defined, this value is not used; the default is 0
* @return The result of a mobilefacenet block
*/
dl_matrix3d_t *dl_matrix3d_mobilefaceblock(dl_matrix3d_t *in,
dl_matrix3d_t *pw,
dl_matrix3d_t *pw_bn_scale,
dl_matrix3d_t *pw_bn_offset,
dl_matrix3d_t *dw,
dl_matrix3d_t *dw_bn_scale,
dl_matrix3d_t *dw_bn_offset,
dl_matrix3d_t *pw_linear,
dl_matrix3d_t *pw_linear_bn_scale,
dl_matrix3d_t *pw_linear_bn_offset,
int stride_x,
int stride_y,
int padding,
int mode,
int shortcut);
/**
* @brief Do a mobilefacenet block forward with 1x1 split conv, dimension is (number, width, height, channel)
*
* @param in Input matrix3d
* @param pw_1 Weights of the pointwise conv layer 1
* @param pw_2 Weights of the pointwise conv layer 2
* @param pw_bn_scale The scale params of the batch_normalize layer after the pointwise conv layer
* @param pw_bn_offset The offset params of the batch_normalize layer after the pointwise conv layer
* @param dw Weights of the depthwise conv layer
* @param dw_bn_scale The scale params of the batch_normalize layer after the depthwise conv layer
* @param dw_bn_offset The offset params of the batch_normalize layer after the depthwise conv layer
* @param pw_linear_1 Weights of the pointwise linear conv layer 1
* @param pw_linear_2 Weights of the pointwise linear conv layer 2
* @param pw_linear_bn_scale The scale params of the batch_normalize layer after the pointwise linear conv layer
* @param pw_linear_bn_offset The offset params of the batch_normalize layer after the pointwise linear conv layer
* @param stride_x The step length of the convolution window in x(width) direction
* @param stride_y The step length of the convolution window in y(height) direction
* @param padding One of VALID or SAME
* @param mode Convolution implementation: 0 for the C implementation, 1 for the Xtensa implementation.
*             If ESP_PLATFORM is not defined, this value is ignored and 0 is used
* @param shortcut Whether to add the input as a shortcut (residual) connection to the block output
* @return The result of a mobilefacenet block
*/
dl_matrix3d_t *dl_matrix3d_mobilefaceblock_split(dl_matrix3d_t *in,
dl_matrix3d_t *pw_1,
dl_matrix3d_t *pw_2,
dl_matrix3d_t *pw_bn_scale,
dl_matrix3d_t *pw_bn_offset,
dl_matrix3d_t *dw,
dl_matrix3d_t *dw_bn_scale,
dl_matrix3d_t *dw_bn_offset,
dl_matrix3d_t *pw_linear_1,
dl_matrix3d_t *pw_linear_2,
dl_matrix3d_t *pw_linear_bn_scale,
dl_matrix3d_t *pw_linear_bn_offset,
int stride_x,
int stride_y,
int padding,
int mode,
int shortcut);
/**
* @brief Initialize the matrix3d feature map to bias
*
* @param out The matrix3d feature map needs to be initialized
* @param bias The bias of a convolution operation
*/
void dl_matrix3d_init_bias(dl_matrix3d_t *out, dl_matrix3d_t *bias);
/**
* @brief Do an element-wise multiplication of two matrix3ds
*
* @param out Preallocated matrix3d, size (n, w, h, c)
* @param in1 Input matrix 1, size (n, w, h, c)
* @param in2 Input matrix 2, size (n, w, h, c)
*/
void dl_matrix3d_multiply(dl_matrix3d_t *out, dl_matrix3d_t *in1, dl_matrix3d_t *in2);
//
// Activation
//
/**
* @brief Do a standard relu operation, update the input matrix3d
*
* @param m Floating point input matrix3d
*/
void dl_matrix3d_relu(dl_matrix3d_t *m);
/**
* @brief Do a clipped ReLU (Rectified Linear Unit) operation, update the input matrix3d
*
* @param m Floating point input matrix3d
* @param clip Values higher than this are clipped to this value
*/
void dl_matrix3d_relu_clip(dl_matrix3d_t *m, fptp_t clip);
/**
* @brief Do a PReLU (Parametric Rectified Linear Unit) operation, update the input matrix3d
*
* @param in Floating point input matrix3d
* @param alpha Values below zero are multiplied by the corresponding alpha factor
*/
void dl_matrix3d_p_relu(dl_matrix3d_t *in, dl_matrix3d_t *alpha);
/**
* @brief Do a leaky ReLU (Rectified Linear Unit) operation, update the input matrix3d
*
* @param m Floating point input matrix3d
* @param alpha Values below zero are multiplied by this factor
*/
void dl_matrix3d_leaky_relu(dl_matrix3d_t *m, fptp_t alpha);
//
// Conv 1x1
//
/**
* @brief Do 1x1 convolution with a matrix3d
*
* @param out Preallocated matrix3d, size (1, w, h, n)
* @param in Input matrix, size (1, w, h, c)
* @param filter 1x1 filter, size (n, 1, 1, c)
*/
void dl_matrix3dff_conv_1x1(dl_matrix3d_t *out,
dl_matrix3d_t *in,
dl_matrix3d_t *filter);
/**
* @brief Do 1x1 convolution with a matrix3d, with bias adding
*
* @param out Preallocated matrix3d, size (1, w, h, n)
* @param in Input matrix, size (1, w, h, c)
* @param filter 1x1 filter, size (n, 1, 1, c)
* @param bias Bias, size (1, 1, 1, n)
*/
void dl_matrix3dff_conv_1x1_with_bias(dl_matrix3d_t *out,
dl_matrix3d_t *in,
dl_matrix3d_t *filter,
dl_matrix3d_t *bias);
/**
* @brief Do 1x1 convolution with an 8-bit fixed point matrix
*
* @param out Preallocated matrix3d, size (1, w, h, n)
* @param in Input matrix, size (1, w, h, c)
* @param filter 1x1 filter, size (n, 1, 1, c)
*/
void dl_matrix3duf_conv_1x1(dl_matrix3d_t *out,
dl_matrix3du_t *in,
dl_matrix3d_t *filter);
/**
* @brief Do 1x1 convolution with an 8-bit fixed point matrix, with bias adding
*
* @param out Preallocated matrix3d, size (1, w, h, n)
* @param in Input matrix, size (1, w, h, c)
* @param filter 1x1 filter, size (n, 1, 1, c)
* @param bias Bias, size (1, 1, 1, n)
*/
void dl_matrix3duf_conv_1x1_with_bias(dl_matrix3d_t *out,
dl_matrix3du_t *in,
dl_matrix3d_t *filter,
dl_matrix3d_t *bias);
//
// Conv 3x3
//
/**
* @brief Do 3x3 convolution with a matrix3d, without padding
*
* @param out Preallocated matrix3d, size (1, w, h, n)
* @param in Input matrix, size (1, w, h, c)
* @param f 3x3 filter, size (n, 3, 3, c)
* @param step_x Stride of width
* @param step_y Stride of height
*/
void dl_matrix3dff_conv_3x3_op(dl_matrix3d_t *out,
dl_matrix3d_t *in,
dl_matrix3d_t *f,
int step_x,
int step_y);
/**
* @brief Do 3x3 convolution with a matrix3d, with bias adding
*
* @param in Input matrix, size (1, w, h, c)
* @param filter 3x3 filter, size (n, 3, 3, c)
* @param bias Bias, size (1, 1, 1, n)
* @param stride_x Stride of width
* @param stride_y Stride of height
* @param padding Padding type
* @return dl_matrix3d_t* Resulting matrix3d
*/
dl_matrix3d_t *dl_matrix3dff_conv_3x3(dl_matrix3d_t *in,
dl_matrix3d_t *filter,
dl_matrix3d_t *bias,
int stride_x,
int stride_y,
dl_padding_type padding);
//
// Conv Common
//
/**
* @brief Do a general convolution layer pass with an 8-bit fixed point matrix, size is (number, width, height, channel)
*
* @param in Input image
* @param filter Weights of the neurons
* @param bias Bias for the CNN layer
* @param stride_x The step length of the convolution window in x(width) direction
* @param stride_y The step length of the convolution window in y(height) direction
* @param padding Padding type
* @return dl_matrix3d_t* Resulting matrix3d
*/
dl_matrix3d_t *dl_matrix3duf_conv_common(dl_matrix3du_t *in,
dl_matrix3d_t *filter,
dl_matrix3d_t *bias,
int stride_x,
int stride_y,
dl_padding_type padding);
/**
* @brief Do a general convolution layer pass, size is (number, width, height, channel)
*
* @param in Input image
* @param filter Weights of the neurons
* @param bias Bias for the CNN layer
* @param stride_x The step length of the convolution window in x(width) direction
* @param stride_y The step length of the convolution window in y(height) direction
* @param padding Padding type
* @return dl_matrix3d_t* Resulting matrix3d
*/
dl_matrix3d_t *dl_matrix3dff_conv_common(dl_matrix3d_t *in,
dl_matrix3d_t *filter,
dl_matrix3d_t *bias,
int stride_x,
int stride_y,
dl_padding_type padding);
//
// Depthwise 3x3
//
/**
* @brief Do 3x3 depthwise convolution with a float matrix3d
*
* @param in Input matrix, size (1, w, h, c)
* @param filter 3x3 filter, size (1, 3, 3, c)
* @param stride_x Stride of width
* @param stride_y Stride of height
* @param padding Padding type, 0: valid, 1: same
* @return dl_matrix3d_t* Resulting float matrix3d
*/
dl_matrix3d_t *dl_matrix3dff_depthwise_conv_3x3(dl_matrix3d_t *in,
dl_matrix3d_t *filter,
int stride_x,
int stride_y,
int padding);
/**
* @brief Do 3x3 depthwise convolution with an 8-bit fixed point matrix
*
* @param in Input matrix, size (1, w, h, c)
* @param filter 3x3 filter, size (1, 3, 3, c)
* @param stride_x Stride of width
* @param stride_y Stride of height
* @param padding Padding type, 0: valid, 1: same
* @return dl_matrix3d_t* Resulting float matrix3d
*/
dl_matrix3d_t *dl_matrix3duf_depthwise_conv_3x3(dl_matrix3du_t *in,
dl_matrix3d_t *filter,
int stride_x,
int stride_y,
int padding);
/**
* @brief Do 3x3 depthwise convolution with a float matrix3d, without padding
*
* @param out Preallocated matrix3d, size (1, w, h, n)
* @param in Input matrix, size (1, w, h, c)
* @param f 3x3 filter, size (1, 3, 3, c)
* @param step_x Stride of width
* @param step_y Stride of height
*/
void dl_matrix3dff_depthwise_conv_3x3_op(dl_matrix3d_t *out,
dl_matrix3d_t *in,
dl_matrix3d_t *f,
int step_x,
int step_y);
//
// Depthwise Common
//
/**
* @brief Do a depthwise CNN layer pass, dimension is (number, width, height, channel)
*
* @param in Input matrix3d
* @param filter Weights of the neurons
* @param stride_x The step length of the convolution window in x(width) direction
* @param stride_y The step length of the convolution window in y(height) direction
* @param padding One of VALID or SAME
* @return The result of the depthwise CNN layer
*/
dl_matrix3d_t *dl_matrix3dff_depthwise_conv_common(dl_matrix3d_t *in,
dl_matrix3d_t *filter,
int stride_x,
int stride_y,
dl_padding_type padding);
//
// FC
//
/**
* @brief Do a general fully connected layer pass, dimension is (number, width, height, channel)
*
* @param out Preallocated resulting matrix, size is (1, 1, 1, h)
* @param in Input matrix3d, size is (1, w, 1, 1)
* @param filter Weights of the neurons, size is (1, w, h, 1)
*/
void dl_matrix3dff_fc(dl_matrix3d_t *out,
dl_matrix3d_t *in,
dl_matrix3d_t *filter);
/**
* @brief Do fully connected layer forward, with bias adding
*
* @param out Preallocated resulting matrix, size (1, 1, 1, h)
* @param in Input matrix, size (1, 1, 1, w)
* @param filter Filter matrix, size (1, w, h, 1)
* @param bias Bias matrix, size (1, 1, 1, h)
*/
void dl_matrix3dff_fc_with_bias(dl_matrix3d_t *out,
dl_matrix3d_t *in,
dl_matrix3d_t *filter,
dl_matrix3d_t *bias);
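/*
 * Illustrative sketch (not part of the original header): a fully connected
 * pass mapping a w-dimensional feature vector to h outputs, following the
 * sizes documented above. dl_matrix3d_alloc()/dl_matrix3d_free() and the h
 * field of dl_matrix3d_t are assumed from dl_lib_matrix3d.h.

 static dl_matrix3d_t *fc_example(dl_matrix3d_t *in,     // (1, 1, 1, w)
                                  dl_matrix3d_t *filter, // (1, w, h, 1)
                                  dl_matrix3d_t *bias)   // (1, 1, 1, h)
 {
     dl_matrix3d_t *out = dl_matrix3d_alloc(1, 1, 1, filter->h);
     if (!out) {
         return NULL;
     }
     dl_matrix3dff_fc_with_bias(out, in, filter, bias);
     return out; // (1, 1, 1, h); release with dl_matrix3d_free(out)
 }
 */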
//
// Mobilenet
//
/**
* @brief Do a mobilenet block forward, dimension is (number, width, height, channel)
*
* @param in Input matrix3d
* @param dilate_filter Weights of the dilate (expansion) conv layer
* @param dilate_prelu PReLU parameters applied after the dilate conv layer
* @param depthwise_filter Weights of the depthwise conv layer
* @param depthwise_prelu PReLU parameters applied after the depthwise conv layer
* @param compress_filter Weights of the compress (projection) conv layer
* @param bias Bias of the block
* @param config Mobilenet block configuration, see dl_matrix3d_mobilenet_config_t
* @return The result of the mobilenet block
*/
dl_matrix3d_t *dl_matrix3dff_mobilenet(dl_matrix3d_t *in,
dl_matrix3d_t *dilate_filter,
dl_matrix3d_t *dilate_prelu,
dl_matrix3d_t *depthwise_filter,
dl_matrix3d_t *depthwise_prelu,
dl_matrix3d_t *compress_filter,
dl_matrix3d_t *bias,
dl_matrix3d_mobilenet_config_t config);
/**
* @brief Do a mobilenet block forward, dimension is (number, width, height, channel)
*
* @param in Input matrix3du
* @param dilate_filter Weights of the dilate (expansion) conv layer
* @param dilate_prelu PReLU parameters applied after the dilate conv layer
* @param depthwise_filter Weights of the depthwise conv layer
* @param depthwise_prelu PReLU parameters applied after the depthwise conv layer
* @param compress_filter Weights of the compress (projection) conv layer
* @param bias Bias of the block
* @param config Mobilenet block configuration, see dl_matrix3d_mobilenet_config_t
* @return The result of the mobilenet block
*/
dl_matrix3d_t *dl_matrix3duf_mobilenet(dl_matrix3du_t *in,
dl_matrix3d_t *dilate_filter,
dl_matrix3d_t *dilate_prelu,
dl_matrix3d_t *depthwise_filter,
dl_matrix3d_t *depthwise_prelu,
dl_matrix3d_t *compress_filter,
dl_matrix3d_t *bias,
dl_matrix3d_mobilenet_config_t config);

File diff suppressed because it is too large

View File

@ -0,0 +1,43 @@
#pragma once
#if __cplusplus
extern "C"
{
#endif
#include "dl_lib_matrix3d.h"
#include "dl_lib_matrix3dq.h"
/**
* @brief Forward the face recognition process with frmn model. Calculate in float.
*
* @param in Image matrix, rgb888 format, size is 56x56, normalized
* @return dl_matrix3d_t* Face ID feature vector, size is 512
*/
dl_matrix3d_t *frmn(dl_matrix3d_t *in);
/**@{*/
/**
* @brief Forward the face recognition process with specified model. Calculate in quantization.
*
* @param in Image matrix, rgb888 format, size is 56x56, normalized
* @param mode 0: C implementation; 1: hand-written Xtensa instruction implementation
* @return Face ID feature vector, size is 512
*/
dl_matrix3dq_t *frmn_q(dl_matrix3dq_t *in, dl_conv_mode mode);
dl_matrix3dq_t *frmn2p_q(dl_matrix3dq_t *in, dl_conv_mode mode);
dl_matrix3dq_t *mfn56_42m_q(dl_matrix3dq_t *in, dl_conv_mode mode);
dl_matrix3dq_t *mfn56_72m_q(dl_matrix3dq_t *in, dl_conv_mode mode);
dl_matrix3dq_t *mfn56_112m_q(dl_matrix3dq_t *in, dl_conv_mode mode);
dl_matrix3dq_t *mfn56_156m_q(dl_matrix3dq_t *in, dl_conv_mode mode);
/**@}*/
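/*
 * Illustrative sketch (not part of the original header): comparing two face
 * crops with the float frmn model. The 56x56 aligned, normalized rgb888
 * inputs are produced elsewhere in the library; the item array, the channel
 * count c and dl_matrix3d_free() are assumed from dl_lib_matrix3d.h.

 static float face_similarity_example(dl_matrix3d_t *face_a, dl_matrix3d_t *face_b) // each (1, 56, 56, 3)
 {
     dl_matrix3d_t *id_a = frmn(face_a); // (1, 1, 1, 512) feature vector
     dl_matrix3d_t *id_b = frmn(face_b);
     float dot = 0.0f;
     for (int i = 0; i < id_a->c; i++) {
         dot += id_a->item[i] * id_b->item[i];
     }
     dl_matrix3d_free(id_a);
     dl_matrix3d_free(id_b);
     return dot; // for L2-normalized features this is the cosine similarity
 }
 */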
#if __cplusplus
}
#endif

View File

@ -0,0 +1,66 @@
#pragma once
#if __cplusplus
extern "C"
{
#endif
#include "dl_lib_matrix3d.h"
#include "dl_lib_matrix3dq.h"
typedef struct
{
int num; /*!< The total number of the boxes */
dl_matrix3d_t *cls; /*!< The class feature map corresponding to the box. size: (height, width, anchor_num, 1) */
dl_matrix3d_t *score; /*!< The confidence score feature map of the class corresponding to the box. size: (height, width, anchor_num, 1) */
dl_matrix3d_t *boxes; /*!< (x, y, w, h) of the boxes. x and y are the center coordinates. size:(height, width, anchor_num, 4) */
} detection_result_t;
/**
* @brief Forward the hand detection process with hd_nano1 model. Calculate in quantization.
*
* @param in A normalized image matrix in rgb888 format, its width and height must be integer multiples of 16.
* @param mode 0: C implementation; 1: hand-written Xtensa instruction implementation
* @return detection_result_t** Detection results
*/
detection_result_t **hd_nano1_q(dl_matrix3dq_t *in, dl_conv_mode mode);
/**
* @brief Forward the hand detection process with hd_lite1 model. Calculate in quantization.
*
* @param in A normalized image matrix in rgb888 format, its width and height must be integer multiples of 32.
* @param mode 0: C implementation; 1: hand-written Xtensa instruction implementation.
* @return detection_result_t** Detection results.
*/
detection_result_t **hd_lite1_q(dl_matrix3dq_t *in, dl_conv_mode mode);
/**
* @brief Free the single detection result.
*
* @param m The single detection result.
*/
void detection_result_free(detection_result_t *m);
/**
* @brief Free the detection result group from different feature map.
*
* @param m The detection result group
* @param length The number of the detection results
*/
void detection_results_free(detection_result_t **m, int length);
/**
* @brief Test the result of hand detection model.
*
*/
void hd_test();
/**
* @brief Test the forward time of hand detection model.
*
*/
void hd_time_test();
#if __cplusplus
}
#endif

View File

@ -0,0 +1,43 @@
#pragma once
#if __cplusplus
extern "C"
{
#endif
#include "dl_lib_matrix3d.h"
#include "dl_lib_matrix3dq.h"
/**
* @brief Forward the hand pose estimation process with hp_nano1_ls16 model. Calculate in quantization.
*
* @param in A normalized image matrix in rgb888 format, its size is (1, 128, 128, 3).
* @param mode 0: C implementation; 1: hand-written Xtensa instruction implementation
* @return dl_matrix3d_t* The resulting hand joint point coordinates, the size is (1, 1, 21, 2)
*/
dl_matrix3d_t *hp_nano1_ls16_q(dl_matrix3dq_t *in, dl_conv_mode mode);
/**
* @brief Forward the hand pose estimation process with hp_lite1 model. Calculate in quantization.
*
* @param in A normalized image matrix in rgb888 format, its size is (1, 128, 128, 3).
* @param mode 0: C implementation; 1: hand-written Xtensa instruction implementation
* @return dl_matrix3d_t* The resulting hand joint point coordinates, the size is (1, 1, 21, 2)
*/
dl_matrix3d_t *hp_lite1_q(dl_matrix3dq_t *in, dl_conv_mode mode);
/**
* @brief Test the result of hand pose estimation model.
*
*/
void hp_test();
/**
* @brief Test the forward time of hand pose estimation model.
*
*/
void hp_time_test();
#if __cplusplus
}
#endif

View File

@ -0,0 +1,91 @@
/*
* ESPRESSIF MIT License
*
* Copyright (c) 2018 <ESPRESSIF SYSTEMS (SHANGHAI) PTE LTD>
*
* Permission is hereby granted for use on ESPRESSIF SYSTEMS products only, in which case,
* it is free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
#include "dl_lib_matrix3d.h"
#include "dl_lib_matrix3dq.h"
#include "freertos/FreeRTOS.h"
typedef struct
{
int resized_height;
int resized_width;
fptp_t y_resize_scale;
fptp_t x_resize_scale;
int enabled_top_k;
fptp_t score_threshold;
fptp_t nms_threshold;
dl_conv_mode mode;
} lssh_config_t;
typedef struct
{
int *anchor_size;
int stride;
int boundary;
} lssh_module_config_t;
typedef struct
{
lssh_module_config_t *module_config;
int number;
} lssh_modules_config_t;
typedef struct
{
dl_matrix3d_t *category;
dl_matrix3d_t *box_offset;
dl_matrix3d_t *landmark_offset;
} lssh_module_result_t;
/**
* @brief Free a single lssh module result
*
* @param value The module result to free
*/
void lssh_module_result_free(lssh_module_result_t value);
/**
* @brief Free an array of lssh module results
*
* @param values The module results to free
* @param length The number of module results
*/
void lssh_module_results_free(lssh_module_result_t *values, int length);
/////////////////////////
//////sparse_mn_5_q//////
/////////////////////////
extern lssh_modules_config_t sparse_mn_5_modules_config;
lssh_module_result_t *sparse_mn_5_q_without_landmark(dl_matrix3du_t *image, bool free_image, int enabled_top_k, dl_conv_mode mode);
lssh_module_result_t *sparse_mn_5_q_with_landmark(dl_matrix3du_t *image, bool free_image, int enabled_top_k, dl_conv_mode mode);
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,142 @@
/*
* ESPRESSIF MIT License
*
* Copyright (c) 2018 <ESPRESSIF SYSTEMS (SHANGHAI) PTE LTD>
*
* Permission is hereby granted for use on ESPRESSIF SYSTEMS products only, in which case,
* it is free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
#include "dl_lib_matrix3d.h"
#include "dl_lib_matrix3dq.h"
/**
* Detection results with MTMN.
*
*/
typedef struct
{
dl_matrix3d_t *category; /*!< Classification result after softmax, channel is 2 */
dl_matrix3d_t *offset; /*!< Bounding box offset of 2 points: top-left and bottom-right, channel is 4 */
dl_matrix3d_t *landmark; /*!< Offsets of 5 landmarks:
* - Left eye
* - Mouth left side
* - Nose
* - Right eye
* - Mouth right side
*
* channel is 10
* */
} mtmn_net_t;
/**
* @brief Free a mtmn_net_t
*
* @param p A mtmn_net_t pointer
*
*/
void mtmn_net_t_free(mtmn_net_t *p);
/**
* @brief Forward the pnet process, coarse detection. Calculate in float.
*
* @param in Image matrix, rgb888 format, size is 320x240
* @return Scores for every pixel, and the corresponding box offsets.
*/
mtmn_net_t *pnet_lite_f(dl_matrix3du_t *in);
/**
* @brief Forward the rnet process to refine the boxes from pnet. Calculate in float.
*
* @param in Image matrix, rgb888 format
* @param threshold Score threshold to detect human face
* @return Scores for every box, and the corresponding box offsets.
*/
mtmn_net_t *rnet_lite_f_with_score_verify(dl_matrix3du_t *in, float threshold);
/**
* @brief Forward the onet process to refine the boxes from rnet. Calculate in float.
*
* @param in Image matrix, rgb888 format
* @param threshold Score threshold to detect human face
* @return Scores for every box, the corresponding box offsets, and landmarks.
*/
mtmn_net_t *onet_lite_f_with_score_verify(dl_matrix3du_t *in, float threshold);
/**
* @brief Forward the pnet process, coarse detection. Calculate in quantization.
*
* @param in Image matrix, rgb888 format, size is 320x240
* @param mode 0: C implementation; 1: hand-written Xtensa instruction implementation
* @return Scores for every pixel, and the corresponding box offsets.
*/
mtmn_net_t *pnet_lite_q(dl_matrix3du_t *in, dl_conv_mode mode);
/**
* @brief Forward the rnet process to refine the boxes from pnet. Calculate in quantization.
*
* @param in Image matrix, rgb888 format
* @param threshold Score threshold to detect human face
* @param mode 0: C implementation; 1: hand-written Xtensa instruction implementation
* @return Scores for every box, and the corresponding box offsets.
*/
mtmn_net_t *rnet_lite_q_with_score_verify(dl_matrix3du_t *in, float threshold, dl_conv_mode mode);
/**
* @brief Forward the onet process to refine the boxes from rnet. Calculate in quantization.
*
* @param in Image matrix, rgb888 format
* @param threshold Score threshold to detect human face
* @param mode 0: C implementation; 1: hand-written Xtensa instruction implementation
* @return Scores for every box, the corresponding box offsets, and landmarks.
*/
mtmn_net_t *onet_lite_q_with_score_verify(dl_matrix3du_t *in, float threshold, dl_conv_mode mode);
/**
* @brief Forward the pnet process, coarse detection. Calculate in quantization.
*
* @param in Image matrix, rgb888 format, size is 320x240
* @param mode 0: C implementation; 1: hand-written Xtensa instruction implementation
* @return Scores for every pixel, and the corresponding box offsets.
*/
mtmn_net_t *pnet_heavy_q(dl_matrix3du_t *in, dl_conv_mode mode);
/**
* @brief Forward the rnet process to refine the boxes from pnet. Calculate in quantization.
*
* @param in Image matrix, rgb888 format
* @param threshold Score threshold to detect human face
* @param mode 0: C implementation; 1: hand-written Xtensa instruction implementation
* @return Scores for every box, and the corresponding box offsets.
*/
mtmn_net_t *rnet_heavy_q_with_score_verify(dl_matrix3du_t *in, float threshold, dl_conv_mode mode);
/**
* @brief Forward the onet process to refine the boxes from rnet. Calculate in quantization.
*
* @param in Image matrix, rgb888 format
* @param threshold Score threshold to detect human face
* @param mode 0: C implementation; 1: hand-written Xtensa instruction implementation
* @return Scores for every box, the corresponding box offsets, and landmarks.
*/
mtmn_net_t *onet_heavy_q_with_score_verify(dl_matrix3du_t *in, float threshold, dl_conv_mode mode);
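/*
 * Illustrative sketch (not part of the original header): a single P-Net pass
 * in float mode. The full P-Net/R-Net/O-Net cascade also needs the candidate
 * generation and NMS helpers from image_util.h, which are not shown here, so
 * only the first stage is sketched; dl_matrix3du_alloc()/dl_matrix3du_free()
 * and the item buffer of dl_matrix3du_t are assumed from dl_lib_matrix3d.h.

 #include <string.h>

 static void pnet_example(const uint8_t *rgb888_320x240)
 {
     dl_matrix3du_t *in = dl_matrix3du_alloc(1, 320, 240, 3);
     if (!in) {
         return;
     }
     memcpy(in->item, rgb888_320x240, 320 * 240 * 3);
     mtmn_net_t *out = pnet_lite_f(in); // per-pixel scores and box offsets
     // ... generate candidate boxes from out->category / out->offset here ...
     mtmn_net_t_free(out);
     dl_matrix3du_free(in);
 }
 */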
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,59 @@
/*
* ESPRESSIF MIT License
*
* Copyright (c) 2018 <ESPRESSIF SYSTEMS (SHANGHAI) PTE LTD>
*
* Permission is hereby granted for use on ESPRESSIF SYSTEMS products only, in which case,
* it is free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#pragma once
#if __cplusplus
extern "C"
{
#endif
#include "image_util.h"
#include "detection.h"
// Include models
#include "cat_face_3.h"
/**
* @brief Update the detection hyperparameters
*
* @param model The detection model
* @param resize_scale The resize scale of input image
* @param score_threshold Score threshold, used to filter candidates by score
* @param nms_threshold NMS threshold, used to filter out overlapping boxes
* @param image_height Input image height
* @param image_width Input image width
*/
void update_detection_model(detection_model_t *model, fptp_t resize_scale, fptp_t score_threshold, fptp_t nms_threshold, int image_height, int image_width);
/**
* @brief Detect objects in the input image with the given detection model
*
* @param image The input image
* @param model Pointer to the detection model ('detection_model_t')
* @return box_array_t* The detection result with boxes and the corresponding scores and categories
*/
box_array_t *detect_object(dl_matrix3du_t *image, detection_model_t *model);
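/*
 * Illustrative sketch (not part of the original header): configuring and
 * running the generic detector. get_cat_face_3_model() is a placeholder for
 * whatever handle cat_face_3.h actually exports (the exact symbol is not
 * shown here), the thresholds are example values only, and the w/h fields of
 * dl_matrix3du_t are assumed from dl_lib_matrix3d.h.

 static box_array_t *detect_example(dl_matrix3du_t *image) // rgb888 frame
 {
     detection_model_t *model = get_cat_face_3_model(); // placeholder accessor
     // resize_scale 0.3, score threshold 0.7, nms threshold 0.3 (example values)
     update_detection_model(model, 0.3, 0.7, 0.3, image->h, image->w);
     return detect_object(image, model); // boxes with scores and categories
 }
 */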
#if __cplusplus
}
#endif

View File

@ -0,0 +1,153 @@
/*
* ESPRESSIF MIT License
*
* Copyright (c) 2018 <ESPRESSIF SYSTEMS (SHANGHAI) PTE LTD>
*
* Permission is hereby granted for use on ESPRESSIF SYSTEMS products only, in which case,
* it is free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the Software is furnished
* to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or
* substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#pragma once
#if __cplusplus
extern "C"
{
#endif
#include "image_util.h"
#include "dl_lib_matrix3d.h"
#include "hd_model.h"
#include "hp_model.h"
#define INPUT_EXPONENT -10
#define SCORE_THRESHOLD 0.5
#define NMS_THRESHOLD 0.45
#if CONFIG_HD_LITE1
#define HP_TARGET_SIZE 128
#else
#define HP_TARGET_SIZE 128
#endif
typedef struct
{
int target_size; /*!< The input size of hand detection network */
fptp_t score_threshold; /*!< score threshold used to filter candidates by score */
fptp_t nms_threshold; /*!< nms threshold used to filter out overlapping boxes */
} hd_config_t;
/**
* @brief Get the default hand detection network configuration
*
* @return hd_config_t The default configuration
*/
static inline hd_config_t hd_init_config()
{
hd_config_t hd_config;
hd_config.target_size = 96;
hd_config.score_threshold = SCORE_THRESHOLD;
hd_config.nms_threshold = NMS_THRESHOLD;
return hd_config;
}
typedef struct tag_od_box_list
{
fptp_t *score; /*!< The confidence score of the class corresponding to the box */
qtp_t *cls; /*!< The class corresponding to the box */
box_t *box; /*!< (x1, y1, x2, y2) of the boxes */
int len; /*!< The number of the boxes */
} od_box_array_t;
typedef struct tag_od_image_box
{
struct tag_od_image_box *next; /*!< Next od_image_box_t */
fptp_t score; /*!< The confidence score of the class corresponding to the box */
qtp_t cls; /*!< The class corresponding to the box */
box_t box; /*!< (x1, y1, x2, y2) of the boxes */
} od_image_box_t;
typedef struct tag_od_image_list
{
od_image_box_t *head; /*!< The current head of the od_image_list */
od_image_box_t *origin_head; /*!< The original head of the od_image_list */
int len; /*!< Length of the od_image_list */
} od_image_list_t;
/**
* @brief Sort the resulting box lists by their confidence score.
*
* @param image_sorted_list The sorted box list.
* @param insert_list The box list that has not been sorted.
*/
void od_image_sort_insert_by_score(od_image_list_t *image_sorted_list, const od_image_list_t *insert_list);
/**
* @brief Filter out the resulting boxes whose confidence score is lower than the threshold and convert the boxes to the actual boxes on the original image.((x, y, w, h) -> (x1, y1, x2, y2))
*
* @param score Confidence score of the boxes.
* @param cls Class of the boxes.
* @param boxes (x, y, w, h) of the boxes. x and y are the center coordinates.
* @param height Height of the detection output feature map.
* @param width Width of the detection output feature map.
* @param anchor_number Anchor number of the detection output feature map.
* @param score_threshold Threshold of the confidence score.
* @param resize_scale Resize scale: target_size/original_size.
* @param padding_w Width padding in preprocess.
* @param padding_h Height padding in preprocess.
* @return od_image_list_t* Resulting valid boxes.
*/
od_image_list_t *od_image_get_valid_boxes(fptp_t *score,
fptp_t *cls,
fptp_t *boxes,
int height,
int width,
int anchor_number,
fptp_t score_threshold,
fptp_t resize_scale,
int padding_w,
int padding_h);
/**
* @brief Run NMS algorithm
*
* @param image_list The input boxes list
* @param nms_threshold NMS threshold
*/
void od_image_nms_process(od_image_list_t *image_list, fptp_t nms_threshold);
/**
* @brief Do hand detection, return box information.
*
* @param image Image matrix, rgb888 format
* @param hd_config Configuration of hand detection
* @return od_box_array_t* A list of boxes, score and class.
*/
od_box_array_t *hand_detection_forward(dl_matrix3du_t *image, hd_config_t hd_config);
/**
* @brief Do hand pose estimation, return 21 landmarks of each hand.
*
* @param image Image matrix, rgb888 format
* @param od_boxes The output of the hand detection network
* @param target_size The input size of hand pose estimation network
* @return dl_matrix3d_t* The coordinates of 21 landmarks on the input image for each hand, size (n, 1, 21, 2)
*/
dl_matrix3d_t *handpose_estimation_forward(dl_matrix3du_t *image, od_box_array_t *od_boxes, int target_size);
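/*
 * Illustrative sketch (not part of the original header): chaining hand
 * detection and hand pose estimation on one rgb888 frame. dl_matrix3d_free()
 * is assumed from dl_lib_matrix3d.h; the matching release helper for
 * od_box_array_t is not shown in this header.

 static void hand_example(dl_matrix3du_t *image) // rgb888 frame
 {
     hd_config_t cfg = hd_init_config(); // 96px input, default thresholds
     od_box_array_t *hands = hand_detection_forward(image, cfg);
     if (hands && hands->len > 0) {
         // 21 (x, y) landmarks per detected hand, size (n, 1, 21, 2)
         dl_matrix3d_t *landmarks = handpose_estimation_forward(image, hands, HP_TARGET_SIZE);
         // ... use the landmarks ...
         dl_matrix3d_free(landmarks);
     }
     // release `hands` with the corresponding od_box_array free helper
 }
 */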
#if __cplusplus
}
#endif

View File

@ -0,0 +1,43 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef _ESP_JPG_DECODE_H_
#define _ESP_JPG_DECODE_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include "esp_err.h"
typedef enum {
JPG_SCALE_NONE,
JPG_SCALE_2X,
JPG_SCALE_4X,
JPG_SCALE_8X,
JPG_SCALE_MAX = JPG_SCALE_8X
} jpg_scale_t;
typedef size_t (* jpg_reader_cb)(void * arg, size_t index, uint8_t *buf, size_t len);
typedef bool (* jpg_writer_cb)(void * arg, uint16_t x, uint16_t y, uint16_t w, uint16_t h, uint8_t *data);
esp_err_t esp_jpg_decode(size_t len, jpg_scale_t scale, jpg_reader_cb reader, jpg_writer_cb writer, void * arg);
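/*
 * Illustrative sketch (not part of the original header): decoding a JPEG held
 * in RAM by giving esp_jpg_decode() a memory reader and a writer that only
 * counts decoded pixels. The callback contracts assumed here (a NULL buf asks
 * the reader to skip bytes, a NULL data pointer marks frame start/end) follow
 * this driver's own converters; replace the writer body with real pixel handling.

 #include <string.h>

 typedef struct {
     const uint8_t *jpg; // compressed input
     size_t len;
     size_t pixels;      // toy "output": number of decoded pixels
 } jpg_ctx_t;

 static size_t mem_reader(void *arg, size_t index, uint8_t *buf, size_t len)
 {
     jpg_ctx_t *ctx = (jpg_ctx_t *)arg;
     if (index >= ctx->len) {
         return 0;
     }
     if (index + len > ctx->len) {
         len = ctx->len - index;
     }
     if (buf) {
         memcpy(buf, ctx->jpg + index, len);
     }
     return len;
 }

 static bool block_writer(void *arg, uint16_t x, uint16_t y, uint16_t w, uint16_t h, uint8_t *data)
 {
     (void)x; (void)y;
     if (data) {
         ((jpg_ctx_t *)arg)->pixels += (size_t)w * h;
     }
     return true; // returning false aborts the decode
 }

 static esp_err_t jpg_decode_example(const uint8_t *jpg, size_t len)
 {
     jpg_ctx_t ctx = { .jpg = jpg, .len = len, .pixels = 0 };
     return esp_jpg_decode(len, JPG_SCALE_NONE, mem_reader, block_writer, &ctx);
 }
 */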
#ifdef __cplusplus
}
#endif
#endif /* _ESP_JPG_DECODE_H_ */

View File

@ -0,0 +1,126 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef _IMG_CONVERTERS_H_
#define _IMG_CONVERTERS_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include "esp_camera.h"
typedef size_t (* jpg_out_cb)(void * arg, size_t index, const void* data, size_t len);
/**
* @brief Convert image buffer to JPEG
*
* @param src Source buffer in RGB565, RGB888, YUYV or GRAYSCALE format
* @param src_len Length in bytes of the source buffer
* @param width Width in pixels of the source image
* @param height Height in pixels of the source image
* @param format Format of the source image
* @param quality JPEG quality of the resulting image
* @param cb Callback to be called to write the bytes of the output JPEG
* @param arg Pointer to be passed to the callback
*
* @return true on success
*/
bool fmt2jpg_cb(uint8_t *src, size_t src_len, uint16_t width, uint16_t height, pixformat_t format, uint8_t quality, jpg_out_cb cb, void * arg);
/**
* @brief Convert camera frame buffer to JPEG
*
* @param fb Source camera frame buffer
* @param quality JPEG quality of the resulting image
* @param cb Callback to be called to write the bytes of the output JPEG
* @param arg Pointer to be passed to the callback
*
* @return true on success
*/
bool frame2jpg_cb(camera_fb_t * fb, uint8_t quality, jpg_out_cb cb, void * arg);
/**
* @brief Convert image buffer to JPEG buffer
*
* @param src Source buffer in RGB565, RGB888, YUYV or GRAYSCALE format
* @param src_len Length in bytes of the source buffer
* @param width Width in pixels of the source image
* @param height Height in pixels of the source image
* @param format Format of the source image
* @param quality JPEG quality of the resulting image
* @param out Pointer to be populated with the address of the resulting buffer
* @param out_len Pointer to be populated with the length of the output buffer
*
* @return true on success
*/
bool fmt2jpg(uint8_t *src, size_t src_len, uint16_t width, uint16_t height, pixformat_t format, uint8_t quality, uint8_t ** out, size_t * out_len);
/**
* @brief Convert camera frame buffer to JPEG buffer
*
* @param fb Source camera frame buffer
* @param quality JPEG quality of the resulting image
* @param out Pointer to be populated with the address of the resulting buffer
* @param out_len Pointer to be populated with the length of the output buffer
*
* @return true on success
*/
bool frame2jpg(camera_fb_t * fb, uint8_t quality, uint8_t ** out, size_t * out_len);
/**
* @brief Convert image buffer to BMP buffer
*
* @param src Source buffer in JPEG, RGB565, RGB888, YUYV or GRAYSCALE format
* @param src_len Length in bytes of the source buffer
* @param width Width in pixels of the source image
* @param height Height in pixels of the source image
* @param format Format of the source image
* @param out Pointer to be populated with the address of the resulting buffer
* @param out_len Pointer to be populated with the length of the output buffer
*
* @return true on success
*/
bool fmt2bmp(uint8_t *src, size_t src_len, uint16_t width, uint16_t height, pixformat_t format, uint8_t ** out, size_t * out_len);
/**
* @brief Convert camera frame buffer to BMP buffer
*
* @param fb Source camera frame buffer
* @param out Pointer to be populated with the address of the resulting buffer
* @param out_len Pointer to be populated with the length of the output buffer
*
* @return true on success
*/
bool frame2bmp(camera_fb_t * fb, uint8_t ** out, size_t * out_len);
/**
* @brief Convert image buffer to RGB888 buffer (used for face detection)
*
* @param src_buf Source buffer in JPEG, RGB565, RGB888, YUYV or GRAYSCALE format
* @param src_len Length in bytes of the source buffer
* @param format Format of the source image
* @param rgb_buf Pointer to the output buffer (width * height * 3)
*
* @return true on success
*/
bool fmt2rgb888(const uint8_t *src_buf, size_t src_len, pixformat_t format, uint8_t * rgb_buf);
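/*
 * Illustrative sketch (not part of the original header): JPEG-encoding a
 * frame captured in a non-JPEG pixel format. The output buffer is allocated
 * by the converter; releasing it with free() matches how the bundled examples
 * use it, and quality 80 is just an example value.

 static bool jpeg_encode_example(camera_fb_t *fb, uint8_t **jpg, size_t *jpg_len)
 {
     if (fb->format == PIXFORMAT_JPEG) {
         // Already compressed; the frame buffer can be used directly
         // (do not free() it, it still belongs to the driver).
         *jpg = fb->buf;
         *jpg_len = fb->len;
         return true;
     }
     // Converts RGB565/RGB888/YUYV/GRAYSCALE frames to JPEG.
     return frame2jpg(fb, 80, jpg, jpg_len); // on success, free(*jpg) when done
 }
 */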
#ifdef __cplusplus
}
#endif
#endif /* _IMG_CONVERTERS_H_ */

View File

@ -0,0 +1,195 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
* Example Use
*
static camera_config_t camera_example_config = {
.pin_pwdn = PIN_PWDN,
.pin_reset = PIN_RESET,
.pin_xclk = PIN_XCLK,
.pin_sscb_sda = PIN_SIOD,
.pin_sscb_scl = PIN_SIOC,
.pin_d7 = PIN_D7,
.pin_d6 = PIN_D6,
.pin_d5 = PIN_D5,
.pin_d4 = PIN_D4,
.pin_d3 = PIN_D3,
.pin_d2 = PIN_D2,
.pin_d1 = PIN_D1,
.pin_d0 = PIN_D0,
.pin_vsync = PIN_VSYNC,
.pin_href = PIN_HREF,
.pin_pclk = PIN_PCLK,
.xclk_freq_hz = 20000000,
.ledc_timer = LEDC_TIMER_0,
.ledc_channel = LEDC_CHANNEL_0,
.pixel_format = PIXFORMAT_JPEG,
.frame_size = FRAMESIZE_SVGA,
.jpeg_quality = 10,
.fb_count = 2
};
esp_err_t camera_example_init(){
return esp_camera_init(&camera_example_config);
}
esp_err_t camera_example_capture(){
//capture a frame
camera_fb_t * fb = esp_camera_fb_get();
if (!fb) {
ESP_LOGE(TAG, "Frame buffer could not be acquired");
return ESP_FAIL;
}
//replace this with your own function
display_image(fb->width, fb->height, fb->pixformat, fb->buf, fb->len);
//return the frame buffer back to be reused
esp_camera_fb_return(fb);
return ESP_OK;
}
*/
#pragma once
#include "esp_err.h"
#include "driver/ledc.h"
#include "sensor.h"
#include "sys/time.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Configuration structure for camera initialization
*/
typedef struct {
int pin_pwdn; /*!< GPIO pin for camera power down line */
int pin_reset; /*!< GPIO pin for camera reset line */
int pin_xclk; /*!< GPIO pin for camera XCLK line */
int pin_sscb_sda; /*!< GPIO pin for camera SDA line */
int pin_sscb_scl; /*!< GPIO pin for camera SCL line */
int pin_d7; /*!< GPIO pin for camera D7 line */
int pin_d6; /*!< GPIO pin for camera D6 line */
int pin_d5; /*!< GPIO pin for camera D5 line */
int pin_d4; /*!< GPIO pin for camera D4 line */
int pin_d3; /*!< GPIO pin for camera D3 line */
int pin_d2; /*!< GPIO pin for camera D2 line */
int pin_d1; /*!< GPIO pin for camera D1 line */
int pin_d0; /*!< GPIO pin for camera D0 line */
int pin_vsync; /*!< GPIO pin for camera VSYNC line */
int pin_href; /*!< GPIO pin for camera HREF line */
int pin_pclk; /*!< GPIO pin for camera PCLK line */
int xclk_freq_hz; /*!< Frequency of XCLK signal, in Hz. Either 20MHz, or 10MHz for OV2640 double FPS (Experimental) */
ledc_timer_t ledc_timer; /*!< LEDC timer to be used for generating XCLK */
ledc_channel_t ledc_channel; /*!< LEDC channel to be used for generating XCLK */
pixformat_t pixel_format; /*!< Format of the pixel data: PIXFORMAT_ + YUV422|GRAYSCALE|RGB565|JPEG */
framesize_t frame_size; /*!< Size of the output image: FRAMESIZE_ + QVGA|CIF|VGA|SVGA|XGA|SXGA|UXGA */
int jpeg_quality; /*!< Quality of JPEG output. 0-63 lower means higher quality */
size_t fb_count; /*!< Number of frame buffers to be allocated. If more than one, then each frame will be acquired (double speed) */
} camera_config_t;
/**
* @brief Data structure of camera frame buffer
*/
typedef struct {
uint8_t * buf; /*!< Pointer to the pixel data */
size_t len; /*!< Length of the buffer in bytes */
size_t width; /*!< Width of the buffer in pixels */
size_t height; /*!< Height of the buffer in pixels */
pixformat_t format; /*!< Format of the pixel data */
struct timeval timestamp; /*!< Timestamp since boot of the first DMA buffer of the frame */
} camera_fb_t;
#define ESP_ERR_CAMERA_BASE 0x20000
#define ESP_ERR_CAMERA_NOT_DETECTED (ESP_ERR_CAMERA_BASE + 1)
#define ESP_ERR_CAMERA_FAILED_TO_SET_FRAME_SIZE (ESP_ERR_CAMERA_BASE + 2)
#define ESP_ERR_CAMERA_FAILED_TO_SET_OUT_FORMAT (ESP_ERR_CAMERA_BASE + 3)
#define ESP_ERR_CAMERA_NOT_SUPPORTED (ESP_ERR_CAMERA_BASE + 4)
/**
* @brief Initialize the camera driver
*
* @note call camera_probe before calling this function
*
* This function detects and configures camera over I2C interface,
* allocates framebuffer and DMA buffers,
* initializes parallel I2S input, and sets up DMA descriptors.
*
* Currently this function can only be called once and there is
* no way to de-initialize this module.
*
* @param config Camera configuration parameters
*
* @return ESP_OK on success
*/
esp_err_t esp_camera_init(const camera_config_t* config);
/**
* @brief Deinitialize the camera driver
*
* @return
* - ESP_OK on success
* - ESP_ERR_INVALID_STATE if the driver hasn't been initialized yet
*/
esp_err_t esp_camera_deinit();
/**
* @brief Obtain pointer to a frame buffer.
*
* @return pointer to the frame buffer
*/
camera_fb_t* esp_camera_fb_get();
/**
* @brief Return the frame buffer to be reused again.
*
* @param fb Pointer to the frame buffer
*/
void esp_camera_fb_return(camera_fb_t * fb);
/**
* @brief Get a pointer to the image sensor control structure
*
* @return pointer to the sensor
*/
sensor_t * esp_camera_sensor_get();
/**
* @brief Save camera settings to non-volatile-storage (NVS)
*
* @param key A unique nvs key name for the camera settings
*/
esp_err_t esp_camera_save_to_nvs(const char *key);
/**
* @brief Load camera settings from non-volatile-storage (NVS)
*
* @param key A unique nvs key name for the camera settings
*/
esp_err_t esp_camera_load_from_nvs(const char *key);
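/*
 * Illustrative sketch (not part of the original header): adjusting the image
 * sensor at runtime through the control structure returned by
 * esp_camera_sensor_get(). The set_* entries used below are function pointers
 * declared in sensor.h; whether a given control has any effect depends on the
 * detected sensor.

 static esp_err_t camera_tune_example(void)
 {
     sensor_t *s = esp_camera_sensor_get();
     if (s == NULL) {
         return ESP_FAIL;
     }
     s->set_framesize(s, FRAMESIZE_QVGA); // smaller frames, faster capture
     s->set_vflip(s, 1);                  // flip the image vertically
     s->set_brightness(s, 1);             // documented range is -2..2
     return ESP_OK;
 }
 */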
#ifdef __cplusplus
}
#endif
#include "img_converters.h"

View File

@ -0,0 +1,191 @@
/*
* This file is part of the OpenMV project.
* Copyright (c) 2013/2014 Ibrahim Abdelkader <i.abdalkader@gmail.com>
* This work is licensed under the MIT license, see the file LICENSE for details.
*
* Sensor abstraction layer.
*
*/
#ifndef __SENSOR_H__
#define __SENSOR_H__
#include <stdint.h>
#include <stdbool.h>
#define OV9650_PID (0x96)
#define OV7725_PID (0x77)
#define OV2640_PID (0x26)
#define OV3660_PID (0x36)
#define OV5640_PID (0x56)
typedef enum {
PIXFORMAT_RGB565, // 2BPP/RGB565
PIXFORMAT_YUV422, // 2BPP/YUV422
PIXFORMAT_GRAYSCALE, // 1BPP/GRAYSCALE
PIXFORMAT_JPEG, // JPEG/COMPRESSED
PIXFORMAT_RGB888, // 3BPP/RGB888
PIXFORMAT_RAW, // RAW
PIXFORMAT_RGB444, // 3BP2P/RGB444
PIXFORMAT_RGB555, // 3BP2P/RGB555
} pixformat_t;
typedef enum {
FRAMESIZE_96X96, // 96x96
FRAMESIZE_QQVGA, // 160x120
FRAMESIZE_QCIF, // 176x144
FRAMESIZE_HQVGA, // 240x176
FRAMESIZE_240X240, // 240x240
FRAMESIZE_QVGA, // 320x240
FRAMESIZE_CIF, // 400x296
FRAMESIZE_HVGA, // 480x320
FRAMESIZE_VGA, // 640x480
FRAMESIZE_SVGA, // 800x600
FRAMESIZE_XGA, // 1024x768
FRAMESIZE_HD, // 1280x720
FRAMESIZE_SXGA, // 1280x1024
FRAMESIZE_UXGA, // 1600x1200
// 3MP Sensors
FRAMESIZE_FHD, // 1920x1080
FRAMESIZE_P_HD, // 720x1280
FRAMESIZE_P_3MP, // 864x1536
FRAMESIZE_QXGA, // 2048x1536
// 5MP Sensors
FRAMESIZE_QHD, // 2560x1440
FRAMESIZE_WQXGA, // 2560x1600
FRAMESIZE_P_FHD, // 1080x1920
FRAMESIZE_QSXGA, // 2560x1920
FRAMESIZE_INVALID
} framesize_t;
typedef enum {
ASPECT_RATIO_4X3,
ASPECT_RATIO_3X2,
ASPECT_RATIO_16X10,
ASPECT_RATIO_5X3,
ASPECT_RATIO_16X9,
ASPECT_RATIO_21X9,
ASPECT_RATIO_5X4,
ASPECT_RATIO_1X1,
ASPECT_RATIO_9X16
} aspect_ratio_t;
typedef enum {
GAINCEILING_2X,
GAINCEILING_4X,
GAINCEILING_8X,
GAINCEILING_16X,
GAINCEILING_32X,
GAINCEILING_64X,
GAINCEILING_128X,
} gainceiling_t;
typedef struct {
uint16_t max_width;
uint16_t max_height;
uint16_t start_x;
uint16_t start_y;
uint16_t end_x;
uint16_t end_y;
uint16_t offset_x;
uint16_t offset_y;
uint16_t total_x;
uint16_t total_y;
} ratio_settings_t;
typedef struct {
const uint16_t width;
const uint16_t height;
const aspect_ratio_t aspect_ratio;
} resolution_info_t;
// Resolution table (in sensor.c)
extern const resolution_info_t resolution[];
typedef struct {
uint8_t MIDH;
uint8_t MIDL;
uint8_t PID;
uint8_t VER;
} sensor_id_t;
typedef struct {
framesize_t framesize;// one of framesize_t above
bool scale;
bool binning;
uint8_t quality;//0 - 63
int8_t brightness;//-2 - 2
int8_t contrast;//-2 - 2
int8_t saturation;//-2 - 2
int8_t sharpness;//-2 - 2
uint8_t denoise;
uint8_t special_effect;//0 - 6
uint8_t wb_mode;//0 - 4
uint8_t awb;
uint8_t awb_gain;
uint8_t aec;
uint8_t aec2;
int8_t ae_level;//-2 - 2
uint16_t aec_value;//0 - 1200
uint8_t agc;
uint8_t agc_gain;//0 - 30
uint8_t gainceiling;//0 - 6
uint8_t bpc;
uint8_t wpc;
uint8_t raw_gma;
uint8_t lenc;
uint8_t hmirror;
uint8_t vflip;
uint8_t dcw;
uint8_t colorbar;
} camera_status_t;
typedef struct _sensor sensor_t;
typedef struct _sensor {
sensor_id_t id; // Sensor ID.
uint8_t slv_addr; // Sensor I2C slave address.
pixformat_t pixformat;
camera_status_t status;
int xclk_freq_hz;
// Sensor function pointers
int (*init_status) (sensor_t *sensor);
int (*reset) (sensor_t *sensor);
int (*set_pixformat) (sensor_t *sensor, pixformat_t pixformat);
int (*set_framesize) (sensor_t *sensor, framesize_t framesize);
int (*set_contrast) (sensor_t *sensor, int level);
int (*set_brightness) (sensor_t *sensor, int level);
int (*set_saturation) (sensor_t *sensor, int level);
int (*set_sharpness) (sensor_t *sensor, int level);
int (*set_denoise) (sensor_t *sensor, int level);
int (*set_gainceiling) (sensor_t *sensor, gainceiling_t gainceiling);
int (*set_quality) (sensor_t *sensor, int quality);
int (*set_colorbar) (sensor_t *sensor, int enable);
int (*set_whitebal) (sensor_t *sensor, int enable);
int (*set_gain_ctrl) (sensor_t *sensor, int enable);
int (*set_exposure_ctrl) (sensor_t *sensor, int enable);
int (*set_hmirror) (sensor_t *sensor, int enable);
int (*set_vflip) (sensor_t *sensor, int enable);
int (*set_aec2) (sensor_t *sensor, int enable);
int (*set_awb_gain) (sensor_t *sensor, int enable);
int (*set_agc_gain) (sensor_t *sensor, int gain);
int (*set_aec_value) (sensor_t *sensor, int gain);
int (*set_special_effect) (sensor_t *sensor, int effect);
int (*set_wb_mode) (sensor_t *sensor, int mode);
int (*set_ae_level) (sensor_t *sensor, int level);
int (*set_dcw) (sensor_t *sensor, int enable);
int (*set_bpc) (sensor_t *sensor, int enable);
int (*set_wpc) (sensor_t *sensor, int enable);
int (*set_raw_gma) (sensor_t *sensor, int enable);
int (*set_lenc) (sensor_t *sensor, int enable);
int (*get_reg) (sensor_t *sensor, int reg, int mask);
int (*set_reg) (sensor_t *sensor, int reg, int mask, int value);
int (*set_res_raw) (sensor_t *sensor, int startX, int startY, int endX, int endY, int offsetX, int offsetY, int totalX, int totalY, int outputX, int outputY, bool scale, bool binning);
int (*set_pll) (sensor_t *sensor, int bypass, int mul, int sys, int root, int pre, int seld5, int pclken, int pclk);
int (*set_xclk) (sensor_t *sensor, int timer, int xclk);
} sensor_t;
#endif /* __SENSOR_H__ */