Esp32 s3 support (#6341)

Co-authored-by: Jason2866 <24528715+Jason2866@users.noreply.github.com>
Co-authored-by: Unexpected Maker <seon@unexpectedmaker.com>
Co-authored-by: Rodrigo Garcia <rodrigo.garcia@espressif.com>
Co-authored-by: Tomáš Pilný <34927466+PilnyTomas@users.noreply.github.com>
Co-authored-by: Pedro Minatel <pedro.minatel@espressif.com>
Co-authored-by: Ivan Grokhotkov <ivan@espressif.com>
Co-authored-by: Jan Procházka <90197375+P-R-O-C-H-Y@users.noreply.github.com>
Co-authored-by: Limor "Ladyada" Fried <limor@ladyada.net>
Me No Dev
2022-03-28 12:09:41 +03:00
committed by GitHub
parent 3f79097d5f
commit 8ee5f0a11e
3774 changed files with 685773 additions and 19284 deletions

View File

@@ -0,0 +1,17 @@
#pragma once
#include <vector>
namespace dl
{
namespace detect
{
typedef struct
{
int category; /*<! category index */
float score; /*<! score of box */
std::vector<int> box; /*<! [left_up_x, left_up_y, right_down_x, right_down_y] */
std::vector<int> keypoint; /*<! [x1, y1, x2, y2, ...] */
} result_t;
}
}
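
For orientation, a minimal sketch of how a result of this shape is typically consumed — `detect_results`, `frame`, `height` and `width` are hypothetical caller-side names, and draw_hollow_rectangle is declared in dl_image.hpp further down in this commit:

// `detect_results` is a hypothetical std::vector<dl::detect::result_t>
// produced by some detector's inference step.
for (const dl::detect::result_t &r : detect_results)
{
    if (r.score < 0.5f)
        continue; // drop low-confidence boxes
    // box = [left_up_x, left_up_y, right_down_x, right_down_y]
    dl::image::draw_hollow_rectangle(frame, height, width,
                                     r.box[0], r.box[1], r.box[2], r.box[3]);
}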

View File

@@ -0,0 +1,100 @@
#pragma once
#include <climits>
#include "sdkconfig.h"
#define DL_LOG_LATENCY_UNIT 0 /*<! - 1: cycle */
/*<! - 0: us */
#define DL_LOG_NN_LATENCY 0 /*<! - 1: print the latency of each parts of nn */
/*<! - 0: mute */
#define DL_LOG_LAYER_LATENCY 0 /*<! - 1: print the latency of each parts of layer */
/*<! - 0: mute */
#if CONFIG_SPIRAM_SUPPORT || CONFIG_ESP32_SPIRAM_SUPPORT || CONFIG_ESP32S2_SPIRAM_SUPPORT || CONFIG_ESP32S3_SPIRAM_SUPPORT
#define DL_SPIRAM_SUPPORT 1
#else
#define DL_SPIRAM_SUPPORT 0
#endif
#if CONFIG_IDF_TARGET_ESP32
#define CONFIG_DEFAULT_ASSIGN_CORE \
{ \
} // TODO: change the default to 0,1 once multi-core task support is complete
#elif CONFIG_IDF_TARGET_ESP32S2
#define CONFIG_DEFAULT_ASSIGN_CORE \
{ \
}
#elif CONFIG_IDF_TARGET_ESP32S3
#define CONFIG_DEFAULT_ASSIGN_CORE \
{ \
} // TODO: change the default to 0,1 once multi-core task support is complete
#elif CONFIG_IDF_TARGET_ESP32C3
#define CONFIG_DEFAULT_ASSIGN_CORE \
{ \
}
#else
#define CONFIG_DEFAULT_ASSIGN_CORE \
{ \
}
#endif
#define DL_Q16_MIN (-32768)
#define DL_Q16_MAX (32767)
#define DL_Q8_MIN (-128)
#define DL_Q8_MAX (127)
#ifndef DL_MAX
#define DL_MAX(x, y) (((x) < (y)) ? (y) : (x))
#endif
#ifndef DL_MIN
#define DL_MIN(x, y) (((x) < (y)) ? (x) : (y))
#endif
#ifndef DL_CLIP
#define DL_CLIP(x, low, high) (((x) < (low)) ? (low) : (((x) > (high)) ? (high) : (x)))
#endif
#ifndef DL_ABS
#define DL_ABS(x) ((x) < 0 ? (-(x)) : (x))
#endif
#ifndef DL_RIGHT_SHIFT
#define DL_RIGHT_SHIFT(x, shift) (((shift) > 0) ? ((x) >> (shift)) : ((x) << -(shift)))
#endif
#ifndef DL_LEFT_SHIFT
#define DL_LEFT_SHIFT(x, shift) (((shift) > 0) ? ((x) << (shift)) : ((x) >> -(shift)))
#endif
namespace dl
{
typedef enum
{
Linear, /*<! Linear >*/
ReLU, /*<! ReLU >*/
LeakyReLU, /*<! LeakyReLU >*/
PReLU, /*<! PReLU >*/
// TODO: Sigmoid, /*<! Sigmoid >*/
// TODO: Softmax, /*<! Softmax >*/
// TODO: TanH,
// TODO: ReLU6
} activation_type_t;
typedef enum
{
PADDING_NOT_SET,
PADDING_VALID, /*<! no padding >*/
PADDING_SAME_BEGIN, /*<! SAME in MXNET style >*/
PADDING_SAME_END, /*<! SAME in TensorFlow style >*/
} padding_type_t;
typedef enum
{
PADDING_EMPTY,
PADDING_CONSTANT,
PADDING_EDGE,
PADDING_REFLECT,
PADDING_SYMMETRIC,
} padding_mode_t;
} // namespace dl
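
The quantization limits and bit-twiddling macros above are meant to be combined when saturating an accumulator back into a quantized type. A small sketch, with the accumulator and shift values chosen purely for illustration:

int32_t acc = 12345;  // hypothetical 32-bit accumulator
int shift = 4;        // hypothetical requantization shift
// Shift right for a positive shift value, left for a negative one...
int32_t shifted = DL_RIGHT_SHIFT(acc, shift);
// ...then clamp into the int8 quantization range [-128, 127].
int8_t q = (int8_t)DL_CLIP(shifted, DL_Q8_MIN, DL_Q8_MAX);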

View File

@@ -0,0 +1,491 @@
#pragma once
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <vector>
#include "dl_define.hpp"
#include "dl_variable.hpp"
#include "dl_math_matrix.hpp"
namespace dl
{
namespace image
{
typedef enum
{
IMAGE_RESIZE_BILINEAR = 0, /*<! Resize image by taking bilinear of four pixels */
IMAGE_RESIZE_MEAN = 1, /*<! Resize image by taking mean of four pixels */
IMAGE_RESIZE_NEAREST = 2 /*<! Resize image by taking the nearest pixel */
} resize_type_t;
/**
* @brief Convert RGB888 pixel to Gray.
*
* @param red red value
* @param green green value
* @param blue blue value
* @return gray value
*/
inline uint8_t convert_pixel_rgb888_to_gray(int red, int green, int blue)
{
int temp = (red * 38 + green * 75 + blue * 15) >> 7;
return DL_CLIP(temp, 0, 255);
}
/**
* @brief Convert RGB565 pixel to RGB888.
*
* @tparam T supports all integer types
* @param input pixel value in RGB565
* @param output pixel value in RGB888
*/
template <typename T>
inline void convert_pixel_rgb565_to_rgb888(uint16_t input, T *output)
{
output[0] = (input & 0x1F00) >> 5; // blue
output[1] = ((input & 0x7) << 5) | ((input & 0xE000) >> 11); // green
output[2] = input & 0xF8; // red
}
/**
* @brief Convert RGB565 image to RGB888 image.
*
* @param image ptr of RGB565 image
* @param image_shape shape of the input image
* @return Tensor<uint8_t>* output RGB888 image
*/
Tensor<uint8_t> *convert_image_rgb565_to_rgb888(uint16_t *image, std::vector<int> &image_shape);
/**
* @brief Convert RGB565 pixel to Gray.
*
* @param input pixel value in RGB565
* @return pixel value in Gray
*/
inline uint8_t convert_pixel_rgb565_to_gray(uint16_t input)
{
int blue = (input & 0x1F00) >> 5; // blue
int green = ((input & 0x7) << 5) | ((input & 0xE000) >> 11); // green
int red = input & 0xF8; // red
return convert_pixel_rgb888_to_gray(red, green, blue);
}
/**
* @brief Crop a patch from the source image, resize it, and store it in the destination image.
* If the cropping box is out of the source image, the destination image will be padded with the edge pixels.
*
* The outer rectangle is the entire output image.
* The inner rectangle is where the resized image will be stored.
* In other words, this function can do padding while resizing the image.
* ___________________________(dst_w)__________________
* | ___________________________ |
* | |(x_start, y_start) | |
* | | | |
* | | | |
* (dst_h)| | | |
* | | | |
* | | | |
* | |___________________________|(x_end, y_end) |
* |____________________________________________________|
*
* @tparam T supports all integer types
* @param dst_image pointer of destination(output) image
* @param dst_width destination image width
* @param dst_channel destination image channel number
* @param dst_y_start start y of resized image in destination image
* @param dst_y_end end y of resized image in destination image
* @param dst_x_start start x of resized image in destination image
* @param dst_x_end end x of resized image in destination image
* @param src_image pointer of source image
* @param src_height source image height
* @param src_width source image width
* @param src_channel source image channel
* @param src_y_start start y of resized image in source image
* @param src_y_end end y of resized image in source image
* @param src_x_start start x of resized image in source image
* @param src_x_end end x of resized image in source image
* @param resize_type one of IMAGE_RESIZE_BILINEAR or IMAGE_RESIZE_MEAN or IMAGE_RESIZE_NEAREST
* @param shift_left bit left shift number implemented on output
*/
template <typename T>
void crop_and_resize(T *dst_image,
int dst_width,
int dst_channel,
int dst_y_start, int dst_y_end,
int dst_x_start, int dst_x_end,
uint16_t *src_image,
int src_height,
int src_width,
int src_channel,
int src_y_start, int src_y_end,
int src_x_start, int src_x_end,
resize_type_t resize_type = IMAGE_RESIZE_NEAREST,
int shift_left = 0);
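// A hypothetical call of the declaration above: resize a whole 96x96 RGB565
// frame into the center 48x48 region of a 64x64x3 int16_t destination, leaving
// an 8-pixel border; `src`, `dst` and all sizes are illustrative only.
//
//     dl::image::crop_and_resize<int16_t>(dst, 64, 3,
//                                         8, 56, 8, 56,   // dst y/x range
//                                         src, 96, 96, 3,
//                                         0, 96, 0, 96,   // src y/x range
//                                         dl::image::IMAGE_RESIZE_BILINEAR);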
/**
* @brief Crop a patch from the source image, resize it, and store it in the destination image.
* If the cropping box is out of the source image, the destination image will be padded with the edge pixels.
*
* The outer rectangle is the entire output image.
* The inner rectangle is where the resized image will be stored.
* In other words, this function can do padding while resizing the image.
* ___________________________(dst_w)__________________
* | ___________________________ |
* | |(x_start, y_start) | |
* | | | |
* | | | |
* (dst_h)| | | |
* | | | |
* | | | |
* | |___________________________|(x_end, y_end) |
* |____________________________________________________|
*
* @tparam T supports all integer types
* @param dst_image pointer of destination(output) image
* @param dst_width destination image width
* @param dst_channel destination image channel number
* @param dst_y_start start y of resized image in destination image
* @param dst_y_end end y of resized image in destination image
* @param dst_x_start start x of resized image in destination image
* @param dst_x_end end x of resized image in destination image
* @param src_image pointer of source image
* @param src_height source image height
* @param src_width source image width
* @param src_channel source image channel
* @param src_y_start start y of resized image in source image
* @param src_y_end end y of resized image in source image
* @param src_x_start start x of resized image in source image
* @param src_x_end end x of resized image in source image
* @param resize_type one of IMAGE_RESIZE_BILINEAR or IMAGE_RESIZE_MEAN or IMAGE_RESIZE_NEAREST
* @param shift_left bit left shift number implemented on output
*/
template <typename T>
void crop_and_resize(T *dst_image,
int dst_width,
int dst_channel,
int dst_y_start, int dst_y_end,
int dst_x_start, int dst_x_end,
uint8_t *src_image,
int src_height,
int src_width,
int src_channel,
int src_y_start, int src_y_end,
int src_x_start, int src_x_end,
resize_type_t resize_type = IMAGE_RESIZE_NEAREST,
int shift_left = 0);
/**
* @brief Draw a filled rectangle on RGB888 image.
*
* @param image pointer of input image
* @param image_height height of input image
* @param image_width width of input image
* @param x1 top-left corner x
* @param y1 top-left corner y
* @param x2 bottom-right corner x
* @param y2 bottom-right corner y
* @param color 0x 00| 00| 00| 00
* reserved|channel 0|channel 1|channel 2
*/
void draw_filled_rectangle(uint8_t *image, const uint32_t image_height, const uint32_t image_width,
uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2,
const uint32_t color = 0x00FF0000);
/**
* @brief Draw a filled rectangle on RGB565 image.
*
* @param image pointer of input image
* @param image_height height of input image
* @param image_width width of input image
* @param x1 top-left corner x
* @param y1 top-left corner y
* @param x2 bottom-right corner x
* @param y2 bottom-right corner y
* @param color 0b 000| 00000| 00000| 000
* channel 1[2:0]|channel 0|channel 2|channel 1[5:3]
*/
void draw_filled_rectangle(uint16_t *image, const uint32_t image_height, const uint32_t image_width,
uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2,
const uint16_t color = 0b0001111100000000);
/**
* @brief Draw a point on RGB888 image.
*
* @param image pointer of input image
* @param image_height height of input image
* @param image_width width of input image
* @param x point x
* @param y point y
* @param size size of point
* @param color 0x 00| 00| 00| 00
* reserved|channel 0|channel 1|channel 2
*/
void draw_point(uint8_t *image, const uint32_t image_height, const uint32_t image_width,
const uint32_t x, const uint32_t y, const uint32_t size,
const uint32_t color = 0x00FF0000);
/**
* @brief Draw a point on RGB565 image.
*
* @param image pointer of input image
* @param image_height height of input image
* @param image_width width of input image
* @param x point x
* @param y point y
* @param size size of point
* @param color 0b 000| 00000| 00000| 000
* channel 1[2:0]|channel 0|channel 2|channel 1[5:3]
*/
void draw_point(uint16_t *image, const uint32_t image_height, const uint32_t image_width,
const uint32_t x, const uint32_t y, const uint32_t size,
uint16_t color = 0b0001111100000000);
/**
* @brief Draw a hollow rectangle on RGB888 image.
*
* @param image pointer of input image
* @param image_height height of input image
* @param image_width width of input image
* @param x1 top-left corner x
* @param y1 top-left corner y
* @param x2 bottom-right corner x
* @param y2 bottom-right corner y
* @param color 0x 00| 00| 00| 00
* reserved|channel 0|channel 1|channel 2
*/
void draw_hollow_rectangle(uint8_t *image, const uint32_t image_height, const uint32_t image_width,
uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2,
uint32_t color = 0x00FF0000);
/**
* @brief Draw a hollow rectangle on RGB565 image.
*
* @param image pointer of input image
* @param image_height height of input image
* @param image_width width of input image
* @param x1 top-left corner x
* @param y1 top-left corner y
* @param x2 bottom-right corner x
* @param y2 bottom-right corner y
* @param color 0b 000| 00000| 00000| 000
* channel 1[2:0]|channel 0|channel 2|channel 1[5:3]
*/
void draw_hollow_rectangle(uint16_t *image, const uint32_t image_height, const uint32_t image_width,
uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2,
const uint16_t color = 0b0001111100000000);
/**
* @brief Detect target movement from the number of activated detection points. Each cross in the figure below is a detection point.
* Once abs(frame_1_detection_point[i] - frame_2_detection_point[i]) > threshold, that detection point is activated.
* This function returns the number of activated detection points.
*
* __stride__________________________
* | | | | |
* stride | | | | |
* | | | | |
* |________|________|________| |
* | | | | |
* | | | | |
* | | | | |
* |________|________|________| height
* | | | | |
* | | | | |
* | | | | |
* |________|________|________| |
* | | | | |
* | | | | |
* | | | | |
* |________|________|________|___|___
* | |
* |__________width___________|
* | |
*
* Time consumption:
* Frame shape = (240, 240)
* Both frames are in PSRAM
* On ESP32-S3 with CPU 240MHz, QSPI 80MHz
*
* stride latency
* 1 28316us
* 2 8770us
* 4 3622us
* 8 1990us
* 16 880us
* 32 260us
*
*
* In an application, a threshold on the number of activated detection points is needed outside this function.
* Once the activated detection point number > number_threshold, the two frames are judged to contain a moving target.
* How to determine number_threshold?
* Assume the minimum shape of the target is (target_min_height, target_max_width).
* Then, number_threshold = [target_min_height / stride] * [target_max_width / stride] * ratio,
* where ratio is in (0, 1): the smaller the ratio, the more sensitive the detector, but the more false detections.
*
*
* @param f1 one frame in RGB565
* @param f2 another frame in RGB565
* @param height height of frame
* @param width width of frame
* @param stride stride of detection point, the smaller the stride is, the more reliable the detector is.
* @param threshold activation threshold of each detection point
* @return activated detection point number
*/
uint32_t get_moving_point_number(uint16_t *f1, uint16_t *f2, const uint32_t height, const uint32_t width, const uint32_t stride, const uint32_t threshold = 5);
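// A sketch of the number_threshold recipe above; the frame buffers, stride and
// ratio are illustrative. With a minimum target of 40x40 pixels, stride 8 and
// ratio 0.5 (the default threshold of 5 is used):
//
//     const uint32_t stride = 8;
//     uint32_t number_threshold = (40 / stride) * (40 / stride) / 2; // ratio = 0.5
//     uint32_t activated = dl::image::get_moving_point_number(frame1, frame2,
//                                                             240, 240, stride);
//     bool target_moved = (activated > number_threshold);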
/**
* @brief Detect target movement from the number of activated detection points. Each cross in the figure below is a detection point.
* Once abs(frame_1_detection_point[i] - frame_2_detection_point[i]) > threshold, that detection point is activated.
* This function returns the number of activated detection points.
*
* __stride__________________________
* | | | | |
* stride | | | | |
* | | | | |
* |________|________|________| |
* | | | | |
* | | | | |
* | | | | |
* |________|________|________| height
* | | | | |
* | | | | |
* | | | | |
* |________|________|________| |
* | | | | |
* | | | | |
* | | | | |
* |________|________|________|___|___
* | |
* |__________width___________|
* | |
*
*
* In an application, a threshold on the number of activated detection points is needed outside this function.
* Once the activated detection point number > number_threshold, the two frames are judged to contain a moving target.
* How to determine number_threshold?
* Assume the minimum shape of the target is (target_min_height, target_max_width).
* Then, number_threshold = [target_min_height / stride] * [target_max_width / stride] * ratio,
* where ratio is in (0, 1): the smaller the ratio, the more sensitive the detector, but the more false detections.
*
*
* @param f1 one frame in RGB888
* @param f2 another frame in RGB888
* @param height height of frame
* @param width width of frame
* @param stride stride of detection point, the smaller the stride is, the more reliable the detector is.
* @param threshold activation threshold of each detection point
* @return activated detection point number
*/
uint32_t get_moving_point_number(uint8_t *f1, uint8_t *f2, const uint32_t height, const uint32_t width, const uint32_t stride, const uint32_t threshold = 5);
/**
* @brief Apply an affine transformation to an image.
*
* @tparam T
* @param input the input image.
* @param output the output image.
* @param M_inv the inverse transformation matrix.
*/
template <typename T>
void warp_affine(dl::Tensor<T> *input, dl::Tensor<T> *output, dl::math::Matrix<float> *M_inv);
/**
* @brief Apply an affine transformation to an image.
*
* @tparam T
* @param input the pointer of the input image.
* @param shape the shape of the input image.
* @param output the output image.
* @param M_inv the inverse transformation matrix.
*/
template <typename T>
void warp_affine(uint16_t *input, std::vector<int> shape, dl::Tensor<T> *output, dl::math::Matrix<float> *M_inv);
/**
* @brief Get the Otsu threshold of a gray image.
*
* @param image the gray image.
* @return uint8_t the Otsu threshold.
*/
uint8_t get_otsu_thresh(Tensor<uint8_t> &image);
/**
* @brief Convert RGB image to gray image
*
* @param image input image
* @param bgr true: the image is in BGR format
* false: the image is in RGB format
* @return Tensor<uint8_t>* output image in gray format
*/
Tensor<uint8_t> *rgb2gray(Tensor<uint8_t> &image, bool bgr = false);
/**
* @brief Convert RGB image to LAB image
*
* @param image input image
* @param bgr true: the image is in BGR format
* false: the image is in RGB format
* @param fast true: use the fast algorithm, but the accuracy will be reduced
* false: do not use the fast algorithm
* @return Tensor<uint8_t>* output image in LAB format
*/
Tensor<uint8_t> *rgb2lab(Tensor<uint8_t> &image, bool bgr = false, bool fast = true);
/**
* @brief Convert RGB image to HSV image
*
* @param image input image
* @param bgr true: the image is in BGR format
* false: the image is in RGB format
* @param fast true: use the fast algorithm, but the accuracy will be reduced
* false: do not use the fast algorithm
* @return Tensor<uint8_t>* output image in HSV format
*/
Tensor<uint8_t> *rgb2hsv(Tensor<uint8_t> &image, bool bgr = false, bool fast = true);
/**
* @brief Resize an image to the target shape.
*
* @param image the input image Tensor
* @param target_shape the target shape of the resized image.
* @param resize_type one of IMAGE_RESIZE_BILINEAR or IMAGE_RESIZE_MEAN or IMAGE_RESIZE_NEAREST
* @return Tensor<uint8_t>* the pointer of the resized image Tensor
*/
Tensor<uint8_t> *resize_image(Tensor<uint8_t> &image, std::vector<int> target_shape, resize_type_t resize_type);
/**
* @brief Resize an image to the target shape.
*
* @param image the input image Tensor
* @param resized_image the resized image Tensor
* @param resize_type one of IMAGE_RESIZE_BILINEAR or IMAGE_RESIZE_MEAN or IMAGE_RESIZE_NEAREST
*/
void resize_image(Tensor<uint8_t> &image, Tensor<uint8_t> &resized_image, resize_type_t resize_type);
/**
* @brief Resize an image to the target shape with the nearest-neighbor method.
*
* @tparam T
* @param image the pointer of the input image
* @param input_shape the input shape of the image
* @param target_shape the target shape of the resized image
* @return T* the pointer of the resized image
*/
template <typename T>
T *resize_image_nearest(T *image, std::vector<int> input_shape, std::vector<int> target_shape);
/**
* @brief Resize an image to the target shape with the nearest-neighbor method.
*
* @tparam T
* @param image the pointer of the input image
* @param input_shape the input shape of the image
* @param resized_image the pointer of the resized image
* @param target_shape the target shape of the resized image
*/
template <typename T>
void resize_image_nearest(T *image, std::vector<int> input_shape, T *resized_image, std::vector<int> target_shape);
} // namespace image
} // namespace dl
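
A sketch tying a few of these helpers together; the input Tensor `rgb` and the 48x48 target shape are assumptions, and note that the returned Tensors are owned by the caller:

// Hypothetical preprocessing: RGB -> gray, Otsu threshold, then resize.
dl::Tensor<uint8_t> *gray = dl::image::rgb2gray(rgb);   // bgr = false: RGB order
uint8_t thresh = dl::image::get_otsu_thresh(*gray);
dl::Tensor<uint8_t> *small_img = dl::image::resize_image(*gray, {48, 48, 1},
                                                         dl::image::IMAGE_RESIZE_BILINEAR);
delete gray;       // caller frees the returned tensors
delete small_img;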

View File

@@ -0,0 +1,145 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_add2d.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Activation(Add2D(input0, input1)).
* NOTE: addition is element-wise, i.e., output[i,j,k] = input0[i,j,k] + input1[i,j,k]
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class Add2D : public Layer
{
private:
const Activation<feature_t> *activation; /*<! activation of add2d, if you don't specify anything, no activation is applied >*/
const int output_exponent; /*<! exponent of output >*/
Tensor<feature_t> *output; /*<! output ptr of add2d >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of add2d >*/
public:
/**
* @brief Construct a new Add2D object.
*
* @param output_exponent exponent of output
* @param activation activation of add2d, if you don't specify anything, no activation is applied
* @param name name of add2d
* @param inplace true: the output will store to input0
* false: the output will store to a separate memory
*/
Add2D(const int output_exponent, const Activation<feature_t> *activation = NULL, const char *name = "Add2D", bool inplace = false) : Layer(name),
activation(activation),
output_exponent(output_exponent),
output(NULL),
inplace(inplace),
output_shape({}) {}
/**
* @brief Destroy the Add2D object
*/
~Add2D()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}
/**
* @brief Update output shape.
* NOTE: input0.shape must be equal to input1.shape.
*
* @param input0 as one input
* @param input1 as another input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
{
assert(input0.is_same_shape(input1));
this->output_shape = input0.shape;
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(input0.shape);
this->output->free_element();
}
else
{
this->output = &input0;
}
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Add2D result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call Add2D operation.
*
* @param input0 as one input
* @param input1 as another input
* @param assign_core not effective yet
* @return Tensor<feature_t>& added result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input0, Tensor<feature_t> &input1, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
DL_LOG_LAYER_LATENCY_START();
nn::add2d(*this->output, input0, input1, this->activation, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "add2d");
}
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::add2d(*this->output, input0, input1, this->activation, assign_core, this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "add2d");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl
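
A minimal build/call sketch for the layer above; the int16 Tensors a and b and the output exponent are assumptions for illustration:

dl::layer::Add2D<int16_t> add(-10);         // output exponent chosen arbitrarily
add.build(a, b);                            // a.shape must equal b.shape
dl::Tensor<int16_t> &sum = add.call(a, b);  // element-wise a + b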

View File

@@ -0,0 +1,161 @@
#pragma once
#include <vector>
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_avg_pool2d.hpp"
namespace dl
{
namespace layer
{
/**
* @brief AvgPool2D(input).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class AvgPool2D : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
std::vector<int> filter_shape; /*<! filter shape in [filter_height, filter_width] >*/
const int stride_y; /*<! stride in height >*/
const int stride_x; /*<! stride in width >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
std::vector<int> padding; /*<! padding size needed in [top, bottom, left, right] of this operation >*/
Tensor<feature_t> *output; /*<! output ptr of AvgPool2D >*/
std::vector<int> output_shape; /*<! output shape of AvgPool2D >*/
public:
/**
* @brief Construct a new AvgPool2D object.
*
* @param output_exponent exponent of output
* @param filter_shape filter shape in [filter_height, filter_width]
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
* - PADDING_VALID means no padding
* PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
* such that the output has the same height/width dimension as the input,
* - PADDING_SAME_END results in TensorFlow-style padding
* - PADDING_SAME_BEGIN results in MXNET-style padding
* - PADDING_NOT_SET means padding with the specific "padding" value below.
* @param padding if padding_type is PADDING_NOT_SET, this value will be used as padding size.
* the shape must be 4, the value of each position is: [padding top, padding bottom, padding left, padding right]
* @param stride_y stride in height
* @param stride_x stride in width
* @param name name of layer
*/
AvgPool2D(const int output_exponent,
const std::vector<int> filter_shape,
const padding_type_t padding_type = PADDING_VALID,
std::vector<int> padding = {},
const int stride_y = 1,
const int stride_x = 1,
const char *name = "AvgPool2D") : Layer(name),
output_exponent(output_exponent),
filter_shape(filter_shape),
stride_y(stride_y),
stride_x(stride_x),
padding_type(padding_type),
padding(padding),
output_shape({})
{
this->output = new Tensor<feature_t>;
if (this->padding_type == PADDING_NOT_SET)
{
assert(this->padding.size() == 4);
}
}
/**
* @brief Destroy the AvgPool2D object.
*
*/
~AvgPool2D()
{
if (this->output != NULL)
{
delete this->output;
}
}
/**
* @brief Update output shape and padding.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(input.shape[0] > 0);
assert(input.shape[1] > 0);
assert(input.shape.size() == 3);
this->output_shape = nn::get_output_shape(input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type, false, this->padding);
this->output->set_shape(this->output_shape);
this->output->set_exponent(this->output_exponent);
if (this->padding_type != PADDING_NOT_SET)
{
this->padding = nn::get_pad_size(this->output_shape, input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
}
this->output->free_element();
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& AvgPool2D result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call AvgPool2D operation
*
* @param input as an input
* @param autoload_enable one of true or false,
* - true: load input and output from PSRAM to CACHE automatically
* - false: do not
* @return AvgPool2D result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, uint8_t autoload_enable = 0)
{
DL_LOG_LAYER_LATENCY_INIT();
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
if (autoload_enable)
{
dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
(uint32_t)(input.element), input.get_size() * sizeof(feature_t));
}
DL_LOG_LAYER_LATENCY_START();
nn::avg_pool2d(*this->output, input, this->padding, this->filter_shape, this->stride_y, this->stride_x);
DL_LOG_LAYER_LATENCY_END(this->name, "avg_pool2d");
return *this->output;
}
};
} // namespace layer
} // namespace dl
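
A usage sketch for the layer above — 2x2 average pooling with stride 2 over a hypothetical [H, W, C] int16 Tensor `input`; the exponent is illustrative:

dl::layer::AvgPool2D<int16_t> pool(-10, {2, 2}, dl::PADDING_VALID, {},
                                   /*stride_y=*/2, /*stride_x=*/2);
pool.build(input);
dl::Tensor<int16_t> &out = pool.call(input);  // shape roughly [H/2, W/2, C]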

View File

@@ -0,0 +1,56 @@
#pragma once
#include "dl_tool.hpp"
#include "dl_tool_cache.hpp"
#include <iostream>
namespace dl
{
namespace layer
{
/**
* @brief Base class for layer.
*
*/
class Layer
{
public:
char *name; /*<! name of layer >*/
/**
* @brief Construct a new Layer object.
*
* @param name name of layer.
*/
Layer(const char *name = NULL);
/**
* @brief Destroy the Layer object. Return resource.
*
*/
~Layer();
};
} // namespace layer
} // namespace dl
#if DL_LOG_LAYER_LATENCY
/**
* @brief Initialize.
*/
#define DL_LOG_LAYER_LATENCY_INIT() dl::tool::Latency latency
/**
* @brief Time starts.
*/
#define DL_LOG_LAYER_LATENCY_START() latency.start()
/**
* @brief Time ends and printed.
*/
#define DL_LOG_LAYER_LATENCY_END(prefix, key) \
latency.end(); \
latency.print(prefix, key)
#else
#define DL_LOG_LAYER_LATENCY_INIT()
#define DL_LOG_LAYER_LATENCY_START()
#define DL_LOG_LAYER_LATENCY_END(prefix, key)
#endif
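
When DL_LOG_LAYER_LATENCY is set to 1 in dl_define.hpp, these three macros bracket each stage of a layer's call(); per the definitions above they expand to the equivalent of:

dl::tool::Latency latency;   // DL_LOG_LAYER_LATENCY_INIT()
latency.start();             // DL_LOG_LAYER_LATENCY_START()
// (work being measured)
latency.end();               // DL_LOG_LAYER_LATENCY_END(prefix, key)
latency.print(prefix, key);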

View File

@@ -0,0 +1,139 @@
#pragma once
#include <assert.h>
#include <vector>
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"
#include "dl_nn_concat.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Concat(input1, input2, input3, ...).
*
* @tparam feature_t supports all integer and float data types
*/
template <typename feature_t>
class Concat : Layer
{
private:
int output_exponent; /*<! exponent of output >*/
int axis; /*<! The axis along which the Tensor will be concatenated. >*/
Tensor<feature_t> *output; /*<! output ptr of Concat >*/
std::vector<int> output_shape; /*<! output shape of Concat >*/
public:
/**
* @brief Construct a new Concat object.
*
* @param name name of layer
* @param axis The axis along which the Tensor will be concatenated.
*/
Concat(int axis, const char *name = "Concat") : Layer(name), axis(axis), output_shape({})
{
this->output = new Tensor<feature_t>;
}
/**
* @brief Destroy the Concat object
*/
~Concat()
{
if (this->output != NULL)
{
delete this->output;
}
}
/**
* @brief Update the output shape and exponent from the Tensors to be concatenated; called in Model.build().
*
* @param args pointers of concatenated Tensor
* @param print_shape whether to print the output shape.
*/
void build(std::vector<Tensor<feature_t> *> args, bool print_shape = false)
{
assert(args.size() > 1);
int shape_size = args[0]->shape.size();
if (this->axis < 0)
{
this->axis = shape_size + this->axis;
}
assert((this->axis < shape_size) && (this->axis > -1));
int output_shape_axis = args[0]->shape[this->axis];
for (int i = 1; i < args.size(); i++)
{
assert(shape_size == args[i]->shape.size());
assert(args[i]->exponent == args[i - 1]->exponent);
output_shape_axis += args[i]->shape[this->axis];
for (int j = 0; j < shape_size; j++)
{
if (j != this->axis)
{
assert(args[i]->shape[j] == args[i - 1]->shape[j]);
}
}
}
this->output_exponent = args[0]->exponent;
this->output_shape = args[0]->shape;
this->output_shape[this->axis] = output_shape_axis;
this->output->set_shape(this->output_shape);
this->output->set_exponent(this->output_exponent);
this->output->free_element();
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Call Concat operation
*
* @param inputs the pointers of inputs
* @param free_inputs true: free the inputs after call
* false: do not free inputs
* @return Tensor<feature_t>& concat result
*/
Tensor<feature_t> &call(std::vector<Tensor<feature_t> *> inputs, bool free_inputs = false)
{
DL_LOG_LAYER_LATENCY_INIT();
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
DL_LOG_LAYER_LATENCY_START();
nn::concat(*this->output, inputs, this->axis, free_inputs);
DL_LOG_LAYER_LATENCY_END(this->name, "concat");
return *this->output;
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Concat result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
};
} // namespace layer
} // namespace dl
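
A usage sketch for the layer above, concatenating two hypothetical int16 Tensors x and y along the channel axis (negative axes count from the end):

dl::layer::Concat<int16_t> concat(/*axis=*/-1);
concat.build({&x, &y});   // shapes and exponents must match except on axis
dl::Tensor<int16_t> &merged = concat.call({&x, &y}, /*free_inputs=*/false);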

View File

@@ -0,0 +1,179 @@
#pragma once
#include <assert.h>
#include <vector>
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Concat2D(input1, input2, input3, ...).
*
* @tparam feature_t supports all integer and float data types
*/
template <typename feature_t>
class Concat2D : Layer
{
private:
std::vector<Tensor<feature_t> *> output_vec; /*<! pointers of concatenated inputs >*/
std::vector<int> offset; /*<! memory offset of each concatenated inputs in entire element >*/
std::vector<int> channel; /*<! channel of concatenated inputs >*/
Tensor<feature_t> *output; /*<! output ptr of Concat2D >*/
int output_exponent; /*<! exponent of output >*/
public:
/**
* @brief Construct a new Concat2D object.
*
* @param name name of layer
*/
Concat2D(const char *name = NULL) : Layer(name) {
this->output = new Tensor<feature_t>;
}
/**
* @brief Destroy the Concat2D object
*/
~Concat2D()
{
if (this->output != NULL)
{
delete this->output;
}
}
/**
* @brief Collect inputs' channel and memory offset, called in Model.build().
*
* @param args pointers of concatenated Tensor
*/
void build(std::vector<Tensor<feature_t> *> args)
{
assert(args.size() > 0);
this->output_vec = args;
this->offset = std::vector<int>(args.size());
this->channel = std::vector<int>(args.size());
this->output_exponent = args[0]->exponent;
this->offset[0] = 0;
this->channel[0] = args[0]->shape[2];
std::vector<int> output_shape = args[0]->shape;
for (int i = 1; i < args.size(); i++)
{
assert(output_shape[0] == args[i]->shape[0]); // height
assert(output_shape[1] == args[i]->shape[1]); // width
// assert(this->output_exponent == args[i]->exponent); // exponent
this->offset[i] = output_shape[2];
this->channel[i] = args[i]->shape[2];
output_shape[2] += args[i]->shape[2];
}
this->output->set_shape(output_shape);
this->output->set_exponent(this->output_exponent);
this->output->free_element();
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Concat2D result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Get the maximum padding among inputs and output, then set it on this->output. Called at the end of Model.build().
* NOTE: Consider special situations like C = Concat2D_1(A, B), E = Concat2D_2(C, D), where A, B, C, D, E are Tensors.
* To avoid memory copies, we allocate one entire element for E and take it apart for A, B and D.
* A, B, C, D and E will become inputs of other layers, which results in different padding sizes.
* To get the maximum padding, we should call backward() at the end of Model.build():
* Concat2D_1.backward(); // max_padding_temp = get_max_padding(A, B, C), padding of A, B and C are set to max_padding_temp.
* Concat2D_2.backward(); // max_padding = get_max_padding(max_padding_temp, get_max_padding(D, E)) , padding of C, D and E are set to max_padding.
* However, padding of A and B is still max_padding_temp.
* Concat2D_1.backward(); // padding of A and B are set to max_padding.
* Or,
* Concat2D_2.backward();
* Concat2D_1.backward();
* Concat2D_2.backward();
*/
void backward()
{
std::vector<int> max_padding = this->output->padding;
int max_channel_with_padding = this->output->shape_with_padding[2];
for (int i = 0; i < this->output_vec.size(); i++)
{
for (int j = 0; j < max_padding.size(); j++)
{
max_padding[j] = DL_MAX(max_padding[j], this->output_vec[i]->padding[j]);
}
max_channel_with_padding = DL_MAX(max_channel_with_padding, this->output_vec[i]->shape_with_padding[2]);
}
this->output->set_padding_size(max_padding);
this->output->shape_with_padding[2] = max_channel_with_padding;
for (int i = 0; i < this->output_vec.size(); i++)
{
this->output_vec[i]->set_padding_size(max_padding);
this->output_vec[i]->shape_with_padding[2] = max_channel_with_padding;
#if CONFIG_DEBUG_MODE
assert(this->output->shape_with_padding[0] == this->output_vec[i]->shape_with_padding[0]);
assert(this->output->shape_with_padding[1] == this->output_vec[i]->shape_with_padding[1]);
assert(this->output->shape_with_padding[2] == this->output_vec[i]->shape_with_padding[2]);
#endif
}
}
/**
* @brief Calloc an entire element for the concatenation result. Take the entire element apart and deliver the element pointers to the concatenated Tensors.
* NOTE: For example, C = Concat2D(A, B). We allocate one entire element for C and deliver two element pointers to A and B.
* Assume that the result of A is produced first. We should call Concat2D.calloc_element() just before the result of A is produced,
* to make sure the element of A is ready and can be filled.
*/
void calloc_element()
{
DL_LOG_LAYER_LATENCY_INIT();
DL_LOG_LAYER_LATENCY_START();
this->output->calloc_element();
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
DL_LOG_LAYER_LATENCY_START();
for (int i = 0; i < this->offset.size(); i++)
{
this->output_vec[i]->element = this->output->element + this->offset[i];
this->output_vec[i]->set_auto_free(false);
}
DL_LOG_LAYER_LATENCY_END(this->name, "deliver");
}
void apply_element()
{
DL_LOG_LAYER_LATENCY_INIT();
DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
DL_LOG_LAYER_LATENCY_START();
for (int i = 0; i < this->offset.size(); i++)
{
this->output_vec[i]->element = this->output->element + this->offset[i];
this->output_vec[i]->set_auto_free(false);
}
DL_LOG_LAYER_LATENCY_END(this->name, "deliver");
}
};
} // namespace layer
} // namespace dl
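
The backward() ordering described in the NOTE above, written out for the C = Concat2D_1(A, B), E = Concat2D_2(C, D) example; the layer object names are hypothetical:

// After every layer's build(), propagate the maximum padding both ways:
concat2d_2.backward();  // max over (C, D, E) is set on C, D and E
concat2d_1.backward();  // max over (A, B, C); C already carries the global
                        // maximum, so A and B pick it up here
concat2d_2.backward();  // re-propagate in case the previous step enlarged C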

View File

@@ -0,0 +1,186 @@
#pragma once
#include "dl_nn_conv2d.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Activation(Conv2D(input, filter) + bias).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @tparam bias_t supports int16_t and int8_t, must specify when using int8 per-channel quantization
* - int16_t: for int16 quantization and int8 per-channel quantization
* - int8_t: for int8 per-tensor quantization
*/
template <typename feature_t, typename bias_t = feature_t>
class Conv2D : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
const Filter<feature_t> *filter; /*<! filter of Conv2D >*/
const int stride_y; /*<! stride in height >*/
const int stride_x; /*<! stride in width >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
const Bias<bias_t> *bias; /*<! bias of Conv2D, if you don't specify anything, no bias is added >*/
const Activation<feature_t> *activation; /*<! activation of Conv2D, if you don't specify anything, no activation is applied >*/
std::vector<int> padding; /*<! padding size needed in [top, bottom, left, right] of this operation >*/
Tensor<feature_t> *output; /*<! output ptr of Conv2D >*/
std::vector<int> output_shape; /*<! output shape of Conv2D >*/
public:
/**
* @brief Construct a new Conv2D object.
*
* @param output_exponent exponent of output
* @param filter filter of Conv2D
* @param bias bias of Conv2D, if you don't specify anything, no bias is added
* @param activation activation of Conv2D, if you don't specify anything, no activation is applied
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
* - PADDING_VALID means no padding
* PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
* such that the output has the same height/width dimension as the input,
* - PADDING_SAME_END results in TensorFlow-style padding
* - PADDING_SAME_BEGIN results in MXNET-style padding
* - PADDING_NOT_SET means padding with the specific "padding" value below.
* @param padding if padding_type is PADDING_NOT_SET, this value will be used as padding size.
* the shape must be 4, the value of each position is: [padding top, padding bottom, padding left, padding right]
* @param stride_y stride in height
* @param stride_x stride in width
* @param name name of layer
*/
Conv2D(const int output_exponent,
const Filter<feature_t> *filter,
const Bias<bias_t> *bias = NULL,
const Activation<feature_t> *activation = NULL,
const padding_type_t padding_type = PADDING_VALID,
std::vector<int> padding = {},
const int stride_y = 1,
const int stride_x = 1,
const char *name = "Conv2D") : Layer(name),
output_exponent(output_exponent),
filter(filter),
stride_y(stride_y),
stride_x(stride_x),
padding_type(padding_type),
bias(bias),
activation(activation),
padding(padding),
output_shape({})
{
this->output = new Tensor<feature_t>;
if (this->padding_type == PADDING_NOT_SET)
{
assert(this->padding.size() == 4);
}
}
/**
* @brief Destroy the Conv2D object.
*
*/
~Conv2D()
{
if (this->output != NULL)
{
delete this->output;
}
}
/**
* @brief Update output shape and padding.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(input.shape[0] > 0);
assert(input.shape[1] > 0);
assert(input.shape.size() == 3);
assert(this->filter->shape.size() == 4);
assert(input.shape[2] == this->filter->shape[2]);
this->output_shape = nn::get_output_shape(input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type, true, this->padding);
this->output->set_shape(this->output_shape);
this->output->set_exponent(this->output_exponent);
this->output->free_element();
if (this->padding_type != PADDING_NOT_SET)
{
this->padding = nn::get_pad_size(this->output_shape, input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
}
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Conv2D result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call Conv2D operation
*
* @param input as an input.
* @param autoload_enable one of true or false,
* - true: load input and output from PSRAM to CACHE automatically
* - false: do not
* @param assign_core not effective yet
* @return Conv2D result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, bool autoload_enable = false, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
if (autoload_enable)
{
dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
(uint32_t)(input.element), input.get_size() * sizeof(feature_t));
}
DL_LOG_LAYER_LATENCY_START();
nn::conv2d(*this->output, input, this->padding, *(this->filter), this->stride_y, this->stride_x, this->bias, this->activation, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "conv2d");
return *this->output;
}
/**
* @brief Preload the filter to Cache.
* NOTE: Call this layer's preload() before the previous layer's call() so that the filter can be loaded while the previous layer is still computing.
*/
void preload()
{
size_t size = sizeof(feature_t);
int shape_size = this->filter->shape.size();
for (int i = 0; i < shape_size; ++i)
{
size *= filter->shape[i];
}
dl::tool::cache::preload_func((uint32_t)(this->filter->element), size);
}
};
} // namespace layer
} // namespace dl

View File

@@ -0,0 +1,188 @@
#pragma once
#include "dl_nn_depthwise_conv2d.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Activation(DepthwiseConv2D(filter, input) + bias).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @tparam bias_t supports int16_t and int8_t, must specify when using int8 per-channel quantization
* - int16_t: for int16 quantization and int8 per-channel quantization
* - int8_t: for int8 per-tensor quantization
*/
template <typename feature_t, typename bias_t = feature_t>
class DepthwiseConv2D : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
const Filter<feature_t> *filter; /*<! filter of DepthwiseConv2D >*/
const int stride_y; /*<! stride in height >*/
const int stride_x; /*<! stride in width >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
const Bias<bias_t> *bias; /*<! bias of DepthwiseConv2D, if you don't specify anything, no bias is added >*/
const Activation<feature_t> *activation; /*<! activation of DepthwiseConv2D, if you don't specify anything, no activation is applied >*/
std::vector<int> padding; /*<! padding size needed in [top, bottom, left, right] of this operation >*/
Tensor<feature_t> *output; /*<! output ptr of DepthwiseConv2D >*/
std::vector<int> output_shape; /*<! output shape of DepthwiseConv2D >*/
public:
/**
* @brief Construct a new DepthwiseConv2D object.
*
* @param output_exponent exponent of output
* @param filter filter of DepthwiseConv2D
* @param bias bias of DepthwiseConv2D, if you don't specify anything, no bias is added
* @param activation activation of DepthwiseConv2D, if you don't specify anything, no activation is applied
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
* - PADDING_VALID means no padding
* PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
* such that the output has the same height/width dimension as the input,
* - PADDING_SAME_END results in TensorFlow-style padding
* - PADDING_SAME_BEGIN results in MXNET-style padding
* - PADDING_NOT_SET means padding with the specific "padding" value below.
* @param padding if padding_type is PADDING_NOT_SET, this value will be used as padding size.
* the shape must be 4, the value of each position is: [padding top, padding bottom, padding left, padding right]
* @param stride_y - stride in height
* @param stride_x - stride in width
* @param name name of layer
*/
DepthwiseConv2D(const int output_exponent,
const Filter<feature_t> *filter,
const Bias<bias_t> *bias = NULL,
const Activation<feature_t> *activation = NULL,
const padding_type_t padding_type = PADDING_VALID,
std::vector<int> padding = {},
const int stride_y = 1,
const int stride_x = 1,
const char *name = "DepthwiseConv2D") : Layer(name),
output_exponent(output_exponent),
filter(filter),
stride_y(stride_y),
stride_x(stride_x),
padding_type(padding_type),
bias(bias),
activation(activation),
padding(padding),
output_shape({})
{
this->output = new Tensor<feature_t>;
if (this->padding_type == PADDING_NOT_SET)
{
assert(this->padding.size() == 4);
}
}
/**
* @brief Destroy the DepthwiseConv2D object.
*
*/
~DepthwiseConv2D()
{
if (this->output != NULL)
{
delete this->output;
}
}
/**
* @brief Update output shape and padding.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(input.shape[0] > 0);
assert(input.shape[1] > 0);
assert(input.shape.size() == 3);
assert(this->filter->shape.size() == 4);
assert(input.shape[2] == this->filter->shape[2]);
this->output_shape = nn::get_output_shape(input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type, false, this->padding);
this->output->set_shape(this->output_shape);
this->output->set_exponent(this->output_exponent);
if (this->padding_type != PADDING_NOT_SET)
{
this->padding = nn::get_pad_size(this->output_shape, input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
}
this->output->free_element();
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& DepthwiseConv2D result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call DepthwiseConv2D operation.
*
* @param input as an input
* @param autoload_enable one of true or false,
* - true: load input and output from PSRAM to CACHE automatically
* - false: do not
* @param assign_core not effective yet
* @return DepthwiseConv2D result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, bool autoload_enable = false, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
if (autoload_enable)
{
dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
(uint32_t)(input.element), input.get_size() * sizeof(feature_t));
}
DL_LOG_LAYER_LATENCY_START();
nn::depthwise_conv2d(*this->output, input, this->padding, *(this->filter), this->stride_y, this->stride_x, this->bias, this->activation, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "depthwise_conv2d");
return *this->output;
}
/**
* @brief Preload the filter to Cache.
* NOTE: Call this layer's preload() before the previous layer's call() so that the filter can be loaded while the previous layer is still computing.
*/
void preload()
{
size_t size = sizeof(feature_t);
int shape_size = this->filter->shape.size();
for (int i = 0; i < shape_size; ++i)
{
size *= filter->shape[i];
}
dl::tool::cache::preload_func((uint32_t)(this->filter->element), size);
}
};
} // namespace layer
} // namespace dl

View File

@@ -0,0 +1,130 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief ExpandDims(input): insert new axes of length 1 at the positions given in axis.
*
* @tparam feature_t supports all integer and float data types
*/
template <typename feature_t>
class ExpandDims : public Layer
{
private:
std::vector<int> output_shape; /*<! output shape of ExpandDims >*/
std::vector<int> axis; /*<! position where the new axis is placed. >*/
Tensor<feature_t> *output; /*<! output ptr of ExpandDims >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
public:
int output_exponent;
/**
* @brief Construct a new ExpandDims object
*
* @param axis position where the new axis is placed.
* @param name name of layer
* @param inplace true: the output will store to input
* false: the output will store to a separate memory
*/
ExpandDims(std::vector<int> axis, const char *name = "ExpandDims", bool inplace = false) : Layer(name),
output_shape({}),
axis(axis),
output(NULL),
inplace(inplace)
{
}
/**
* @brief Destroy the ExpandDims object
*
*/
~ExpandDims()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}
/**
* @brief Update output shape.
*
* @param input as an input.
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
this->output_exponent = input.exponent;
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(input.shape);
this->output->expand_dims(this->axis);
this->output->free_element();
}
else
{
this->output = &input;
this->output->expand_dims(this->axis);
}
this->output_shape = this->output->shape;
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& ExpandDims result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call ExpandDims operation.
*
* @param input as an input
* @return Tensor<feature_t>& ExpandDims result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input)
{
DL_LOG_LAYER_LATENCY_INIT();
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_exponent(input.exponent);
this->output->set_shape(this->output_shape);
this->output->copy_element(input, true);
DL_LOG_LAYER_LATENCY_END(this->name, "ExpandDims");
}
else
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_shape(this->output_shape);
DL_LOG_LAYER_LATENCY_END(this->name, "ExpandDims");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl
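
A usage sketch of the in-place mode above, inserting a leading batch-like axis; the input Tensor is hypothetical:

// [H, W, C] -> [1, H, W, C], reusing the input's memory.
dl::layer::ExpandDims<int16_t> unsqueeze({0}, "ExpandDims", /*inplace=*/true);
unsqueeze.build(input);
dl::Tensor<int16_t> &out = unsqueeze.call(input);  // aliases input when inplace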

View File

@@ -0,0 +1,120 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Flatten(input): reshape the input into a one-dimensional Tensor.
*
* @tparam feature_t supports all integer and float data types
*/
template <typename feature_t>
class Flatten : public Layer
{
private:
int output_exponent; /*<! exponent of output >*/
Tensor<feature_t> *output; /*<! output ptr of Flatten >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of Flatten >*/
public:
/**
* @brief Construct a new Flatten object
*
* @param name name of layer
* @param inplace true: the output will store to input0
* false: the output will store to a separate memory
*/
Flatten(const char *name = "Flatten", bool inplace = false) : Layer(name), output(NULL), inplace(inplace), output_shape({})
{}
/**
* @brief Destroy the Flatten object
*
*/
~Flatten()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}
/**
* @brief Update output shape.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
this->output_exponent = input.exponent;
this->output_shape = {input.get_size()};
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input;
this->output->set_shape(this->output_shape);
}
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Flatten result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call Flatten operation.
*
* @param input as an input
* @return Tensor<feature_t>& Flatten result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input)
{
DL_LOG_LAYER_LATENCY_INIT();
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_exponent(input.exponent);
this->output->flatten();
this->output->copy_element(input, true);
DL_LOG_LAYER_LATENCY_END(this->name, "flatten");
}
else
{
DL_LOG_LAYER_LATENCY_START();
this->output->flatten();
DL_LOG_LAYER_LATENCY_END(this->name, "flatten");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl

View File

@@ -0,0 +1,167 @@
#pragma once
#include "dl_nn_fully_connected.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Activation(FullyConnected(input, filter) + bias).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @tparam bias_t supports int16_t and int8_t, must specify when using int8 per-channel quantization
* - int16_t: for int16 quantization and int8 per-channel quantization
* - int8_t: for int8 per-tensor quantization
*/
template <typename feature_t, typename bias_t = feature_t>
class FullyConnected : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
const bool flatten; /*<! true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ..., xn, output_dim] >*/
const Filter<feature_t> *filter; /*<! filter of FullyConnected >*/
const Bias<bias_t> *bias; /*<! bias of FullyConnected, if you don't specify anything, no bias is added >*/
const Activation<feature_t> *activation; /*<! activation of FullyConnected, if you don't specify anything, no activation is applied >*/
Tensor<feature_t> *output; /*<! output ptr of FullyConnected >*/
std::vector<int> output_shape; /*<! output shape of FullyConnected >*/
public:
/**
* @brief Construct a new FullyConnected object.
*
* @param output_exponent exponent of output
* @param filter filter of FullyConnected
* @param bias bias of FullyConnected, if you don't specify anything, no bias is added
* @param activation activation of FullyConnected, if you don't specify anything, no activation is applied
* @param flatten true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ..., xn, output_dim]
* @param name name of layer
*/
FullyConnected(const int output_exponent,
const Filter<feature_t> *filter,
const Bias<bias_t> *bias = NULL,
const Activation<feature_t> *activation = NULL,
const bool flatten = true,
const char *name = "FullyConnected") : Layer(name),
output_exponent(output_exponent),
flatten(flatten),
filter(filter),
bias(bias),
activation(activation),
output_shape({})
{
this->output = new Tensor<feature_t>;
}
/**
* @brief Destroy the FullyConnected object.
*
*/
~FullyConnected()
{
if (this->output != NULL)
{
delete this->output;
}
}
/**
* @brief Update output shape and exponent.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(this->filter->shape.size() == 4);
assert(this->filter->shape[0] == 1);
assert(this->filter->shape[1] == 1);
if (this->flatten)
{
assert(input.get_size() == this->filter->shape[2]);
this->output_shape = {this->filter->shape[3]};
}
else
{
assert(input.shape.back() == this->filter->shape[2]);
this->output_shape = input.shape;
this->output_shape[this->output_shape.size() - 1] = this->filter->shape[3];
}
this->output->set_shape(this->output_shape);
this->output->set_exponent(this->output_exponent);
this->output->free_element();
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& FullyConnected result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call FullyConnected operation
*
* @param input as an input.
* @param autoload_enable one of true or false,
* - true: load input and output from PSRAM to CACHE automatically
* - false: do not
* @param assign_core not effective yet
* @return FullyConnected result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, bool autoload_enable = false, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
if (autoload_enable)
{
dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
(uint32_t)(input.element), input.get_size() * sizeof(feature_t));
}
DL_LOG_LAYER_LATENCY_START();
nn::fully_connected(*this->output, input, *(this->filter), this->bias, this->activation, this->flatten, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "fully_connected");
return *this->output;
}
/**
* @brief Preload the filter to Cache.
* NOTE: Call this layer's preload() before the previous layer's call() so that the filter can be loaded while the previous layer is still computing.
*/
void preload()
{
size_t size = sizeof(feature_t);
int shape_size = this->filter->shape.size();
for (int i = 0; i < shape_size; ++i)
{
size *= filter->shape[i];
}
dl::tool::cache::preload_func((uint32_t)(this->filter->element), size);
}
};
} // namespace layer
} // namespace dl
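
A hypothetical wiring sketch for this layer: a 3-class head on a 32-element feature vector. The Filter/Bias constructor order (element, exponent, shape) and all numeric values are assumptions, not taken from this commit:

// Quantized weights/biases would come from the model converter.
const int16_t fc_filter_element[32 * 3] = {0 /* ... quantized weights ... */};
const dl::Filter<int16_t> fc_filter(fc_filter_element, /*exponent=*/-12, {1, 1, 32, 3});
const int16_t fc_bias_element[3] = {0 /* ... quantized biases ... */};
const dl::Bias<int16_t> fc_bias(fc_bias_element, /*exponent=*/-12, {3});

// flatten=true: any input with 32 elements in total yields an output of shape [3].
dl::layer::FullyConnected<int16_t> fc(/*output_exponent=*/-10, &fc_filter, &fc_bias,
                                      /*activation=*/NULL, /*flatten=*/true, "fc");
fc.build(feature, /*print_shape=*/true); // `feature`: a Tensor<int16_t> prepared elsewhere
dl::Tensor<int16_t> &logits = fc.call(feature);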

View File

@ -0,0 +1,126 @@
#pragma once
#include <vector>
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_global_avg_pool2d.hpp"
namespace dl
{
namespace layer
{
/**
* @brief GlobalAveragePool2D(input).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class GlobalAveragePool2D : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
std::vector<int> output_shape; /*<! output shape of GlobalAveragePool2D >*/
Tensor<feature_t> *output; /*<! output ptr of GlobalAveragePool2D >*/
public:
/**
* @brief Construct a new GlobalAveragePool2D object.
*
* @param output_exponent exponent of output
* @param name name of layer
*/
GlobalAveragePool2D(const int output_exponent, const char *name = "GlobalAveragePool2D") : Layer(name),
output_exponent(output_exponent),
output_shape({})
{
this->output = new Tensor<feature_t>;
}
/**
* @brief Destroy the GlobalAveragePool2D object.
*
*/
~GlobalAveragePool2D()
{
if (this->output != NULL)
{
delete this->output;
}
}
/**
* @brief Update output shape.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(input.shape[0] > 0);
assert(input.shape[1] > 0);
assert(input.shape.size() == 3);
std::vector<int> output_shape(input.shape.size(), 1);
output_shape[2] = input.shape[2];
this->output_shape = output_shape;
this->output->set_shape(this->output_shape);
this->output->set_exponent(this->output_exponent);
this->output->free_element();
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& GlobalAveragePool2D result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call GlobalAveragePool2D operation
*
* @param input as an input
* @param autoload_enable one of true or false,
* - true: load input and output from PSRAM to CACHE automatically
* - false: do not
* @return GlobalAveragePool2D result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, uint8_t autoload_enable = 0)
{
DL_LOG_LAYER_LATENCY_INIT();
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
if (autoload_enable)
{
dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
(uint32_t)(input.element), input.get_size() * sizeof(feature_t));
}
DL_LOG_LAYER_LATENCY_START();
nn::global_avg_pool2d(*this->output, input);
DL_LOG_LAYER_LATENCY_END(this->name, "global_avg_pool2d");
return *this->output;
}
};
} // namespace layer
} // namespace dl
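
A short usage sketch with assumed values: averaging shrinks magnitudes, so the caller picks the output exponent at construction time rather than inheriting the input's.

// Collapse a [7, 7, 64] feature map to [1, 1, 64].
dl::layer::GlobalAveragePool2D<int16_t> gap(/*output_exponent=*/-8, "gap");
gap.build(feature, /*print_shape=*/true); // `feature`: Tensor<int16_t>, shape [7, 7, 64]
dl::Tensor<int16_t> &pooled = gap.call(feature); // output shape [1, 1, 64]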

View File

@ -0,0 +1,121 @@
#pragma once
#include <vector>
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_global_max_pool2d.hpp"
namespace dl
{
namespace layer
{
/**
* @brief GlobalMaxPool2D(input).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class GlobalMaxPool2D : public Layer
{
private:
Tensor<feature_t> *output; /*<! output ptr of GlobalMaxPool2D >*/
std::vector<int> output_shape; /*<! output shape of GlobalMaxPool2D >*/
public:
/**
* @brief Construct a new GlobalMaxPool2D object.
*
* @param name name of layer
*/
GlobalMaxPool2D(const char *name = "GlobalMaxPool2D") : Layer(name), output_shape({})
{
this->output = new Tensor<feature_t>;
}
/**
* @brief Destroy the GlobalMaxPool2D object.
*
*/
~GlobalMaxPool2D()
{
if (this->output != NULL)
{
delete this->output;
}
}
/**
* @brief Update output shape and exponent.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(input.shape[0] > 0);
assert(input.shape[1] > 0);
assert(input.shape.size() == 3);
this->output->set_exponent(input.exponent);
std::vector<int> output_shape(input.shape.size(), 1);
output_shape[2] = input.shape[2];
this->output_shape = output_shape;
this->output->set_shape(this->output_shape);
this->output->free_element();
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& GlobalMaxPool2D result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call GlobalMaxPool2D operation
*
* @param input as an input
* @param autoload_enable one of true or false,
* - true: load input and output from PSRAM to CACHE automatically
* - false: do not
* @return GlobalMaxPool2D result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, uint8_t autoload_enable = 0)
{
DL_LOG_LAYER_LATENCY_INIT();
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
if (autoload_enable)
{
dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
(uint32_t)(input.element), input.get_size() * sizeof(feature_t));
}
DL_LOG_LAYER_LATENCY_START();
nn::global_max_pool2d(*this->output, input);
DL_LOG_LAYER_LATENCY_END(this->name, "global_max_pool2d");
return *this->output;
}
};
} // namespace layer
} // namespace dl

View File

@ -0,0 +1,141 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_leakyrelu.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief LeakyRelu(input).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class LeakyRelu : public Layer
{
private:
feature_t activation_alpha; /*<! quantized alpha >*/
int activation_exponent; /*<! exponent of quantized alpha >*/
Tensor<feature_t> *output; /*<! output ptr of leakyrelu>*/
bool inplace;                  /*<! true: the output will be stored in input0
                                    false: the output will be stored in separate memory >*/
std::vector<int> output_shape; /*<! output shape of leakyrelu >*/
public:
/**
* @brief Construct a new LeakyRelu object
*
* @param activation_alpha quantized alpha
* @param activation_exponent exponent of quantized alpha
* @param name name of leakyrelu
* @param inplace true: the output will be stored in input0
*                false: the output will be stored in separate memory
*/
LeakyRelu(const int activation_alpha, const int activation_exponent, const char *name = "LeakyRelu", bool inplace = false) : Layer(name), output(NULL), output_shape({})
{
this->activation_alpha = activation_alpha;
this->activation_exponent = activation_exponent;
this->inplace = inplace;
}
/**
* @brief Destroy the LeakyRelu object
*
*/
~LeakyRelu()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}
/**
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
this->output_shape = input.shape;
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_shape(this->output_shape);
this->output->set_exponent(input.exponent);
this->output->free_element();
}
else
{
this->output = &input;
this->output->set_shape(this->output_shape);
}
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& LeakyRelu result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call LeakyRelu operation.
*
* @param input as an input
* @param assign_core not effective yet
* @return LeakyRelu result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
DL_LOG_LAYER_LATENCY_START();
nn::leakyrelu(*this->output, input, this->activation_alpha, this->activation_exponent, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
}
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::leakyrelu(*this->output, input, this->activation_alpha, this->activation_exponent, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl
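
The alpha passed in is already quantized: alpha_float ≈ activation_alpha * 2^activation_exponent. A sketch with assumed numbers, targeting a negative-side slope of about 0.1:

// 102 * 2^-10 = 102 / 1024 ≈ 0.0996, close to the desired 0.1.
dl::layer::LeakyRelu<int16_t> lrelu(/*activation_alpha=*/102,
                                    /*activation_exponent=*/-10,
                                    "leakyrelu", /*inplace=*/true);
lrelu.build(feature); // `feature`: Tensor<int16_t> prepared elsewhere
dl::Tensor<int16_t> &activated = lrelu.call(feature); // inplace: overwrites `feature`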

View File

@ -0,0 +1,143 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_nn_max2d.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Max2D(input0, input1).
* NOTE: maximum is element-wise, i.e., output[i,j,k] = max(input0[i,j,k], input1[i,j,k])
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class Max2D : public Layer
{
private:
Tensor<feature_t> *output; /*<! output ptr of max2d >*/
bool inplace;                  /*<! true: the output will be stored in input0
                                    false: the output will be stored in separate memory >*/
std::vector<int> output_shape; /*<! output shape of max2d >*/
public:
/**
* @brief Construct a new Max2D object.
*
* @param name name of max2d
* @param inplace true: the output will be stored in input0
*                false: the output will be stored in separate memory
*/
Max2D(const char *name = "Max2D", bool inplace = false) : Layer(name),
output(NULL), inplace(inplace), output_shape({})
{
}
/**
* @brief Destroy the Max2D object
*
*/
~Max2D()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}
/**
* @brief Update output shape and exponent
* NOTE: input0.shape must equal to input1.shape.
* input0.exponent must equal to input1.exponent.
*
* @param input0 as one input
* @param input1 as another input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
{
assert(input0.is_same_shape(input1));
assert(input0.exponent == input1.exponent);
this->output_shape = input0.shape;
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(input0.exponent);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input0;
}
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Max2D result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call Max2D operation.
*
* @param input0 as one input
* @param input1 as another input
* @param assign_core not effective yet
* @return Max2D result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input0, Tensor<feature_t> &input1, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input0.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
DL_LOG_LAYER_LATENCY_START();
nn::max2d(*this->output, input0, input1, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "max2d");
}
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::max2d(*this->output, input0, input1, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "max2d");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl
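
A brief sketch of the element-wise pattern shared by Max2D and its siblings; both inputs must already agree in shape and exponent, as build() asserts:

dl::layer::Max2D<int16_t> max2d("max2d", /*inplace=*/false);
max2d.build(branch_a, branch_b); // branch_a, branch_b: same-shape, same-exponent Tensor<int16_t>
dl::Tensor<int16_t> &merged = max2d.call(branch_a, branch_b);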

View File

@ -0,0 +1,157 @@
#pragma once
#include <vector>
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_max_pool2d.hpp"
namespace dl
{
namespace layer
{
/**
* @brief MaxPool2D(input).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class MaxPool2D : public Layer
{
private:
std::vector<int> filter_shape; /*<! filter shape in [filter_height, filter_width] >*/
const int stride_y; /*<! stride in height >*/
const int stride_x; /*<! stride in width >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
std::vector<int> padding; /*<! padding size needed in [top, bottom, left, right] of this operation >*/
Tensor<feature_t> *output; /*<! output ptr of MaxPool2D >*/
std::vector<int> output_shape; /*<! output shape of MaxPool2D >*/
public:
/**
* @brief Construct a new MaxPool2D object.
*
* @param filter_shape filter shape in [filter_height, filter_width]
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
*            - PADDING_VALID: no padding
*            - PADDING_SAME_END and PADDING_SAME_BEGIN: pad with zeros evenly to the left/right or top/bottom of the input
*              so that the output has the same height/width as the input,
*              - PADDING_SAME_END pads in TensorFlow style
*              - PADDING_SAME_BEGIN pads in MXNET style
*            - PADDING_NOT_SET: pad with the specific "padding" value below.
* @param padding if padding_type is PADDING_NOT_SET, this value will be used as the padding size.
*                its length must be 4, in the order [padding top, padding bottom, padding left, padding right]
* @param stride_y stride in height
* @param stride_x stride in width
* @param name name of layer
*/
MaxPool2D(const std::vector<int> filter_shape,
const padding_type_t padding_type = PADDING_VALID,
std::vector<int> padding = {},
const int stride_y = 1,
const int stride_x = 1,
const char *name = "MaxPool2D") : Layer(name),
filter_shape(filter_shape),
stride_y(stride_y),
stride_x(stride_x),
padding_type(padding_type),
padding(padding),
output_shape({})
{
this->output = new Tensor<feature_t>;
if (this->padding_type == PADDING_NOT_SET)
{
assert(this->padding.size() == 4);
}
}
/**
* @brief Destroy the MaxPool2D object.
*
*/
~MaxPool2D()
{
if (this->output != NULL)
{
delete this->output;
}
}
/**
* @brief Update output shape and padding.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(input.shape[0] > 0);
assert(input.shape[1] > 0);
assert(input.shape.size() == 3);
this->output->set_exponent(input.exponent);
this->output_shape = nn::get_output_shape(input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type, false, this->padding);
this->output->set_shape(this->output_shape);
if (this->padding_type != PADDING_NOT_SET)
{
this->padding = nn::get_pad_size(this->output_shape, input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
}
this->output->free_element();
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& MaxPool2D result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call MaxPool2D operation
*
* @param input as an input
* @param autoload_enable one of true or false,
* - true: load input and output from PSRAM to CACHE automatically
* - false: do not
* @return MaxPool2D result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, uint8_t autoload_enable = 0)
{
DL_LOG_LAYER_LATENCY_INIT();
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
if (autoload_enable)
{
dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
(uint32_t)(input.element), input.get_size() * sizeof(feature_t));
}
DL_LOG_LAYER_LATENCY_START();
nn::max_pool2d(*this->output, input, this->padding, this->filter_shape, this->stride_y, this->stride_x);
DL_LOG_LAYER_LATENCY_END(this->name, "max_pool2d");
return *this->output;
}
};
} // namespace layer
} // namespace dl
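
A usage sketch with assumed dimensions: classic 2x2 pooling with stride 2 halves each spatial dimension under PADDING_VALID.

// A [28, 28, 16] input builds to a [14, 14, 16] output.
dl::layer::MaxPool2D<int16_t> pool({2, 2}, dl::PADDING_VALID, /*padding=*/{},
                                   /*stride_y=*/2, /*stride_x=*/2, "pool");
pool.build(feature, /*print_shape=*/true); // `feature`: Tensor<int16_t>, shape [28, 28, 16]
dl::Tensor<int16_t> &pooled = pool.call(feature);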

View File

@ -0,0 +1,143 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_nn_min2d.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Min2D(input0, input1).
* NOTE: minimum is element-wise, i.e., output[i,j,k] = min(input0[i,j,k], input1[i,j,k])
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class Min2D : public Layer
{
private:
Tensor<feature_t> *output;     /*<! output ptr of min2d >*/
bool inplace;                  /*<! true: the output will be stored in input0
                                    false: the output will be stored in separate memory >*/
std::vector<int> output_shape; /*<! output shape of min2d >*/
public:
/**
* @brief Construct a new Min2D object
*
* @param name name of min2d
* @param inplace true: the output will be stored in input0
*                false: the output will be stored in separate memory
*/
Min2D(const char *name = "Min2D", bool inplace = false) : Layer(name),
output(NULL),
inplace(inplace),
output_shape({}) {}
/**
* @brief Destroy the Min2D object
*
*/
~Min2D()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}
/**
* @brief Update output shape and exponent
* NOTE: input0.shape must equal to input1.shape.
* input0.exponent must equal to input1.exponent.
*
* @param input0 as one input
* @param input1 as another input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
{
assert(input0.is_same_shape(input1));
assert(input0.exponent == input1.exponent);
this->output_shape = input0.shape;
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_shape(this->output_shape);
this->output->set_exponent(input0.exponent);
this->output->free_element();
}
else
{
this->output = &input0;
}
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Min2D result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call Min2D operation
*
* @param input0 as one input
* @param input1 as another input
* @param assign_core not effective yet
* @return Min2D result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input0, Tensor<feature_t> &input1, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input0.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
DL_LOG_LAYER_LATENCY_START();
nn::min2d(*this->output, input0, input1, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "min2d");
}
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::min2d(*this->output, input0, input1, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "min2d");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl

View File

@ -0,0 +1,52 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Neural Network Model.
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class Model
{
private:
std::vector<int> input_shape; /*<! input shape in [height, width, channel] >*/
public:
/**
* @brief Destroy the Model object.
*
*/
virtual ~Model() {}
/**
* @brief Build a model including update output shape and input padding of each layer.
*
* @param input as an input
*/
virtual void build(Tensor<feature_t> &input) = 0;
/**
* @brief Call the model layer by layer.
*
* @param input as an input.
*/
virtual void call(Tensor<feature_t> &input) = 0;
/**
* @brief If input.shape changes, call Model.build(), otherwise, do not. Then call Model.call().
*
* @param input as an input
*/
void forward(Tensor<feature_t> &input);
};
} // namespace layer
} // namespace dl
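
A minimal subclass sketch (hypothetical, but using only layers defined in this commit): the subclass owns its layers, chains them in build() and call(), and inherits forward(), which rebuilds only when the input shape changes.

class TinyNet : public dl::layer::Model<int16_t>
{
private:
    dl::layer::Relu<int16_t> relu;
    dl::layer::GlobalMaxPool2D<int16_t> gmp;

public:
    TinyNet() : relu("relu", /*inplace=*/true), gmp("gmp") {}

    void build(dl::Tensor<int16_t> &input) // input assumed [H, W, C]
    {
        this->relu.build(input);
        this->gmp.build(this->relu.get_output());
    }

    void call(dl::Tensor<int16_t> &input)
    {
        this->relu.call(input);
        this->gmp.call(this->relu.get_output());
    }
};

// TinyNet net;
// net.forward(input); // build() runs only when input.shape changes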

View File

@ -0,0 +1,151 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_mul2d.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Activation(Multiply2D(input0, input1)).
* NOTE: multiplication is element-wise, i.e., output[i,j,k] = input0[i,j,k] * input1[i,j,k]
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class Mul2D : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
const Activation<feature_t> *activation; /*<! activation of Mul2D, if you don't specify anything, no activation is applied >*/
Tensor<feature_t> *output; /*<! output ptr of Mul2D >*/
bool inplace;                  /*<! true: the output will be stored in input0
                                    false: the output will be stored in separate memory >*/
std::vector<int> output_shape; /*<! output shape of Mul2D >*/
public:
/**
* @brief Construct a new Mul2D object.
*
* @param output_exponent exponent of output
* @param activation activation of Mul2D, if you don't specify anything, no activation is applied
* @param name name of layer
* @param inplace true: the output will be stored in input0
*                false: the output will be stored in separate memory
*/
Mul2D(const int output_exponent,
const Activation<feature_t> *activation = NULL,
const char *name = "Mul2D",
bool inplace = false) : Layer(name),
output_exponent(output_exponent),
activation(activation),
output(NULL),
inplace(inplace),
output_shape({})
{
}
/**
* @brief Destroy the Mul2D object.
*/
~Mul2D()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}
/**
* @brief Update output shape.
* NOTE: input0.shape must equal to input1.shape.
*
* @param input0 as one input
* @param input1 as another input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
{
assert(input0.is_same_shape(input1));
this->output_shape = input0.shape;
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input0;
}
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Mul2D result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call Mul2D operation.
*
* @param input0 as one input
* @param input1 as another input
* @param assign_core not effective yet
* @return Mul2D result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input0, Tensor<feature_t> &input1, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
DL_LOG_LAYER_LATENCY_START();
nn::mul2d(*this->output, input0, input1, this->activation, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "mul2d");
}
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::mul2d(*this->output, input0, input1, this->activation, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "mul2d");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl

View File

@ -0,0 +1,169 @@
#pragma once
#include "dl_nn_pad.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Pad.
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class Pad : public Layer
{
private:
std::vector<int> paddings;
std::vector<feature_t> constant_values;
padding_mode_t mode;
Tensor<feature_t> *output; /*<! output ptr of Pad >*/
std::vector<int> output_shape; /*<! output shape of Pad >*/
public:
Pad(std::vector<int> paddings,
std::vector<feature_t> constant_values = {0},
padding_mode_t mode = PADDING_CONSTANT,
const char *name = "Pad") : Layer(name),
paddings(paddings),
constant_values(constant_values),
mode(mode)
{
this->output = new Tensor<feature_t>;
}
/**
* @brief Destroy the Pad object.
*
*/
~Pad()
{
if (this->output != NULL)
{
delete this->output;
}
}
/**
* @brief Normalize paddings and constant_values, and update output shape.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(this->paddings.size() > 0);
int input_dims = input.shape.size();
int padding_dims = input_dims * 2;
if (this->paddings.size() == 1)
{
std::vector<int> _paddings(padding_dims, 0);
for (int i = 0; i < padding_dims; ++i)
{
_paddings[i] = this->paddings[0];
}
this->paddings = _paddings;
}
else if (this->paddings.size() == 2)
{
std::vector<int> _paddings(padding_dims, 0);
for (int i = 0; i < input_dims; ++i)
{
_paddings[2 * i] = this->paddings[0];
_paddings[2 * i + 1] = this->paddings[1];
}
this->paddings = _paddings;
}
else
{
assert(this->paddings.size() == padding_dims);
}
if (this->mode == PADDING_CONSTANT)
{
if (this->constant_values.size() == 1)
{
std::vector<feature_t> _constant_values(padding_dims, 0);
for (int i = 0; i < padding_dims; ++i)
{
_constant_values[i] = this->constant_values[0];
}
this->constant_values = _constant_values;
}
else if (this->constant_values.size() == 2)
{
std::vector<feature_t> _constant_values(padding_dims, 0);
for (int i = 0; i < input_dims; ++i)
{
_constant_values[2 * i] = this->constant_values[0];
_constant_values[2 * i + 1] = this->constant_values[1];
}
this->constant_values = _constant_values;
}
else
{
assert(constant_values.size() == padding_dims);
}
}
this->output_shape = input.shape;
for (int i = 0; i < input_dims; ++i)
{
this->output_shape[i] += (this->paddings[2 * i] + this->paddings[2 * i + 1]);
}
this->output->set_shape(this->output_shape);
this->output->set_exponent(input.exponent);
this->output->free_element();
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Pad result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call Pad operation
*
* @param input as an input.
* @param assign_core not effective yet
* @return Pad result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
DL_LOG_LAYER_LATENCY_START();
nn::pad(*this->output, input, this->paddings, this->constant_values, this->mode, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "pad");
return *this->output;
}
};
} // namespace layer
} // namespace dl
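
The paddings vector is normalized in build(): one value pads every side, two values give a (before, after) pair reused for every dim, and otherwise 2 * dims per-dim values are expected. A sketch with assumed shapes:

// Constant-pad a [H, W, C] tensor by one pixel on the spatial borders only.
// Six values for three dims, ordered [before, after] per dim: height, width, channel.
dl::layer::Pad<int16_t> pad(/*paddings=*/{1, 1, 1, 1, 0, 0},
                            /*constant_values=*/{0},
                            dl::PADDING_CONSTANT, "pad");
pad.build(feature, /*print_shape=*/true); // [H, W, C] -> [H+2, W+2, C]
dl::Tensor<int16_t> &padded = pad.call(feature);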

View File

@ -0,0 +1,145 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_prelu.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief PRelu(input).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class PRelu : public Layer
{
private:
const feature_t *activation_element; /*<! quantized alpha elements along channel axis >*/
int activation_exponent; /*<! exponent of quantized alpha elements >*/
Tensor<feature_t> *output; /*<! output ptr of prelu >*/
bool inplace;                  /*<! true: the output will be stored in input0
                                    false: the output will be stored in separate memory >*/
std::vector<int> output_shape; /*<! output shape of prelu >*/
public:
/**
* @brief Construct a new PRelu object
*
* @param activation_element quantized alpha elements along channel axis
* @param activation_exponent exponent of quantized alpha elements
* @param name name of prelu
* @param inplace true: the output will be stored in input0
*                false: the output will be stored in separate memory
*/
PRelu(const feature_t *activation_element,
const int activation_exponent = 0,
const char *name = "PRelu",
bool inplace = false) : Layer(name),
activation_element(activation_element),
activation_exponent(activation_exponent),
output(NULL),
inplace(inplace),
output_shape({})
{
}
/**
* @brief Destroy the PRelu object
*
*/
~PRelu()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}
/**
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
this->output_shape = input.shape;
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(input.exponent);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input;
}
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& PRelu result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call PRelu operation.
*
* @param input as an input
* @param assign_core not effective yet
* @return PRelu result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->set_exponent(input.exponent);
this->output->malloc_element();
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
DL_LOG_LAYER_LATENCY_START();
nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "prelu");
}
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "prelu");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl
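
Unlike LeakyRelu's single alpha, PRelu takes one quantized alpha per channel. A sketch with placeholder numbers for an 8-channel map:

// 205 * 2^-11 = 205 / 2048 ≈ 0.1 per channel.
const int16_t prelu_alpha[8] = {205, 205, 205, 205, 205, 205, 205, 205};
dl::layer::PRelu<int16_t> prelu(prelu_alpha, /*activation_exponent=*/-11,
                                "prelu", /*inplace=*/false);
prelu.build(feature); // `feature`: Tensor<int16_t> with 8 channels
dl::Tensor<int16_t> &activated = prelu.call(feature);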

View File

@ -0,0 +1,135 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_nn_relu.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief ReLU(input).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class Relu : public Layer
{
private:
Tensor<feature_t> *output; /*<! output ptr of relu >*/
bool inplace;                  /*<! true: the output will be stored in input0
                                    false: the output will be stored in separate memory >*/
std::vector<int> output_shape; /*<! output shape of relu >*/
public:
/**
* @brief Construct a new ReLU object
*
* @param name name of relu
* @param inplace true: the output will be stored in input0
*                false: the output will be stored in separate memory
*/
Relu(const char *name = "Relu", bool inplace = false) : Layer(name),
output(NULL), inplace(inplace), output_shape({})
{
}
/**
* @brief Destroy the ReLU object
*
*/
~Relu()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}
/**
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
this->output_shape = input.shape;
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(input.exponent);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input;
}
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& ReLU result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call ReLU operation.
*
* @param input as an input
* @param assign_core not effective yet
* @return ReLU result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
DL_LOG_LAYER_LATENCY_START();
nn::relu(*this->output, input, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "relu");
}
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::relu(*this->output, input, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "relu");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl

View File

@ -0,0 +1,128 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Reshape(input)
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class Reshape : public Layer
{
private:
int output_exponent; /*<! exponent of output >*/
Tensor<feature_t> *output; /*<! output ptr of Reshape >*/
bool inplace;                  /*<! true: the output will be stored in input0
                                    false: the output will be stored in separate memory >*/
std::vector<int> output_shape; /*<! output shape of Reshape >*/
public:
/**
* @brief Construct a new Reshape object
*
* @param shape the target shape
* @param name name of Reshape layer
* @param inplace true: the output will be stored in input0
*                false: the output will be stored in separate memory
*/
Reshape(std::vector<int> shape, const char *name = "Reshape", bool inplace = false) : Layer(name),
output(NULL),
inplace(inplace),
output_shape(shape)
{
}
/**
* @brief Destroy the Reshape object
*
*/
~Reshape()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}
/**
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
this->output_exponent = input.exponent;
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(input.shape);
this->output->reshape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input;
this->output->reshape(this->output_shape);
}
this->output_shape = this->output->shape;
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Reshape result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call Reshape operation.
*
* @param input as an input
* @return Tensor<feature_t>& Reshape result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input)
{
DL_LOG_LAYER_LATENCY_INIT();
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_exponent(input.exponent);
this->output->reshape(this->output_shape);
this->output->copy_element(input, true);
DL_LOG_LAYER_LATENCY_END(this->name, "reshape");
}
else
{
DL_LOG_LAYER_LATENCY_START();
this->output->reshape(this->output_shape);
DL_LOG_LAYER_LATENCY_END(this->name, "reshape");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl
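
A usage sketch under assumed shapes: with inplace=true, Reshape only reinterprets the element order, so no copy is made.

dl::layer::Reshape<int16_t> reshape({16, 8}, "reshape", /*inplace=*/true);
reshape.build(feature); // `feature`: Tensor<int16_t> with 128 elements, e.g. [4, 4, 8]
dl::Tensor<int16_t> &viewed = reshape.call(feature); // same buffer, shape [16, 8]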

View File

@ -0,0 +1,130 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Squeeze(input).
*
* @tparam feature_t supports int16_t and int8_t,
*         - int16_t: stands for operation in int16_t quantize
*         - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class Squeeze : public Layer
{
private:
int output_exponent; /*<! exponent of output >*/
Tensor<feature_t> *output; /*<! output ptr of Squeeze >*/
bool inplace;                  /*<! true: the output will be stored in input0
                                    false: the output will be stored in separate memory >*/
int axis;                      /*<! the dim to be removed. Make sure the length of that dim is equal to 1.
                                    If axis == INT32_MAX, all the dims with length == 1 will be removed. >*/
std::vector<int> output_shape; /*<! output shape of Squeeze >*/
public:
/**
* @brief Construct a new Squeeze object
*
* @param axis the dim to be removed. Make sure the length of that dim is equal to 1.
*             If axis == INT32_MAX, all the dims with length == 1 will be removed.
* @param name name of Squeeze layer
* @param inplace true: the output will be stored in input0
*                false: the output will be stored in separate memory
*/
Squeeze(int axis = INT32_MAX, const char *name = "Squeeze", bool inplace = false) : Layer(name),
output(NULL),
inplace(inplace),
axis(axis),
output_shape({})
{
}
/**
* @brief Destroy the Squeeze object
*
*/
~Squeeze()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}
/**
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
this->output_exponent = input.exponent;
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(input.shape);
this->output->squeeze(this->axis);
this->output->free_element();
}
else
{
this->output = &input;
this->output->squeeze(this->axis);
}
this->output_shape = this->output->shape;
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Squeeze result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call Squeeze operation.
*
* @param input as an input
* @return Tensor<feature_t>& Squeeze result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input)
{
DL_LOG_LAYER_LATENCY_INIT();
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_exponent(input.exponent);
this->output->set_shape(this->output_shape);
this->output->copy_element(input, true);
DL_LOG_LAYER_LATENCY_END(this->name, "Squeeze");
}
else
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_shape(this->output_shape);
DL_LOG_LAYER_LATENCY_END(this->name, "Squeeze");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl

View File

@ -0,0 +1,145 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_sub2d.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Activation(Sub2D(input0, input1)).
* NOTE: subtraction is element-wise, i.e., output[i,j,k] = input0[i,j,k] - input1[i,j,k]
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class Sub2D : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
const Activation<feature_t> *activation; /*<! activation of Sub2D, if you don't specify anything, no activation is applied >*/
Tensor<feature_t> *output; /*<! output ptr of Sub2D >*/
bool inplace;                  /*<! true: the output will be stored in input0
                                    false: the output will be stored in separate memory >*/
std::vector<int> output_shape; /*<! output shape of Sub2D >*/
public:
/**
* @brief Construct a new Sub2D object.
*
* @param output_exponent exponent of output
* @param activation activation of Sub2D, if you don't specify anything, no activation is applied
* @param name name of layer
* @param inplace true: the output will be stored in input0
*                false: the output will be stored in separate memory
*/
Sub2D(const int output_exponent, const Activation<feature_t> *activation = NULL, const char *name = "Sub2D", bool inplace = false) : Layer(name),
output_exponent(output_exponent),
activation(activation),
output(NULL),
inplace(inplace),
output_shape({})
{
}
/**
* @brief Destroy the Sub2D object.
*/
~Sub2D()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}
/**
* @brief Update output shape.
* NOTE: input0.shape must equal to input1.shape.
*
* @param input0 as one input
* @param input1 as another input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
{
assert(input0.is_same_shape(input1));
this->output_shape = input0.shape;
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input0;
}
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Sub2D result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call Sub2D operation.
*
* @param input0 as one input
* @param input1 as another input
* @param assign_core not effective yet
* @return Sub2D result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input0, Tensor<feature_t> &input1, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input0.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
DL_LOG_LAYER_LATENCY_START();
nn::sub2d(*this->output, input0, input1, this->activation, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "sub2d");
}
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::sub2d(*this->output, input0, input1, this->activation, assign_core, this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "sub2d");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl

View File

@ -0,0 +1,141 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Transpose(input).
*
* @tparam feature_t supports int16_t and int8_t,
*         - int16_t: stands for operation in int16_t quantize
*         - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class Transpose : public Layer
{
private:
int output_exponent; /*<! exponent of output >*/
Tensor<feature_t> *output; /*<! output ptr of Transpose >*/
bool inplace;                  /*<! true: the output will be stored in input0
                                    false: the output will be stored in separate memory >*/
std::vector<int> perm;         /*<! the new arrangement of the dims. If perm == {}, the order of the dims will be reversed. >*/
std::vector<int> output_shape; /*<! output shape of Transpose >*/
public:
/**
* @brief Construct a new Transpose object
*
* @param perm the new arrangement of the dims. If perm == {}, the order of the dims will be reversed.
* @param name name of Transpose layer
* @param inplace true: the output will be stored in input
*                false: the output will be stored in separate memory
*/
Transpose(std::vector<int> perm = {}, const char *name = "Transpose", bool inplace = false) : Layer(name),
output(NULL),
inplace(inplace),
perm(perm),
output_shape({})
{
}
/**
* @brief Destroy the Transpose object
*
*/
~Transpose()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}
/**
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
this->output_exponent = input.exponent;
this->output_shape = input.shape;
int dims = this->output_shape.size();
if (this->perm.size() == 0)
{
for (int i = dims - 1; i >= 0; i--)
{
this->perm.push_back(i);
}
}
for (int i = 0; i < dims; ++i)
{
if (this->perm[i] < 0)
this->perm[i] = dims + this->perm[i];
this->output_shape[i] = input.shape[this->perm[i]];
}
if (!this->inplace)
{
if (this->output == NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input;
this->output->set_shape(this->output_shape);
}
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Transpose result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call Transpose operation.
*
* @param input as an input.
* @return Tensor<feature_t>& Transpose result.
*/
Tensor<feature_t> &call(Tensor<feature_t> &input)
{
DL_LOG_LAYER_LATENCY_INIT();
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_exponent(input.exponent);
this->output->transpose(input, this->perm);
DL_LOG_LAYER_LATENCY_END(this->name, "transpose");
}
else
{
DL_LOG_LAYER_LATENCY_START();
this->output->transpose(this->perm);
DL_LOG_LAYER_LATENCY_END(this->name, "transpose");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl
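
Reading build() above: output dim i takes input dim perm[i], so an HWC-to-CHW conversion uses perm = {2, 0, 1}; an empty perm simply reverses the dims. A sketch with assumed shapes:

dl::layer::Transpose<int16_t> transpose({2, 0, 1}, "transpose", /*inplace=*/false);
transpose.build(feature, /*print_shape=*/true); // [H, W, C] -> [C, H, W]
dl::Tensor<int16_t> &chw = transpose.call(feature);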

View File

@ -0,0 +1,188 @@
#pragma once
#include "dl_define.hpp"
namespace dl
{
namespace math
{
/**
* @brief x^a.
*
* @param x as a base
* @param a as an exponent
* @return x^a
*/
inline float power(float x, int a)
{
if (a > 0)
{
return x * power(x, a - 1);
}
else if (a < 0)
{
return 1 / (x * power(x, -a - 1));
}
else
{
return 1.f;
}
}
/**
* @brief sqrt(x).
*
* @param x as a base
* @return sqrt(x)
*/
inline float sqrt_quick(float x)
{
const int result = 0x1fbb4000 + (*(int *)&x >> 1);
return *(float *)&result;
}
/**
* @brief 1/sqrt(x).
*
* @param x as a base
* @return 1/sqrt(x)
*/
inline float sqrt_reciprocal_quick(float x)
{
float xhalf = 0.5f * x;
int i = *(int *)&x; // get bits for floating value
i = 0x5f375a86 - (i >> 1); // gives initial guess y0
x = *(float *)&i; // convert bits back to float
x = x * (1.5f - xhalf * x * x); // Newton step, repeating increases accuracy
return x;
}
static const float EN = 0.00001f;
/**
* @brief sqrt(x).
*
* @param x as a base
* @return sqrt(x)
*/
inline float sqrt_newton(float x)
{
/**
* Use Newton iteration method to find the square root
* */
if (x == 0.f)
return 0.f;
float result = x;
float last_value;
do
{
last_value = result;
result = (last_value + x / last_value) * 0.5;
} while (DL_ABS(result - last_value) > EN);
return result;
}
/**
* @brief n-th root of x.
*
* @param x as a base
* @param n root times
* @return n-th root of x
*/
inline float root_newton(float x, int n)
{
if (n == 2)
return sqrt_newton(x);
if (n == 0)
return 1.f;
if (n == 1)
return x;
if (x == 0.f)
return 0.f;
float result = x;
float last_value;
float _n = (float)(n - 1) / (float)n; // Newton coefficient (n - 1) / n for y^n = x
do
{
last_value = result;
result = _n * last_value + x / ((float)n * power(last_value, n - 1)); // Newton step
} while (DL_ABS(result - last_value) > EN);
return result;
}
/**
* @brief atan(x).
*
* @param x as an input; the approximation is most accurate for |x| <= 1
* @return atan(x) in range [-pi/2, pi/2]
*/
inline float atan(float x)
{
return x * (0.78539816 - (DL_ABS(x) - 1) * (0.2447 + 0.0663 * DL_ABS(x)));
// float s = x*x;
// return ((-0.0464964749 * s + 0.15931422) * s - 0.327622764) * s * x + x;
}
// TODO:@yuanjiong
/**
* @brief atan2(x, y): the angle of the point (x, y), i.e., the standard atan2(y, x).
*
* @param x as one input
* @param y as another input
* @return angle in range [-pi, pi]
*/
inline float atan2(float x, float y)
{
float ax = DL_ABS(x);
float ay = DL_ABS(y);
float eps = 1e-8;
float a = DL_MIN(ax, ay) / (DL_MAX(ax, ay) + eps);
float r = atan(a); //[0, pi/2]
if (ay > ax)
r = 1.57079633 - r;
if (x < 0)
r = 3.14159265 - r;
if (y < 0)
r = -r;
return r;
}
/**
* @brief acos(x).
*
* @param x as an input
* @return acos(x) in range [0, pi]
*/
inline float acos(float x)
{
return atan2(x, sqrt_newton(1.0 - x * x));
}
/**
* @brief asin(x).
*
* @param x as an input
* @return asin(x) in range [-pi/2, pi/2]
*/
inline float asin(float x)
{
return atan2(sqrt_newton(1.0 - x * x), x);
}
/**
* @brief e^x
*
* @param x exponent
* @param steps iteration steps
* @return e^x
*/
inline float exp_fast(double x, int steps)
{
x = 1.0 + x / (1 << steps);
for (int i = 0; i < steps; i++)
x *= x;
return x;
}
}
}
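
exp_fast builds on e^x = lim (1 + x/n)^n: with n = 2^steps the power is evaluated by `steps` squarings, so cost grows linearly in steps while accuracy improves. A quick accuracy check (hypothetical host-side driver; assumes this header is dl_math.hpp):

#include <cstdio>
#include <cmath>
#include "dl_math.hpp"

int main()
{
    float x = 1.0f;
    printf("steps=4:  %f\n", dl::math::exp_fast(x, 4));  // ~2.6379
    printf("steps=8:  %f\n", dl::math::exp_fast(x, 8));  // ~2.7130
    printf("steps=16: %f\n", dl::math::exp_fast(x, 16)); // ~2.7182
    printf("std::exp: %f\n", std::exp(x));               //  2.718282
    return 0;
}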

View File

@ -0,0 +1,397 @@
#pragma once
#include <stdint.h>
#include <stdio.h>
#include <math.h>
#include <vector>
#include "dl_define.hpp"
#include "dl_tool.hpp"
#include "dl_variable.hpp"
#include "esp_timer.h"
namespace dl
{
namespace math
{
/**
* @brief the Matrix class
*
* @tparam T
*/
template <typename T>
class Matrix
{
public:
T **array;
int h;
int w;
Matrix() : h(0), w(0)
{
this->array = NULL;
}
Matrix(int h, int w) : h(h), w(w)
{
this->calloc_element();
}
Matrix(int h, int w, T s) : h(h), w(w)
{
this->calloc_element();
this->set_value(s);
}
Matrix(const Matrix<T> &mat) : h(mat.h), w(mat.w)
{
this->calloc_element();
this->set_value(mat);
}
virtual ~Matrix()
{
if (this->array != NULL)
{
for (int i = 0; i < this->h; i++)
{
free(this->array[i]);
}
free(this->array);
this->array = NULL;
}
}
/**
* @brief calloc the matrix element
*
*/
void calloc_element()
{
if ((this->h > 0) && (this->w > 0))
{
this->array = (T **)calloc(this->h, sizeof(T *));
for (int i = 0; i < this->h; i++)
{
this->array[i] = (T *)calloc(this->w, sizeof(T));
}
}
else
{
this->array = NULL;
}
}
/**
* @brief Set the matrix element to random number.
*
* @param thresh the max abs value of the element.
*/
void set_random(T thresh = 1)
{
unsigned int seed = esp_timer_get_time();
srand(seed);
for (int i = 0; i < this->h; i++)
{
for (int j = 0; j < this->w; j++)
{
this->array[i][j] = ((T)rand()) / (T)(RAND_MAX)*thresh;
}
}
}
/**
* @brief Set the small value to zero
*
* @param thresh the threshold of small value
*/
void set_zero(T thresh = 1e-8)
{
for (int i = 0; i < this->h; i++)
{
for (int j = 0; j < this->w; j++)
{
if (DL_ABS(this->array[i][j]) < thresh)
{
this->array[i][j] = 0;
}
}
}
}
/**
* @brief Set the matrix value from a vector
*
* @tparam TT
* @param mat the input vector
*/
template <typename TT>
void set_value(std::vector<TT> mat)
{
int area = this->w * this->h;
assert(area == mat.size());
int index = 0;
for (int i = 0; i < this->h; i++)
{
for (int j = 0; j < this->w; j++)
{
this->array[i][j] = (T)(mat[index++]);
}
}
}
/**
* @brief Set the matrix value from another matrix.
*
* @tparam TT
* @param mat the input matrix.
*/
template <typename TT>
void set_value(const Matrix<TT> &mat)
{
assert((this->h == mat.h) && (this->w == mat.w));
for (int i = 0; i < this->h; i++)
{
for (int j = 0; j < this->w; j++)
{
this->array[i][j] = (T)(mat.array[i][j]);
}
}
}
/**
* @brief Set a part of the matrix value from another matrix.
*
* @param h_start the start index of height
* @param h_end the end index of height
* @param w_start the start index of width
* @param w_end the end index of width
* @param mat the input matrix
*/
void set_value(int h_start, int h_end, int w_start, int w_end, const Matrix<T> &mat)
{
int h = h_end - h_start;
int w = w_end - w_start;
assert((h == mat.h) && (w == mat.w));
assert((h_end <= this->h) && (w_end <= this->w) && (h_start >= 0) && (w_start >= 0));
for (int i = 0; i < h; i++)
{
for (int j = 0; j < w; j++)
{
this->array[i + h_start][j + w_start] = mat.array[i][j];
}
}
}
/**
* @brief Set the matrix value to a constant.
*
* @tparam TT
* @param s the input value.
*/
template <typename TT>
void set_value(TT s)
{
for (int i = 0; i < this->h; i++)
{
for (int j = 0; j < this->w; j++)
{
this->array[i][j] = (T)s;
}
}
}
/**
* @brief print the matrix element.
*
*/
void print_value() const
{
printf("h: %d, w: %d\n", this->h, this->w);
for (int i = 0; i < this->h; i++)
{
for (int j = 0; j < this->w; j++)
{
printf("%f ", (float)(this->array[i][j]));
}
printf("\n");
}
}
/**
* @brief do matrix multiply
*
* @param input the input matrix
* @return Matrix<T> the output matrix
*/
Matrix<T> matmul(const Matrix<T> &input) const;
/**
* @brief transpose the matrix
*
* @return Matrix<T> the transposed matrix
*/
Matrix<T> transpose() const;
/**
* @brief get the inverse matrix
*
* @return Matrix<T> the output matrix
*/
Matrix<T> inverse() const;
/**
* @brief get the diagonal of the matrix
*
* @return Matrix<T> the diagonal
*/
Matrix<T> diagonal() const;
/**
* @brief slice the matrix
*
* @param h_start the start index of height
* @param h_end the end index of height
* @param w_start the start index of width
* @param w_end the end index of width
* @return Matrix<T> the output.
*/
Matrix<T> slice(int h_start, int h_end, int w_start, int w_end) const;
/**
* @brief get an identity matrix
*
* @param n the dim of the identity matrix
* @return Matrix<T> the output
*/
static Matrix<T> identity(int n)
{
Matrix<T> A(n, n);
for (int i = 0; i < n; ++i)
{
A.array[i][i] = 1;
}
return A;
}
/**
* @brief get a diag matrix
*
* @param d the diagonal value.
* @return Matrix<T> the output
*/
static Matrix<T> diag(const Matrix<T> &d)
{
assert(d.h == 1);
Matrix<T> A(d.w, d.w);
for (int i = 0; i < d.w; ++i)
{
A.array[i][i] = d.array[0][i];
}
return A;
}
static Matrix<T> arange(uint32_t n)
{
Matrix<T> A(1, n);
for (int i = 0; i < n; ++i)
{
A.array[0][i] = i;
}
return A;
}
static Matrix<T> arange(uint32_t n1, uint32_t n2)
{
int len = n2 - n1;
assert(len > 0);
Matrix<T> A(1, len);
for (int i = 0; i < len; ++i)
{
A.array[0][i] = n1 + i;
}
return A;
}
/**
* @brief get the F_norm of the matrix
*
* @return T the output F_norm
*/
T F_norm() const
{
T f_n = 0.0;
for (int i = 0; i < this->h; ++i)
{
for (int j = 0; j < this->w; ++j)
{
f_n += (this->array[i][j] * this->array[i][j]);
}
}
f_n = sqrt_newton(f_n);
return f_n;
}
Matrix<T> &operator=(const Matrix<T> &A)
{
if ((A.h == this->h) && (A.w == this->w))
{
for (int i = 0; i < A.h; ++i)
{
for (int j = 0; j < A.w; ++j)
{
this->array[i][j] = A.array[i][j];
}
}
}
else
{
if (this->array != NULL)
{
for (int i = 0; i < this->h; ++i)
{
free(this->array[i]);
}
free(this->array);
this->array = NULL;
}
this->h = A.h;
this->w = A.w;
if ((A.h > 0) && (A.w > 0))
{
this->calloc_element();
this->set_value(A);
}
}
return *this;
}
};
/**
* @brief Get the affine transform matrix
*
* @param source_coord the source coordinates
* @param dest_coord the target coordinates
* @return Matrix<float> the output matrix
*/
Matrix<float> get_affine_transform(Matrix<float> &source_coord, Matrix<float> &dest_coord);
/**
* @brief Get the similarity transform matrix
*
* @param source_coord the source coordinates
* @param dest_coord the target coordinates
* @return Matrix<float> the output matrix
*/
Matrix<float> get_similarity_transform(Matrix<float> &source_coord, Matrix<float> &dest_coord);
/**
* @brief Get the perspective transform matrix
*
* @param source_coord the source coordinates
* @param dest_coord the target coordinates
* @return Matrix<float> the output matrix
*/
Matrix<float> get_perspective_transform(Matrix<float> &source_coord, Matrix<float> &dest_coord);
} // namespace math
} // namespace dl
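// A minimal usage sketch of the Matrix API above (illustration only; it assumes
// the two-argument (h, w) constructor that identity() and diag() already rely on):
void matrix_demo()
{
    dl::math::Matrix<float> A(2, 2);
    A.set_value(std::vector<float>{1, 2, 3, 4}); // row-major fill: [[1, 2], [3, 4]]
    dl::math::Matrix<float> I = dl::math::Matrix<float>::identity(2);
    dl::math::Matrix<float> B = A.matmul(I);     // B equals A
    dl::math::Matrix<float> At = A.transpose();  // [[1, 3], [2, 4]]
    B.print_value();
    At.print_value();
}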

View File

@ -0,0 +1,47 @@
#pragma once
#include <stdint.h>
#include <vector>
#include <list>
#include "dl_detect_define.hpp"
/**
* @brief Hardware Requirement.
* - flash 310kB
*/
class CatFaceDetectMN03
{
private:
void *model;
public:
/**
* @brief Construct a new Cat Face Detect MN03 object.
*
* @param score_threshold predicted boxes with score lower than the threshold will be filtered out
* @param nms_threshold predicted boxes with IoU higher than the threshold will be filtered out
* @param top_k only the k highest-scoring boxes are kept
* @param resize_scale resize scale applied to the input image
*/
CatFaceDetectMN03(const float score_threshold, const float nms_threshold, const int top_k, const float resize_scale);
/**
* @brief Destroy the Cat Face Detect MN03 object.
*
*/
~CatFaceDetectMN03();
/**
* @brief Inference.
*
* @tparam T supports uint8_t and uint16_t
* - uint8_t: input image is RGB888
* - uint16_t: input image is RGB565
* @param input_element pointer of input image
* @param input_shape shape of input image
* @return detection result
*/
template <typename T>
std::list<dl::detect::result_t> &infer(T *input_element, std::vector<int> input_shape);
};
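// A hypothetical usage sketch (the frame pointer, its 240 x 320 size, and the
// threshold values below are placeholders, not defaults from this header):
extern uint16_t *camera_frame; // RGB565 frame from the camera driver
inline void detect_cat_faces()
{
    CatFaceDetectMN03 detector(0.4F /*score*/, 0.3F /*nms*/, 10 /*top_k*/, 0.3F /*resize_scale*/);
    std::list<dl::detect::result_t> &results = detector.infer<uint16_t>(camera_frame, {240, 320, 3});
    for (const auto &r : results)
        printf("cat face: score=%f box=(%d,%d,%d,%d)\n", r.score, r.box[0], r.box[1], r.box[2], r.box[3]);
}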

View File

@ -0,0 +1,366 @@
#pragma once
#include "dl_image.hpp"
typedef struct
{
int area; /*!< Area of connected domains >*/
std::vector<int> center; /*<! centroid of connected domains [x, y] >*/
std::vector<int> box; /*<! [left_up_x, left_up_y, right_down_x, right_down_y] >*/
} color_detect_result_t;
typedef struct
{
std::vector<int> start_col;
std::vector<int> end_col;
std::vector<int> row;
std::vector<int> index;
std::vector<int> area;
} color_segment_result_t;
typedef struct
{
std::vector<uint8_t> color_thresh; /*!< thresholds of the colors; each color threshold is composed of 6 numbers >*/
int area_thresh; /*!< the area threshold of each color,
the area that is smaller than the threshold is filtered >*/
std::string name; /*!<name of the color>*/
} color_info_t;
class ColorDetector
{
private:
std::vector<std::vector<color_detect_result_t>> detection_results; /*!< detection results >*/
std::vector<color_segment_result_t> segmentation_results; /*!< segmentation results >*/
std::vector<color_info_t> registered_colors; /*!< the information of registered colors >*/
std::vector<uint8_t> color_thresh_offset; /*!< HSV offset of the registered colors>*/
std::vector<int> detection_shape; /*!< the inference shape of images, the input image will be resized to this shape.
if the shape == {}, the input image will not be resized >*/
bool bgr; /*!< true: the input image is in BGR format
false: the input image is in RGB format >*/
int id_nums; /*!< the number of registered colors in history>*/
float h_ratio;
float w_ratio;
void color_detection_forward(dl::Tensor<uint8_t> &bin, int area_thresh);
public:
/**
* @brief get the color threshold of rectangular region in the image
*
* @param image the input image in RGB888 format.
* @param box the coordinates of the rectangular region: [left_up_x, left_up_y, right_down_x, right_down_y]
* @return std::vector<uint8_t> the threshold.
*/
std::vector<uint8_t> cal_color_thresh(dl::Tensor<uint8_t> &image, std::vector<int> box);
/**
* @brief get the color threshold of rectangular region in the image
*
* @param input the ptr of RGB565 image.
* @param input_shape shape of the input image.
* @param box the coordinates of the rectangular region: [left_up_x, left_up_y, right_down_x, right_down_y]
* @return std::vector<uint8_t> the threshold.
*/
std::vector<uint8_t> cal_color_thresh(uint16_t *input, std::vector<int> input_shape, std::vector<int> box);
/**
* @brief register a new color to the color detector
*
* @param image the input image in RGB888 format.
* @param box the coordinates of the rectangular region: [left_up_x, left_up_y, right_down_x, right_down_y]
* @param area_thresh the area threshold of the color
* @param id the index of the color
* @return int the number of the registered colors. if the id is not valid, return -1.
*/
int register_color(dl::Tensor<uint8_t> &image, std::vector<int> box, int area_thresh = 256, std::string color_name = "", int id = -1);
/**
* @brief register a new color to the color detector
*
* @param input the ptr of RGB565 image.
* @param input_shape shape of the input image.
* @param box the coordinates of the rectangular region: [left_up_x, left_up_y, right_down_x, right_down_y]
* @param area_thresh the area threshold of the color
* @param id the index of the color
* @return int the number of the registered colors. if the id is not valid, return -1.
*/
int register_color(uint16_t *input, std::vector<int> input_shape, std::vector<int> box, int area_thresh = 256, std::string color_name = "", int id = -1);
/**
* @brief register a new color to the color detector
*
* @param color_thresh the color threshold
* @param area_thresh the area threshold of the color
* @param id the index of the color
* @return int the number of the registered colors. if the id is not valid, return -1.
*/
int register_color(std::vector<uint8_t> color_thresh, int area_thresh = 256, std::string color_name = "", int id = -1);
/**
* @brief delete a registered color
*
* @param id the index of the color
* @return int the number of the registered colors. if the id is not valid, return -1.
*/
int delete_color(int id = -1);
/**
* @brief delete a registered color
*
* @param color_name name of the registered_color
* @return int the number of the registered colors. if the id is not valid, return -1.
*/
int delete_color(std::string color_name);
/**
* @brief delete all the registered colors
*
*/
void clear_color();
/**
* @brief detect the colors based on the color thresholds
*
* @param image the input image.
* @return std::vector<std::vector<color_detect_result_t>>& detection result.
*/
std::vector<std::vector<color_detect_result_t>> &detect(dl::Tensor<uint8_t> &image, std::vector<int> color_ids = {});
/**
* @brief detect the colors based on the color thresholds
*
* @param input the pointer of the input RGB565 image
* @param input_shape the shape of the input image
* @return std::vector<std::vector<color_detect_result_t>>& detection result.
*/
std::vector<std::vector<color_detect_result_t>> &detect(uint16_t *input, std::vector<int> input_shape, std::vector<int> color_ids = {});
/**
* @brief Construct a new Color Detector object
*
* @param color_thresh_offset HSV offset of the registered colors
* @param detection_shape the inference shape of images, the input image will be resized to this shape
* @param bgr true: the input image is in BGR format
* false: the input image is in RGB format
*/
ColorDetector(std::vector<uint8_t> color_thresh_offset = {}, std::vector<int> detection_shape = {}, bool bgr = true) : color_thresh_offset(color_thresh_offset),
detection_shape(detection_shape), bgr(bgr), id_nums(0)
{
}
/**
* @brief Destroy the Color Detector object
*
*/
~ColorDetector() {}
/**
* @brief Get the detection results object
*
* @return std::vector<std::vector<color_detect_result_t>>& the detection result.
*/
std::vector<std::vector<color_detect_result_t>> &get_detection_results()
{
return this->detection_results;
}
/**
* @brief Get the segmentation results object
*
* @return std::vector<color_segment_result_t>& the segmentation result.
*/
std::vector<color_segment_result_t> &get_segmentation_results()
{
return this->segmentation_results;
}
/**
* @brief Get the registered colors object
*
* @return std::vector<color_info_t> the information of registered colors
*/
std::vector<color_info_t> get_registered_colors()
{
return this->registered_colors;
}
/**
* @brief Set the color thresh offset object
*
* @param color_thresh_offset the offset of color thresh for registered colors
* @return ColorDetector&
*/
ColorDetector &set_color_thresh_offset(std::vector<uint8_t> color_thresh_offset)
{
assert(color_thresh_offset.size() == 3);
this->color_thresh_offset = color_thresh_offset;
return *this;
}
/**
* @brief Get the color thresh offset object
*
* @return std::vector<uint8_t> color_thresh_offset
*/
std::vector<uint8_t> get_color_thresh_offset()
{
return this->color_thresh_offset;
}
/**
* @brief Set the area thresh object
*
* @param area_thresh the area threshold for each registered color
* @return ColorDetector&
*/
ColorDetector &set_area_thresh(std::vector<int> area_thresh)
{
assert((area_thresh.size() == this->registered_colors.size()) || (area_thresh.size() == 1));
if (area_thresh.size() == 1)
{
for (int i = 0; i < this->registered_colors.size(); ++i)
{
this->registered_colors[i].area_thresh = area_thresh[0];
}
}
else
{
for (int i = 0; i < this->registered_colors.size(); ++i)
{
this->registered_colors[i].area_thresh = area_thresh[i];
}
}
return *this;
}
/**
* @brief Set the area thresh object
*
* @param area_thresh the area threshold for the specified registered color
* @param id index of the registered color
* @return ColorDetector&
*/
ColorDetector &set_area_thresh(int area_thresh, int id)
{
assert((id >= 0) && (id < this->registered_colors.size()));
this->registered_colors[id].area_thresh = area_thresh;
return *this;
}
/**
* @brief Set the bgr object
*
* @param bgr
* @return ColorDetector&
*/
ColorDetector &set_bgr(bool bgr)
{
this->bgr = bgr;
return *this;
}
/**
* @brief Get the bgr object
*
* @return bool bgr flag
*/
bool get_bgr()
{
return this->bgr;
}
/**
* @brief Get the detection shape object
*
* @return std::vector<int>
*/
std::vector<int> get_detection_shape()
{
return this->detection_shape;
}
/**
* @brief Set the detection shape object
*
* @param detection_shape the inference shape of images, the input image will be resized to this shape
* @return ColorDetector&
*/
ColorDetector &set_detection_shape(std::vector<int> detection_shape)
{
assert(detection_shape.size() == 3);
this->detection_shape = detection_shape;
return *this;
}
/**
* @brief Get the registered colors num
*
* @return int the registered colors num
*/
int get_registered_colors_num()
{
return this->registered_colors.size();
}
/**
* @brief print the color detection results
*
* @param tag
*/
void print_detection_results(const char *tag = "RGB")
{
printf("\n%s | color detection result:\n", tag);
for (int i = 0; i < this->detection_results.size(); ++i)
{
printf("color %d: detected box :%d\n", i, this->detection_results[i].size());
for (int j = 0; j < this->detection_results[i].size(); ++j)
{
printf("center: (%d, %d)\n", this->detection_results[i][j].center[0], this->detection_results[i][j].center[1]);
printf("box: (%d, %d), (%d, %d)\n", this->detection_results[i][j].box[0], this->detection_results[i][j].box[1], this->detection_results[i][j].box[2], this->detection_results[i][j].box[3]);
printf("area: %d\n", this->detection_results[i][j].area);
}
printf("\n");
}
}
/**
* @brief print the segmentation results
*
* @param tag
*/
void print_segmentation_results(const char *tag = "RGB")
{
printf("\n%s | color segmentation result:\n", tag);
for (int i = 0; i < this->segmentation_results.size(); ++i)
{
printf("color %d: detected box :%d\n", i, this->detection_results[i].size());
for (int j = 0; j < this->segmentation_results[i].index.size(); ++j)
{
printf("box_index: %d, start col: %d, end col: %d, row: %d, area: %d\n",
this->segmentation_results[i].index[j], this->segmentation_results[i].start_col[j], this->segmentation_results[i].end_col[j],
this->segmentation_results[i].row[j], this->segmentation_results[i].area[j]);
}
printf("\n");
}
}
/**
* @brief draw the color segmentation result on the input image
*
* @param image the input RGB image
* @param draw_colors RGB values for each detected colors
* @param draw_backgound draw the background if it is true
* @param background_color RGB values for the background color
*/
void draw_segmentation_results(dl::Tensor<uint8_t> &image, std::vector<std::vector<uint8_t>> draw_colors, bool draw_backgound = true, std::vector<uint8_t> background_color = {0, 0, 0});
/**
* @brief draw the color segmentation result on the input image
*
* @param image the pointer of the input RGB565 image
* @param image_shape the shape of the input image
* @param draw_colors RGB565 values for each detected colors
* @param draw_backgound draw the background if it is true
* @param background_color RGB565 values for the background color
*/
void draw_segmentation_results(uint16_t *image, std::vector<int> image_shape, std::vector<uint16_t> draw_colors, bool draw_backgound = true, uint16_t background_color = 0x0000);
};
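// A hypothetical usage sketch: learn one color from a reference patch, then find
// it in later frames (the image name, box coordinates, and HSV offsets below are
// placeholders):
extern dl::Tensor<uint8_t> rgb888_frame;
inline void color_demo()
{
    ColorDetector detector({10, 40, 40} /* HSV thresh offset */);
    // learn the color inside [left_up_x, left_up_y, right_down_x, right_down_y]
    detector.register_color(rgb888_frame, {50, 50, 100, 100}, 256, "red");
    detector.detect(rgb888_frame);
    detector.print_detection_results();
}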

View File

@ -0,0 +1,30 @@
#pragma once
#include "dl_variable.hpp"
#include "face_recognition_tool.hpp"
#include "face_recognizer.hpp"
#include <vector>
using namespace dl;
/**
* @brief face recognition model v1
* input size: 112 x 112 x 3
* quantization mode: S16
*
*/
class FaceRecognition112V1S16 : public FaceRecognizer<int16_t>
{
public:
/**
* @brief Construct a new Face_Recognition_112_V1_S16 object
*
*/
FaceRecognition112V1S16();
/**
* @brief Destroy the Face_Recognition_112_V1_S16 object
*
*/
~FaceRecognition112V1S16();
};

View File

@ -0,0 +1,30 @@
#pragma once
#include "dl_variable.hpp"
#include "face_recognition_tool.hpp"
#include "face_recognizer.hpp"
#include <vector>
using namespace dl;
/**
* @brief face recognition model v1
* input size: 112 x 112 x 3
* quantization mode: S8
*
*/
class FaceRecognition112V1S8 : public FaceRecognizer<int8_t>
{
public:
/**
* @brief Construct a new Face_Recognition_112_V1_S8 object
*
*/
FaceRecognition112V1S8();
/**
* @brief Destroy the Face_Recognition_112_V1_S8 object
*
*/
~FaceRecognition112V1S8();
};

View File

@ -0,0 +1,170 @@
#pragma once
#include "dl_variable.hpp"
#include "dl_define.hpp"
#include "dl_tool.hpp"
#include "dl_math.hpp"
#include "dl_math_matrix.hpp"
#include <vector>
#include <list>
#include <algorithm>
#include <math.h>
#include <string>
#include "esp_partition.h"
/**
* @brief struct of face similarity
*
*/
typedef struct
{
int id;
std::string name;
float similarity;
} face_info_t;
/**
* @brief Face ID
*
* @tparam feature_t
*/
template <typename feature_t>
class FaceID
{
public:
int id; /*<! id index >*/
dl::Tensor<feature_t> id_emb; /*<! id embedding >*/
std::string name; /*<! id name >*/
/**
* @brief Construct a new Face ID object
*
* @param id id index
* @param id_emb id embedding
* @param name id name
*/
FaceID(int id, dl::Tensor<feature_t> &id_emb, std::string name = "");
/**
* @brief Construct a new Face ID that is the same as the input face_id
*
* @param face_id input face_id
*/
FaceID(FaceID<feature_t> &face_id);
/**
* @brief Destroy the Face ID object
*
*/
~FaceID() {}
/**
* @brief print the face id information
*
*/
void print();
};
namespace face_recognition_tool
{
/**
* @brief l2-normalize the feature
*
* @param feature
*/
void l2_norm(dl::Tensor<float> &feature);
/**
* @brief calculate the cosine distance of the input ids
*
* @param id_1 id 1
* @param id_2 id 2
* @param normalized_ids true: the input ids have been normalized.
* false: the input ids have not been normalized.
* @param type 0: cos dist: [-1, 1]
* 1: normalized cos dist: [0, 1]
* @return float the cosine distance
*/
float cos_distance(dl::Tensor<float> &id_1, dl::Tensor<float> &id_2, bool normalized_ids = true, int8_t type = 0);
/**
* @brief transform the image to the input of a mfn model
*
* @tparam T
* @param image the input image.
* @param free_input true: free the input image.
* false: do not free the input image.
* @return dl::Tensor<T>*
*/
template <typename T>
dl::Tensor<T> *transform_mfn_input(dl::Tensor<uint8_t> &image, bool free_input = false);
/**
* @brief transform the image to the input of a mfn model
*
* @tparam T
* @param image the input image.
* @param output the preprocessed image.
* @param free_input true: free the input image.
* false: do not free the input image.
*/
template <typename T>
void transform_mfn_input(dl::Tensor<uint8_t> &image, dl::Tensor<T> &output, bool free_input = false);
/**
* @brief transform the mfn output embedding to a floating-point embedding
*
* @tparam T
* @param input the input embedding.
* @param norm true: normalize the output embedding.
* false: do not normalize the output embedding.
* @param free_input true: free the input embedding.
* false: do not free the input embedding.
* @return dl::Tensor<float>*
*/
template <typename T>
dl::Tensor<float> *transform_mfn_output(dl::Tensor<T> &input, bool norm = true, bool free_input = false);
/**
* @brief transform the mfn output embedding to a floating-point embedding
*
* @tparam T
* @param input the input embedding.
* @param output the output embedding.
* @param norm true: normalize the output embedding.
* false: do not normalize the output embedding.
* @param free_input true: free the input embedding.
* false: do not free the input embedding.
*/
template <typename T>
void transform_mfn_output(dl::Tensor<T> &input, dl::Tensor<float> &output, bool norm = true, bool free_input = false);
/**
* @brief get the aligned face.
*
* @tparam T
* @param input input tensor
* @param output the output aligned face.
* @param landmarks the landmarks of the face.
*/
template <typename T>
void align_face(dl::Tensor<T> *input, dl::Tensor<T> *output, std::vector<int> &landmarks);
/**
* @brief get the aligned face.
*
* @tparam T
* @param input input image with rgb565 format.
* @param shape the shape of the input image.
* @param output the output aligned face.
* @param landmarks the landmarks of the face.
*/
template <typename T>
void align_face(uint16_t *input, std::vector<int> shape, dl::Tensor<T> *output, std::vector<int> &landmarks);
} // namespace face_recognition_tool
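// A minimal sketch of comparing two embeddings with the helpers above (tensor
// names are placeholders; both embeddings are normalized in place first):
inline float same_person_score(dl::Tensor<float> &emb_a, dl::Tensor<float> &emb_b)
{
    face_recognition_tool::l2_norm(emb_a);
    face_recognition_tool::l2_norm(emb_b);
    // type 1 maps the cosine distance into [0, 1]
    return face_recognition_tool::cos_distance(emb_a, emb_b, true, 1);
}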

View File

@ -0,0 +1,296 @@
#pragma once
#include "dl_variable.hpp"
#include "face_recognition_tool.hpp"
#include <vector>
using namespace dl;
/**
* @brief
*
* @tparam feature_t
*/
template<typename feature_t>
class FaceRecognizer
{
public:
/**
* @brief Construct a new Face Recognizer object
*
*/
FaceRecognizer();
/**
* @brief Destroy the Face Recognizer object
*
*/
virtual ~FaceRecognizer();
void *model;
/**
* @brief Set the face recognition threshold [-1, 1], default thresh: 0.55
* Note: If the similarity of two faces is greater than the threshold, they will be judged as the same person
*
* @param thresh
*/
void set_thresh(float thresh);
/**
* @brief Get the current threshold of recognizer.
*
* @return float current threshold.
*/
float get_thresh();
/**
* @brief Get the input shape of the recognizer.
*
* @return std::vector<int> the input shape of the recognizer.
*/
std::vector<int> get_input_shape();
/**
* @brief do forward
*
* @param model_input the input data of the face recognition model.
* Note: the input data should have been preprocessed.
* @return Tensor<feature_t>& the output of the face recognition model.
*/
Tensor<feature_t> &forward(Tensor<feature_t> &model_input);
/**
* @brief recognize face
*
* @param image_input the pointer of the input image with format bgr565.
* @param shape the shape of the input image
* @param landmarks face landmarks coordinates
* @return face_info_t the recognition result.
*/
face_info_t recognize(uint16_t *image_input, std::vector<int> shape, std::vector<int> &landmarks);
/**
* @brief recognize face
*
* @param image_input the pointer of the input image with format bgr565.
* @param shape the shape of the input image
* @param aligned_face the Tensor to store the intermediate aligned face.
* @param landmarks face landmarks coordinates
* @return face_info_t the recognition result.
*/
face_info_t recognize(uint16_t *image_input, std::vector<int> shape, Tensor<uint8_t> &aligned_face, std::vector<int> &landmarks);
/**
* @brief recognize face
*
* @param image_input the Tensor of input image with format bgr888.
* @param landmarks face landmarks coordinates
* @return face_info_t the recognition result.
*/
face_info_t recognize(Tensor<uint8_t> &image_input, std::vector<int> &landmarks);
/**
* @brief recognize face
*
* @param image_input the Tensor of input image with format bgr888.
* @param aligned_face the Tensor to store the intermediate aligned face.
* @param landmarks face landmarks coordinates
* @return face_info_t the recognition result.
*/
face_info_t recognize(Tensor<uint8_t> &image_input, Tensor<uint8_t> &aligned_face, std::vector<int> &landmarks);
/**
* @brief recognize face
*
* @param aligned_face the Tensor of the input aligned face with format bgr888.
* @return face_info_t the recognition result.
*/
face_info_t recognize(Tensor<uint8_t> &aligned_face);
/**
* @brief recognize the face embedding.
*
* @param emb the normalized face embedding.
* @return face_info_t the recognition result.
*/
face_info_t recognize(Tensor<float> &emb);
/**
* @brief Get the index of the enrolled ids
*
* @return std::vector<int> a vector of face ids index
*/
std::vector<face_info_t> get_enrolled_ids();
/**
* @brief Get the face embedding
*
* @param id the face id index
* @return Tensor<float> the face embedding of the face id index.
* if there is no matching id, the embedding of the last input image is returned.
*/
Tensor<float> &get_face_emb(int id=-1);
/**
* @brief Get the number of enrolled id
*
* @return int the number of enrolled id
*/
int get_enrolled_id_num();
/**
* @brief enroll face id
*
* @param image_input the pointer of the input image with format bgr565.
* @param shape the shape of the input image
* @param landmarks face landmarks coordinates
* @param name name of the face id.
* @param update_flash true: the enrolled ids will be stored to flash
* false: the enrolled ids will not be stored to flash
* @return int the face id index of the enrolled embedding.
*/
int enroll_id(uint16_t *image_input, std::vector<int> shape, std::vector<int> &landmarks, std::string name="", bool update_flash = false);
/**
* @brief enroll face id
*
* @param image_input the pointer of the input image with format bgr565.
* @param shape the shape of the input image
* @param aligned_face the Tensor to store the intermediate aligned face.
* @param landmarks face landmarks coordinates
* @param name name of the face id.
* @param update_flash true: the enrolled ids will be stored to flash
* false: the enrolled ids will not be stored to flash
* @return int the face id index of the enrolled embedding.
*/
int enroll_id(uint16_t *image_input, std::vector<int> shape, Tensor<uint8_t> &aligned_face, std::vector<int> &landmarks, std::string name="", bool update_flash = false);
/**
* @brief enroll face id
*
* @param image_input the Tensor of input image with format bgr888.
* @param landmarks face landmarks coordinates
* @param name name of the face id.
* @param update_flash true: the enrolled ids will be stored to flash
* false: the enrolled ids will not be stored to flash
* @return int the face id index of the enrolled embedding.
*/
int enroll_id(Tensor<uint8_t> &image_input, std::vector<int> &landmarks, std::string name="", bool update_flash = false);
/**
* @brief enroll face id
*
* @param image_input the Tensor of input image with format bgr888.
* @param aligned_face the Tensor to store the intermediate aligned face.
* @param landmarks face landmarks coordinates
* @param name name of the face id.
* @param update_flash true: the enrolled ids will be stored to flash
* false: the enrolled ids will not be stored to flash
* @return int the face id index of the enrolled embedding.
*/
int enroll_id(Tensor<uint8_t> &image_input, Tensor<uint8_t> &aligned_face, std::vector<int> &landmarks, std::string name="", bool update_flash = false);
/**
* @brief enroll face id
*
* @param aligned_face the Tensor of the input aligned face with format bgr888.
* @param name name of the face id.
* @param update_flash true: the enrolled ids will be stored to flash
* false: the enrolled ids will not be stored to flash
* @return int the face id index of the enrolled embedding.
*/
int enroll_id(Tensor<uint8_t> &aligned_face, std::string name="", bool update_flash = false);
/**
* @brief enroll the normalized face embedding.
*
* @param emb the normalized face embedding.
* @param name name of the face id.
* @param update_flash true: the enrolled ids will be stored to flash
* false: the enrolled ids will not be stored to flash
* @return int the face id index of the enrolled embedding.
*/
int enroll_id(Tensor<float> &emb, std::string name="", bool update_flash = false);
/**
* @brief delete the last enrolled face id.
* @param update_flash true: the ids will be updated to flash
* false: the ids will not be stored to flash
*
* @return int the number of remaining face ids.
* if the face ids list is empty, return -1
*/
int delete_id(bool update_flash = false);
/**
* @brief delete the face id with id index.
*
* @param id face id index.
* @param update_flash true: the ids will be updated to flash
* false: the ids will not be stored to flash
* @return int the number of remaining face ids.
* if there is no matching id, return -1
*/
int delete_id(int id, bool update_flash = false);
/**
* @brief Set the enrolled ids
*
* @param ids the ids to be set
* @param update_flash true: the ids will be updated to flash
* false: the ids will not be stored to flash
* @return int the number of enrolled ids.
*/
int set_ids(std::vector<FaceID<float> *> &ids, bool update_flash = false);
/**
* @brief Set the enrolled ids from flash
*
* @return int the number of enrolled ids.
*/
int set_ids_from_flash();
/**
* @brief write the enrolled ids to flash
*
* @return int the number of enrolled ids.
*/
int write_ids_to_flash();
/**
* @brief Get the enrolled ids with name object
*
* @param name
* @return std::vector<face_info_t>
*/
std::vector<face_info_t> get_enrolled_ids_with_name(std::string name);
/**
* @brief Check whether the Flash partition is available
*
* @return int -2: the partition has not been set
* -1: the data in the flash does not match the current model.
* model_check_code: the Flash partition is available.
* number of ids in flash: the IDs in Flash and RAM are out of sync.
*/
int check_partition();
/**
* @brief delete all the enrolled face ids.
* @param update_flash true: the ids will be updated to flash
* false: the ids will not be stored to flash
*
*/
void clear_id(bool update_flash = false);
/**
* @brief Set the partition for saving face ids to flash or reading face ids from flash.
*
* @param type esp_partition_type
* @param subtype esp_partition_subtype
* @param label the partition label
* @return int 0: setting the partition failed
* 1: the partition was set successfully
*/
int set_partition(esp_partition_type_t type, esp_partition_subtype_t subtype, const char *label);
};
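// A hypothetical enroll-then-recognize sketch using the S8 model declared earlier
// (the frame, the landmark vector, and the name "person_0" are placeholders):
extern dl::Tensor<uint8_t> bgr888_frame;
extern std::vector<int> face_landmarks; // from the face-detection stage
inline void recognize_demo()
{
    static FaceRecognition112V1S8 recognizer;
    recognizer.set_thresh(0.55F); // the documented default
    if (recognizer.get_enrolled_id_num() == 0)
        recognizer.enroll_id(bgr888_frame, face_landmarks, "person_0");
    face_info_t info = recognizer.recognize(bgr888_frame, face_landmarks);
    printf("id=%d name=%s similarity=%f\n", info.id, info.name.c_str(), info.similarity);
}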

View File

@ -0,0 +1,41 @@
#pragma once
#include <vector>
#include <list>
#include "dl_detect_define.hpp"
class HumanFaceDetectMNP01
{
private:
void *model;
public:
/**
* @brief Construct a new Human Face Detect MNP01 object.
*
* @param score_threshold predicted boxes with score lower than the threshold will be filtered out
* @param nms_threshold predicted boxes with IoU higher than the threshold will be filtered out
* @param top_k only the k highest-scoring boxes are kept
*/
HumanFaceDetectMNP01(const float score_threshold, const float nms_threshold, const int top_k);
/**
* @brief Destroy the Human Face Detect MNP01 object.
*
*/
~HumanFaceDetectMNP01();
/**
* @brief Inference.
*
* @tparam T supports uint16_t and uint8_t,
* - uint16_t: input image is RGB565
* - uint8_t: input image is RGB888
* @param input_element pointer of input image
* @param input_shape shape of input image
* @param candidates candidate boxes on input image
* @return detection result
*/
template <typename T>
std::list<dl::detect::result_t> &infer(T *input_element, std::vector<int> input_shape, std::list<dl::detect::result_t> &candidates);
};

View File

@ -0,0 +1,40 @@
#pragma once
#include <list>
#include <vector>
#include "dl_detect_define.hpp"
class HumanFaceDetectMSR01
{
private:
void *model;
public:
/**
* @brief Construct a new Human Face Detect MSR01 object
*
* @param score_threshold predicted boxes with score lower than the threshold will be filtered out
* @param nms_threshold predicted boxes with IoU higher than the threshold will be filtered out
* @param top_k only the k highest-scoring boxes are kept
* @param resize_scale resize scale applied to the input image
*/
HumanFaceDetectMSR01(const float score_threshold, const float nms_threshold, const int top_k, float resize_scale);
/**
* @brief Destroy the Human Face Detect MSR01 object
*/
~HumanFaceDetectMSR01();
/**
* @brief Inference.
*
* @tparam T supports uint8_t and uint16_t
* - uint8_t: input image is RGB888
* - uint16_t: input image is RGB565
* @param input_element pointer of input image
* @param input_shape shape of input image
* @return detection result
*/
template <typename T>
std::list<dl::detect::result_t> &infer(T *input_element, std::vector<int> input_shape);
};
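// A hypothetical two-stage sketch: MSR01 proposes candidate boxes and MNP01
// refines them (the frame pointer, its 240 x 320 size, and the threshold values
// are placeholders, though this mirrors the usual two-stage arrangement):
extern uint16_t *frame565; // RGB565 frame
inline void detect_faces()
{
    HumanFaceDetectMSR01 stage1(0.1F, 0.5F, 10, 0.2F);
    HumanFaceDetectMNP01 stage2(0.5F, 0.3F, 5);
    std::list<dl::detect::result_t> &candidates = stage1.infer<uint16_t>(frame565, {240, 320, 3});
    std::list<dl::detect::result_t> &results = stage2.infer<uint16_t>(frame565, {240, 320, 3}, candidates);
    for (const auto &r : results)
        printf("face: score=%f box=(%d,%d,%d,%d)\n", r.score, r.box[0], r.box[1], r.box[2], r.box[3]);
}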

View File

@ -0,0 +1,61 @@
#pragma once
#include <vector>
#include "dl_define.hpp"
#include "dl_tool.hpp"
namespace dl
{
namespace nn
{
/**
* @brief Get the output shape object
*
* @param input_shape input shape
* @param filter_shape filter shape with dilation
* @param stride_y stride in height
* @param stride_x stride in width
* @param pad_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN
* @param is_conv2d one of true or false,
* - true: serve for Conv2D
* - false: serve for other operations
* @param padding explicitly specified padding size in [top, bottom, left, right] (optional)
* @return std::vector<int> output shape
*/
std::vector<int> get_output_shape(const std::vector<int> &input_shape, const std::vector<int> &filter_shape, const int stride_y, const int stride_x, const padding_type_t pad_type, const bool is_conv2d = false, std::vector<int> padding = {});
/**
* @brief Get the pad size object
*
* @param output_shape output shape
* @param input_shape input shape
* @param filter_shape filter shape with dilation
* @param stride_y stride in height
* @param stride_x stride in width
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN
* @return padding size
*/
std::vector<int> get_pad_size(const std::vector<int> &output_shape, const std::vector<int> &input_shape, const std::vector<int> &filter_shape, const int stride_y, const int stride_x, const padding_type_t padding_type);
} // namespace nn
} // namespace dl
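// Back-of-the-envelope check of the shapes these helpers should produce, as a
// sketch of the standard formulas (the library implementation may differ in
// corner cases):
//   PADDING_VALID:  out = (in - filter) / stride + 1   (integer division)
//   PADDING_SAME_*: out = ceil(in / stride)
constexpr int valid_out(int in, int filter, int stride) { return (in - filter) / stride + 1; }
constexpr int same_out(int in, int stride) { return (in + stride - 1) / stride; }
static_assert(valid_out(32, 3, 1) == 30, "32-wide input, 3-wide filter, stride 1");
static_assert(same_out(32, 2) == 16, "SAME padding keeps out = ceil(32 / 2) at stride 2");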
#if DL_LOG_NN_LATENCY
/**
* @brief Initialize.
*/
#define DL_LOG_NN_LATENCY_INIT() dl::tool::Latency latency
/**
* @brief Time starts.
*/
#define DL_LOG_NN_LATENCY_START() latency.start()
/**
* @brief Time ends and printed.
*/
#define DL_LOG_NN_LATENCY_END(key) \
latency.end(); \
latency.print("nn", key)
#else
#define DL_LOG_NN_LATENCY_INIT()
#define DL_LOG_NN_LATENCY_START()
#define DL_LOG_NN_LATENCY_END(key)
#endif

View File

@ -0,0 +1,91 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
/**
* @brief activation(add2d(input0, input1)).
*
* @param output as an output
* @param input0 as one input
* @param input1 as another input
* @param activation activation of add2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
* @param output_exponent exponent of the output; must be specified if, and only if, the operation is performed in place
*/
void add2d(Tensor<int16_t> &output,
Tensor<int16_t> &input0,
Tensor<int16_t> &input1,
const Activation<int16_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
const int output_exponent = INT_MIN);
/**
* @brief activation(add2d(input0, input1)).
*
* @param output as an output
* @param input0 as one input
* @param input1 as another input
* @param activation activation of add2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
* @param output_exponent exponent of the output; must be specified if, and only if, the operation is performed in place
*/
void add2d(Tensor<int8_t> &output,
Tensor<int8_t> &input0,
Tensor<int8_t> &input1,
const Activation<int8_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE, const int output_exponent = INT_MIN);
/**
* @brief activation(add2d(input0, input1))
*
* @tparam inplace whether to store the output directly in input0
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param output_exponent exponent of output
* @param input0 as one input
* @param input1 as another input
* @param activation activation of add2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
* @return add2d result, or nothing when inplace is true (the result is stored in input0)
*/
template <bool inplace = false, typename feature_t>
auto add2d(const int output_exponent,
Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const Activation<feature_t> *activation,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
assert(input0.is_same_shape(input1));
DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;
if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(output_exponent).set_shape(input0.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
DL_LOG_NN_LATENCY_START();
add2d(output, input0, input1, activation, assign_core);
DL_LOG_NN_LATENCY_END("add2d");
return output;
}
else
{
DL_LOG_NN_LATENCY_START();
add2d(input0, input0, input1, activation, assign_core, output_exponent);
input0.set_exponent(output_exponent);
DL_LOG_NN_LATENCY_END("add2d");
}
}
} // namespace nn
} // namespace dl
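// A minimal sketch of both call styles (tensor names and the output exponent are
// placeholders; a typed null pointer is used because the activation parameter
// participates in template deduction):
extern dl::Tensor<int16_t> x0, x1; // same shape, already quantized
inline void add2d_demo()
{
    const dl::Activation<int16_t> *no_act = nullptr;
    // out-of-place: allocates and returns a new tensor
    dl::Tensor<int16_t> y = dl::nn::add2d(-10 /*output_exponent*/, x0, x1, no_act);
    // in-place: the result overwrites x0 and nothing is returned
    dl::nn::add2d<true>(-10, x0, x1, no_act);
}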

View File

@ -0,0 +1,102 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
#include <stdint.h>
namespace dl
{
namespace nn
{
/**
* @brief avg_pool2d(input).
*
* @param output as an output
* @param input as an input
* @param padding padding size needed in [top, bottom, left, right] of this operation
* @param filter_shape filter_shape in [filter_height, filter_width]
* @param stride_y stride in height
* @param stride_x stride in width
* @param assign_core not effective yet
*/
void avg_pool2d(Tensor<int16_t> &output,
Tensor<int16_t> &input,
std::vector<int> &padding,
std::vector<int> &filter_shape,
const int stride_y,
const int stride_x,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief avg_pool2d(input).
*
* @param output as an output
* @param input as an input
* @param padding padding size needed in [top, bottom, left, right] of this operation
* @param filter_shape filter_shape in [filter_height, filter_width]
* @param stride_y stride in height
* @param stride_x stride in width
* @param assign_core not effective yet
*/
void avg_pool2d(Tensor<int8_t> &output,
Tensor<int8_t> &input,
std::vector<int> &padding,
std::vector<int> &filter_shape,
const int stride_y,
const int stride_x,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief avg_pool2d(input).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param output_exponent exponent of output
* @param input as an input
* @param filter_shape filter_shape in [filter_height, filter_width]
* @param stride_y stride in height
* @param stride_x stride in width
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN,
* - PADDING_VALID: no padding
* PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
* such that the output has the same height/width dimension as the input,
* - PADDING_SAME_END pads in TensorFlow style
* - PADDING_SAME_BEGIN pads in MXNET style
* @param assign_core not effective yet
* @return avg_pool2d result
*/
template <typename feature_t>
Tensor<feature_t> avg_pool2d(const int output_exponent,
Tensor<feature_t> &input,
std::vector<int> filter_shape,
const int stride_y,
const int stride_x,
const padding_type_t padding_type,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_NN_LATENCY_INIT();
DL_LOG_NN_LATENCY_START();
std::vector<int> output_shape = get_output_shape(input.shape, filter_shape, stride_y, stride_x, padding_type);
Tensor<feature_t> output;
output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
std::vector<int> padding(4, 0);
DL_LOG_NN_LATENCY_START();
if (padding_type == PADDING_SAME_END || padding_type == PADDING_SAME_BEGIN)
{
padding = get_pad_size(output_shape, input.shape, filter_shape, stride_y, stride_x, padding_type);
}
DL_LOG_NN_LATENCY_END("padding");
DL_LOG_NN_LATENCY_START();
avg_pool2d(output, input, padding, filter_shape, stride_y, stride_x, assign_core);
DL_LOG_NN_LATENCY_END("avg_pool2d");
return output;
}
} // namespace nn
} // namespace dl
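// A hypothetical call: 2x2 average pooling with stride 2 and TensorFlow-style
// SAME padding (the input tensor and output exponent are placeholders):
extern dl::Tensor<int8_t> feature_map;
inline void avg_pool2d_demo()
{
    dl::Tensor<int8_t> pooled = dl::nn::avg_pool2d(-7 /*output_exponent*/, feature_map,
                                                   {2, 2} /*filter_shape*/, 2 /*stride_y*/, 2 /*stride_x*/,
                                                   dl::PADDING_SAME_END);
}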

View File

@ -0,0 +1,63 @@
#pragma once
#include <vector>
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
template <typename feature_t>
void concat(Tensor<feature_t> &output, std::vector<Tensor<feature_t> *> &inputs, int axis, bool free_inputs = false);
template <typename feature_t>
Tensor<feature_t> concat(std::vector<Tensor<feature_t> *> &inputs, int axis, bool free_inputs = false)
{
DL_LOG_NN_LATENCY_INIT();
DL_LOG_NN_LATENCY_START();
assert(inputs.size() > 1);
int shape_size = inputs[0]->shape.size();
if (axis < 0)
{
axis = shape_size + axis;
}
assert((axis < shape_size) && (axis > -1));
int output_shape_axis = inputs[0]->shape[axis];
for (int i = 1; i < inputs.size(); i++)
{
assert(shape_size == inputs[i]->shape.size());
assert(inputs[i]->exponent == inputs[i - 1]->exponent);
output_shape_axis += inputs[i]->shape[axis];
for (int j = 0; j < shape_size; j++)
{
if (j != axis)
{
assert(inputs[i]->shape[j] == inputs[i - 1]->shape[j]);
}
}
}
DL_LOG_NN_LATENCY_END("assert");
DL_LOG_NN_LATENCY_START();
Tensor<feature_t> output;
std::vector<int> output_shape = inputs[0]->shape;
output_shape[axis] = output_shape_axis;
output.set_shape(output_shape);
output.set_exponent(inputs[0]->exponent);
output.malloc_element();
DL_LOG_NN_LATENCY_END("malloc");
DL_LOG_NN_LATENCY_START();
concat(output, inputs, axis, free_inputs);
DL_LOG_NN_LATENCY_END("concat");
return output;
}
} // namespace nn
} // namespace dl
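// A minimal sketch: concatenate two tensors along the last axis (tensor names
// are placeholders; both must share the same exponent and the same shape on
// every axis other than the concatenation axis):
extern dl::Tensor<int16_t> branch_a, branch_b;
inline void concat_demo()
{
    std::vector<dl::Tensor<int16_t> *> inputs = {&branch_a, &branch_b};
    dl::Tensor<int16_t> merged = dl::nn::concat(inputs, -1 /*axis*/);
}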

View File

@ -0,0 +1,22 @@
#pragma once
#include <vector>
#include "dl_variable.hpp"
namespace dl
{
namespace nn
{
/**
* @brief concat2d(input_1, input_2, ...)
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param output as an output
* @param inputs a bundle of inputs to be concatenated
*/
template <typename feature_t>
void concat2d(Tensor<feature_t> &output, std::vector<Tensor<feature_t>> inputs);
} // namespace nn
} // namespace dl

View File

@ -0,0 +1,136 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
/**
* @brief activation(conv2d(input, filter) + bias).
*
* @param output as an output
* @param input as an input
* @param padding padding size needed in [top, bottom, left, right] of this operation
* @param filter filter of conv2d
* @param stride_y stride in height
* @param stride_x stride in width
* @param bias bias of conv2d, if you don't specify anything, no bias is added
* @param activation activation of conv2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
*/
void conv2d(Tensor<int16_t> &output,
Tensor<int16_t> &input,
std::vector<int> &padding,
const Filter<int16_t> &filter,
const int stride_y,
const int stride_x,
const Bias<int16_t> *const bias = NULL,
const Activation<int16_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief activation(conv2d(input, filter) + bias).
*
* @param output as an output
* @param input as an input
* @param padding padding size needed in [top, bottom, left, right] of this operation
* @param filter filter of conv2d
* @param stride_y stride in height
* @param stride_x stride in width
* @param bias bias of conv2d, if you don't specify anything, no bias is added
* @param activation activation of conv2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
*/
void conv2d(Tensor<int8_t> &output,
Tensor<int8_t> &input,
std::vector<int> &padding,
const Filter<int8_t> &filter,
const int stride_y,
const int stride_x,
const Bias<int8_t> *const bias = NULL,
const Activation<int8_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief activation(conv2d(input, filter) + bias).
*
* @param output as an output
* @param input as an input
* @param padding padding size needed in [top, bottom, left, right] of this operation
* @param filter filter of conv2d
* @param stride_y stride in height
* @param stride_x stride in width
* @param bias bias of conv2d, if you don't specify anything, no bias is added
* @param activation activation of conv2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
*/
void conv2d(Tensor<int8_t> &output,
Tensor<int8_t> &input,
std::vector<int> &padding,
const Filter<int8_t> &filter,
const int stride_y,
const int stride_x,
const Bias<int16_t> *const bias = NULL,
const Activation<int8_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief activation(conv2d(input, filter) + bias).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param output_exponent exponent of output
* @param input as an input
* @param filter Filter of conv2d
* @param stride_y stride in height
* @param stride_x stride in width
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN,
* - PADDING_VALID: no padding
* PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
* such that the output has the same height/width dimension as the input,
* - PADDING_SAME_END pads in TensorFlow style
* - PADDING_SAME_BEGIN pads in MXNET style
* @param bias bias of conv2d, if you don't specify anything, no bias is added
* @param activation activation of conv2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
* @return conv2d result
*/
template <typename feature_t, typename bias_t>
Tensor<feature_t> conv2d(const int output_exponent,
Tensor<feature_t> &input,
const Filter<feature_t> &filter,
const int stride_y,
const int stride_x,
const padding_type_t padding_type,
const Bias<bias_t> *bias,
const Activation<feature_t> *activation,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_NN_LATENCY_INIT();
DL_LOG_NN_LATENCY_START();
std::vector<int> output_shape = get_output_shape(input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type, true);
Tensor<feature_t> output;
output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
std::vector<int> padding(4, 0);
DL_LOG_NN_LATENCY_START();
if (padding_type == PADDING_SAME_END || padding_type == PADDING_SAME_BEGIN)
{
padding = get_pad_size(output_shape, input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type);
}
DL_LOG_NN_LATENCY_END("padding");
DL_LOG_NN_LATENCY_START();
conv2d(output, input, padding, filter, stride_y, stride_x, bias, activation, assign_core);
DL_LOG_NN_LATENCY_END("conv2d");
return output;
}
} // namespace nn
} // namespace dl
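// A hypothetical call: 3x3 convolution, stride 1, SAME padding, with bias and a
// ReLU activation (all the extern names and the output exponent are placeholders):
extern dl::Tensor<int8_t> conv_input;
extern const dl::Filter<int8_t> filter_3x3;
extern const dl::Bias<int8_t> conv_bias;
extern const dl::Activation<int8_t> relu;
inline void conv2d_demo()
{
    dl::Tensor<int8_t> y = dl::nn::conv2d(-6 /*output_exponent*/, conv_input, filter_3x3,
                                          1 /*stride_y*/, 1 /*stride_x*/,
                                          dl::PADDING_SAME_END, &conv_bias, &relu);
}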

View File

@ -0,0 +1,137 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
/**
* @brief activate(depthwise_conv2d(input, filter) + bias)
*
* @param output as an output
* @param input as an input
* @param padding padding size needed in [top, bottom, left, right] of this operation
* @param filter Filter of depthwise_conv2d
* @param stride_y stride in height
* @param stride_x stride in width
* @param bias bias of depthwise_conv2d, if you don't specify anything, no bias is added
* @param activation activation of depthwise_conv2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
*/
void depthwise_conv2d(Tensor<int16_t> &output,
Tensor<int16_t> &input,
std::vector<int> &padding,
const Filter<int16_t> &filter,
const int stride_y,
const int stride_x,
const Bias<int16_t> *bias = NULL,
const Activation<int16_t> *activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief activate(depthwise_conv2d(input, filter) + bias)
*
* @param output as an output
* @param input as an input
* @param padding padding size needed in [top, bottom, left, right] of this operation
* @param filter filter of depthwise_conv2d
* @param stride_y stride in height
* @param stride_x stride in width
* @param bias bias of depthwise_conv2d, if you don't specify anything, no bias is added
* @param activation activation of depthwise_conv2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
*/
void depthwise_conv2d(Tensor<int8_t> &output,
Tensor<int8_t> &input,
std::vector<int> &padding,
const Filter<int8_t> &filter,
const int stride_y,
const int stride_x,
const Bias<int8_t> *bias = NULL,
const Activation<int8_t> *activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief activate(depthwise_conv2d(input, filter) + bias)
*
* @param output as an output
* @param input as an input
* @param padding padding size needed in [top, bottom, left, right] of this operation
* @param filter Filter of depthwise_conv2d
* @param stride_y stride in height
* @param stride_x stride in width
* @param bias bias of depthwise_conv2d, if you don't specify anything, no bias is added
* @param activation activation of depthwise_conv2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
*/
void depthwise_conv2d(Tensor<int8_t> &output,
Tensor<int8_t> &input,
std::vector<int> &padding,
const Filter<int8_t> &filter,
const int stride_y,
const int stride_x,
const Bias<int16_t> *bias = NULL,
const Activation<int8_t> *activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief activation(depthwise_conv2d(input, filter) + bias)
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param output_exponent exponent of output
* @param input as an input
* @param filter filter of depthwise_conv2d
* @param stride_y stride in height
* @param stride_x stride in width
* @param pad_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN,
* - PADDING_VALID means no padding
* PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
* such that the output has the same height/width dimension as the input,
* - PADDING_SAME_END pads in TensorFlow style
* - PADDING_SAME_BEGIN pads in MXNET style
* @param bias bias of depthwise_conv2d, if you don't specify anything, no bias is added
* @param activation activation of depthwise_conv2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
* @return depthwise_conv2d result
*/
template <typename feature_t, typename bias_t>
Tensor<feature_t> depthwise_conv2d(const int output_exponent,
Tensor<feature_t> &input,
const Filter<feature_t> &filter,
const int stride_y,
const int stride_x,
const padding_type_t padding_type,
const Bias<bias_t> *bias,
const Activation<feature_t> *activation,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_NN_LATENCY_INIT();
DL_LOG_NN_LATENCY_START();
std::vector<int> output_shape = get_output_shape(input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type);
Tensor<feature_t> output;
output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
std::vector<int> padding(4, 0);
DL_LOG_NN_LATENCY_START();
if (padding_type == PADDING_SAME_END || padding_type == PADDING_SAME_BEGIN)
{
padding = get_pad_size(output_shape, input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type);
}
DL_LOG_NN_LATENCY_END("padding");
DL_LOG_NN_LATENCY_START();
depthwise_conv2d(output, input, padding, filter, stride_y, stride_x, bias, activation, assign_core);
DL_LOG_NN_LATENCY_END("depthwise_conv2d");
return output;
}
} // namespace nn
} // namespace dl

View File

@ -0,0 +1,126 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
/**
* @brief activation(FullyConnected(input, filter) + bias).
*
* @param output as an output
* @param input as an input
* @param filter filter of FullyConnected
* @param bias bias of FullyConnected, if you don't specify anything, no bias is added
* @param activation activation of FullyConnected, if you don't specify anything, no activation is applied
* @param flatten true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
* false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ..., xn, output_dim]
* @param assign_core not effective yet
*/
void fully_connected(Tensor<int16_t> &output,
Tensor<int16_t> &input,
const Filter<int16_t> &filter,
const Bias<int16_t> *const bias = NULL,
const Activation<int16_t> *const activation = NULL,
const bool flatten = true,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief activation(FullyConnected(input, filter) + bias).
*
* @param output as an output
* @param input as an input
* @param filter filter of FullyConnected
* @param bias bias of FullyConnected, if you don't specify anything, no bias is added
* @param activation activation of FullyConnected, if you don't specify anything, no activation is applied
* @param flatten true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
* false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ..., xn, output_dim]
* @param assign_core not effective yet
*/
void fully_connected(Tensor<int8_t> &output,
Tensor<int8_t> &input,
const Filter<int8_t> &filter,
const Bias<int8_t> *const bias = NULL,
const Activation<int8_t> *const activation = NULL,
const bool flatten = true,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief activation(FullyConnected(input, filter) + bias).
*
* @param output as an output
* @param input as an input
* @param filter filter of FullyConnected
* @param bias bias of FullyConnected, if you don't specify anything, no bias is added
* @param activation activation of FullyConnected, if you don't specify anything, no activation is applied
* @param flatten true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
* false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ..., xn, output_dim]
* @param assign_core not effective yet
*/
void fully_connected(Tensor<int8_t> &output,
Tensor<int8_t> &input,
const Filter<int8_t> &filter,
const Bias<int16_t> *const bias = NULL,
const Activation<int8_t> *const activation = NULL,
const bool flatten = true,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief activation(FullyConnected(input, filter) + bias).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param output_exponent exponent of output
* @param input as an input
* @param filter Filter of FullyConnected
* @param bias bias of FullyConnected, if you don't specify anything, no bias is added
* @param activation activation of FullyConnected, if you don't specify anything, no activation is applied
* @param flatten true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
* false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ..., xn, output_dim]
* @param assign_core not effective yet
* @return FullyConnected result
*/
template <typename feature_t>
Tensor<feature_t> fully_connected(const int output_exponent,
Tensor<feature_t> &input,
const Filter<feature_t> &filter,
const Bias<feature_t> *bias,
const Activation<feature_t> *activation,
const bool flatten,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_NN_LATENCY_INIT();
DL_LOG_NN_LATENCY_START();
assert(filter.shape.size() == 4);
assert(filter.shape[0] == 1);
assert(filter.shape[1] == 1);
std::vector<int> output_shape;
if (flatten)
{
assert(input.get_size() == filter.shape[2]);
output_shape = {filter.shape.back()};
}
else
{
assert(input.shape.back() == filter.shape[2]);
output_shape = input.shape;
output_shape[output_shape.size() - 1] = filter.shape.back();
}
Tensor<feature_t> output;
output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
DL_LOG_NN_LATENCY_START();
fully_connected(output, input, filter, bias, activation, flatten, assign_core);
DL_LOG_NN_LATENCY_END("fully_connected");
return output;
}
} // namespace nn
} // namespace dl
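// A hypothetical call that flattens an arbitrary feature map into a 10-way
// output (the extern names are placeholders; typed null pointers are used
// because bias and activation participate in template deduction):
extern dl::Tensor<int16_t> features;
extern const dl::Filter<int16_t> fc_filter; // shape [1, 1, features.get_size(), 10]
inline void fully_connected_demo()
{
    const dl::Bias<int16_t> *no_bias = nullptr;
    const dl::Activation<int16_t> *no_act = nullptr;
    dl::Tensor<int16_t> logits = dl::nn::fully_connected(-8 /*output_exponent*/, features,
                                                         fc_filter, no_bias, no_act, true /*flatten*/);
}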

View File

@ -0,0 +1,66 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
#include <stdint.h>
namespace dl
{
namespace nn
{
/**
* @brief global_avg_pool2d(input).
*
* @param output as an output
* @param input as an input
* @param assign_core not effective yet
*/
void global_avg_pool2d(Tensor<int16_t> &output,
Tensor<int16_t> &input,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief global_avg_pool2d(input).
*
* @param output as an output
* @param input as an input
* @param assign_core not effective yet
*/
void global_avg_pool2d(Tensor<int8_t> &output,
Tensor<int8_t> &input,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief global_avg_pool2d(input).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param output_exponent exponent of output
* @param input as an input
* @param assign_core not effective yet
* @return global_avg_pool2d result
*/
template <typename feature_t>
Tensor<feature_t> global_avg_pool2d(const int output_exponent,
Tensor<feature_t> &input,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_NN_LATENCY_INIT();
DL_LOG_NN_LATENCY_START();
std::vector<int> output_shape(input.shape.size(), 1);
output_shape[2] = input.shape[2];
Tensor<feature_t> output;
output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
DL_LOG_NN_LATENCY_START();
global_avg_pool2d(output, input, assign_core);
DL_LOG_NN_LATENCY_END("global_avg_pool2d");
return output;
}
} // namespace nn
} // namespace dl
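
/*
 * Usage sketch (illustrative only, not part of the original header; shapes
 * and exponents are hypothetical). A [7, 7, 16] feature map is averaged
 * over height and width into a [1, 1, 16] output.
 */
inline void example_global_avg_pool2d()
{
    dl::Tensor<int16_t> input;
    input.set_exponent(-10).set_shape({7, 7, 16}).malloc_element();

    dl::Tensor<int16_t> output = dl::nn::global_avg_pool2d(-10, input);
    output.print_shape(); // shape = (1, 1, 16)
}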

View File

@ -0,0 +1,64 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
#include <stdint.h>
namespace dl
{
namespace nn
{
/**
* @brief global_max_pool2d(input).
*
* @param output as an output
* @param input as an input
* @param assign_core not effective yet
*/
void global_max_pool2d(Tensor<int16_t> &output,
Tensor<int16_t> &input,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief global_max_pool2d(input).
*
* @param output as an output
* @param input as an input
* @param assign_core not effective yet
*/
void global_max_pool2d(Tensor<int8_t> &output,
Tensor<int8_t> &input,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief global_max_pool2d(input).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param input as an input
* @param assign_core not effective yet
* @return global_max_pool2d result
*/
template <typename feature_t>
Tensor<feature_t> global_max_pool2d(Tensor<feature_t> &input,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_NN_LATENCY_INIT();
DL_LOG_NN_LATENCY_START();
std::vector<int> output_shape(input.shape.size(), 1);
output_shape[2] = input.shape[2];
Tensor<feature_t> output;
output.set_exponent(input.exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
DL_LOG_NN_LATENCY_START();
global_max_pool2d(output, input, assign_core);
DL_LOG_NN_LATENCY_END("global_max_pool2d");
return output;
}
} // namespace nn
} // namespace dl

View File

@ -0,0 +1,82 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
/**
* @brief leakyrelu(input).
*
* @param output as an output
* @param input as an input
* @param activation_alpha quantized alpha
* @param activation_exponent exponent of quantized alpha
* @param assign_core not effective yet
*/
void leakyrelu(Tensor<int16_t> &output,
Tensor<int16_t> &input,
const int16_t activation_alpha,
const int activation_exponent,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief leakyrelu(input).
*
* @param output as an output
* @param input as an input
* @param activation_alpha quantized alpha
* @param activation_exponent exponent of quantized alpha
* @param assign_core not effective yet
*/
void leakyrelu(Tensor<int8_t> &output,
Tensor<int8_t> &input,
const int8_t activation_alpha,
const int activation_exponent,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief leakyrelu(input)
*
* @tparam inplace whether to store the output directly into the input
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param input as an input
* @param activation_alpha quantized alpha
* @param activation_exponent exponent of quantized alpha
* @param assign_core not effective yet
* @return leakyrelu result, or no return (result stored in input)
*/
template <bool inplace = false, typename feature_t>
auto leakyrelu(Tensor<feature_t> &input,
const int activation_alpha,
const int activation_exponent,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;
if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(input.exponent).set_shape(input.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
DL_LOG_NN_LATENCY_START();
leakyrelu(output, input, activation_alpha, activation_exponent, assign_core);
DL_LOG_NN_LATENCY_END("leakyrelu");
return output;
}
else
{
DL_LOG_NN_LATENCY_START();
leakyrelu(input, input, activation_alpha, activation_exponent, assign_core);
DL_LOG_NN_LATENCY_END("leakyrelu");
}
}
} // namespace nn
} // namespace dl
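
/*
 * Usage sketch (illustrative only, not part of the original header; shapes
 * and values are hypothetical). With activation_alpha = 26 and
 * activation_exponent = -8, negative inputs are scaled by about
 * 26 * 2^-8 ≈ 0.1.
 */
inline void example_leakyrelu()
{
    dl::Tensor<int8_t> x;
    x.set_exponent(-7).set_shape({4, 4, 8}).malloc_element();

    // Out-of-place: a new Tensor is allocated and returned.
    dl::Tensor<int8_t> y = dl::nn::leakyrelu(x, 26, -8);

    // In-place: the result overwrites x, nothing is returned.
    dl::nn::leakyrelu<true>(x, 26, -8);
}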

View File

@ -0,0 +1,81 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
/**
* @brief max2d(input0, input1)
*
* @param output as an output
* @param input0 as one input
* @param input1 as another input
* @param assign_core not effective yet
*/
void max2d(Tensor<int16_t> &output,
Tensor<int16_t> &input0,
Tensor<int16_t> &input1,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief max2d(input0, input1)
*
* @param output as an output
* @param input0 as one input
* @param input1 as another input
* @param assign_core not effective yet
*/
void max2d(Tensor<int8_t> &output,
Tensor<int8_t> &input0,
Tensor<int8_t> &input1,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief max2d(input0, input1)
*
* @tparam inplace whether to store the output directly into input0
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param input0 as one input
* @param input1 as another input
* @param assign_core not effective yet
* @return max2d result, or no return (result stored in input0)
*/
template <bool inplace = false, typename feature_t>
auto max2d(Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
assert(input0.is_same_shape(input1));
assert(input0.exponent == input1.exponent);
DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;
if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(input0.exponent).set_shape(input0.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
DL_LOG_NN_LATENCY_START();
max2d(output, input0, input1, assign_core);
DL_LOG_NN_LATENCY_END("max2d");
return output;
}
else
{
DL_LOG_NN_LATENCY_START();
max2d(input0, input0, input1, assign_core);
DL_LOG_NN_LATENCY_END("max2d");
}
}
} // namespace nn
} // namespace dl
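
/*
 * Usage sketch (illustrative only, not part of the original header; shapes
 * and exponents are hypothetical). Both inputs must share the same shape
 * and exponent; the element-wise maximum is computed.
 */
inline void example_max2d()
{
    dl::Tensor<int16_t> a, b;
    a.set_exponent(-8).set_shape({8, 8, 4}).malloc_element();
    b.set_exponent(-8).set_shape({8, 8, 4}).malloc_element();

    dl::Tensor<int16_t> c = dl::nn::max2d(a, b); // out-of-place
    dl::nn::max2d<true>(a, b);                   // in-place: result stored in a
}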

View File

@ -0,0 +1,101 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
#include <stdint.h>
namespace dl
{
namespace nn
{
/**
* @brief max_pool2d(input).
*
* @param output as an output
* @param input as an input
* @param padding padding size needed in [top, bottom, left, right] of this operation
* @param filter_shape filter shape in [filter_height, filter_width]
* @param stride_y stride in height
* @param stride_x stride in width
* @param assign_core not effective yet
*/
void max_pool2d(Tensor<int16_t> &output,
Tensor<int16_t> &input,
std::vector<int> &padding,
std::vector<int> &filter_shape,
const int stride_y,
const int stride_x,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief max_pool2d(input).
*
* @param output as an output
* @param input as an input
* @param padding padding size needed in [top, bottom, left, right] of this operation
* @param filter_shape filter shape in [filter_height, filter_width]
* @param stride_y stride in height
* @param stride_x stride in width
* @param assign_core not effective yet
*/
void max_pool2d(Tensor<int8_t> &output,
Tensor<int8_t> &input,
std::vector<int> &padding,
std::vector<int> &filter_shape,
const int stride_y,
const int stride_x,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief max_pool2d(input).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param input as an input
* @param filter_shape filter shape in [filter_height, filter_width]
* @param stride_y stride in height
* @param stride_x stride in width
* @param padding_type one of PADDING_VALID, PADDING_SAME_END or PADDING_SAME_BEGIN,
* - PADDING_VALID: no padding
* PADDING_SAME_END and PADDING_SAME_BEGIN pad with zeros evenly to the left/right and top/bottom of the input
* such that the output has the same height/width dimension as the input,
* - PADDING_SAME_END: padding in TensorFlow style
* - PADDING_SAME_BEGIN: padding in MXNET style
* @param assign_core not effective yet
* @return max_pool2d result
*/
template <typename feature_t>
Tensor<feature_t> max_pool2d(Tensor<feature_t> &input,
std::vector<int> filter_shape,
const int stride_y,
const int stride_x,
const padding_type_t padding_type,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_NN_LATENCY_INIT();
DL_LOG_NN_LATENCY_START();
std::vector<int> output_shape = get_output_shape(input.shape, filter_shape, stride_y, stride_x, padding_type);
Tensor<feature_t> output;
output.set_exponent(input.exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
std::vector<int> padding(4, 0);
DL_LOG_NN_LATENCY_START();
if (padding_type == PADDING_SAME_END || padding_type == PADDING_SAME_BEGIN)
{
padding = get_pad_size(output_shape, input.shape, filter_shape, stride_y, stride_x, padding_type);
}
DL_LOG_NN_LATENCY_END("padding");
DL_LOG_NN_LATENCY_START();
max_pool2d(output, input, padding, filter_shape, stride_y, stride_x, assign_core);
DL_LOG_NN_LATENCY_END("max_pool2d");
return output;
}
} // namespace nn
} // namespace dl
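
/*
 * Usage sketch (illustrative only, not part of the original header; shapes
 * and exponents are hypothetical). A 2x2 max pooling with stride 2 and
 * PADDING_VALID halves the spatial dimensions: [8, 8, 4] -> [4, 4, 4].
 */
inline void example_max_pool2d()
{
    dl::Tensor<int8_t> input;
    input.set_exponent(-7).set_shape({8, 8, 4}).malloc_element();

    dl::Tensor<int8_t> output = dl::nn::max_pool2d(input, {2, 2}, 2, 2, dl::PADDING_VALID);
    output.print_shape(); // shape = (4, 4, 4)
}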

View File

@ -0,0 +1,80 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
/**
* @brief min2d(input0, input1)
*
* @param output as an output
* @param input0 as one input
* @param input1 as another input
* @param assign_core not effective yet
*/
void min2d(Tensor<int16_t> &output,
Tensor<int16_t> &input0,
Tensor<int16_t> &input1,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief min2d(input0, input1)
*
* @param output as an output
* @param input0 as one input
* @param input1 as another input
* @param assign_core not effective yet
*/
void min2d(Tensor<int8_t> &output,
Tensor<int8_t> &input0,
Tensor<int8_t> &input1,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief min2d(input0, input1)
*
* @tparam inplace whether to store the output directly into input0
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param input0 as one input
* @param input1 as another input
* @param assign_core not effective yet
* @return min2d result, or no return (result stored in input0)
*/
template <bool inplace = false, typename feature_t>
auto min2d(Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
assert(input0.is_same_shape(input1));
assert(input0.exponent == input1.exponent);
DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;
if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(input0.exponent).set_shape(input0.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
DL_LOG_NN_LATENCY_START();
min2d(output, input0, input1, assign_core);
DL_LOG_NN_LATENCY_END("min2d");
return output;
}
else
{
DL_LOG_NN_LATENCY_START();
min2d(input0, input0, input1, assign_core);
DL_LOG_NN_LATENCY_END("min2d");
}
}
} // namespace nn
} // namespace dl

View File

@ -0,0 +1,91 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
/**
* @brief activation(mul2d(input0, input1)).
*
* @param output as an output
* @param input0 as one input
* @param input1 as another input
* @param activation activation of mul2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
* @param output_exponent exponent of output; must be specified if, and only if, the operation is performed in place
*/
void mul2d(Tensor<int16_t> &output,
Tensor<int16_t> &input0,
Tensor<int16_t> &input1,
const Activation<int16_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
const int output_exponent = INT_MIN);
/**
* @brief activation(mul2d(input0, input1)).
*
* @param output as an output
* @param input0 as one input
* @param input1 as another input
* @param activation activation of mul2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
* @param output_exponent exponent of output; must be specified if, and only if, the operation is performed in place
*/
void mul2d(Tensor<int8_t> &output,
Tensor<int8_t> &input0,
Tensor<int8_t> &input1,
const Activation<int8_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
const int output_exponent = INT_MIN);
/**
* @brief activation(mul2d(input0, input1)).
*
* @tparam inplace whether to store the output directly into input0
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param output_exponent exponent of output
* @param input0 as one input
* @param input1 as another input
* @param activation activation of mul2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
* @return mul2d result, or no return (result stored in input0)
*/
template <bool inplace = false, typename feature_t>
auto mul2d(const int output_exponent,
Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const Activation<feature_t> *activation,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
assert(input0.is_same_shape(input1));
DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;
if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(output_exponent).set_shape(input0.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
DL_LOG_NN_LATENCY_START();
mul2d(output, input0, input1, activation, assign_core);
DL_LOG_NN_LATENCY_END("mul2d");
return output;
}
else
{
DL_LOG_NN_LATENCY_START();
mul2d(input0, input0, input1, activation, assign_core, output_exponent);
DL_LOG_NN_LATENCY_END("mul2d");
}
}
} // namespace nn
} // namespace dl
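
/*
 * Usage sketch (illustrative only, not part of the original header; shapes
 * and exponents are hypothetical). Multiplying two exponent -7 tensors
 * yields raw products at exponent -14; the output_exponent argument
 * requantizes the result, here back to -7.
 */
inline void example_mul2d()
{
    dl::Tensor<int8_t> a, b;
    a.set_exponent(-7).set_shape({4, 4, 8}).malloc_element();
    b.set_exponent(-7).set_shape({4, 4, 8}).malloc_element();

    // No activation. The cast is needed because NULL alone cannot drive
    // template argument deduction.
    dl::Tensor<int8_t> c = dl::nn::mul2d(-7, a, b, (const dl::Activation<int8_t> *)NULL);
}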

View File

@ -0,0 +1,120 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
/**
* @brief pad(input)
*
* @tparam feature_t
* @param output as an output
* @param input as an input
* @param paddings number of values padded to the edges of each dim
* @param constant_values used in PADDING_CONSTANT, the values to set the padded values for each dim
* @param mode One of the following: PADDING_EMPTY, PADDING_CONSTANT, PADDING_EDGE, PADDING_REFLECT, PADDING_SYMMETRIC
* @param assign_core not effective yet
*/
template <typename feature_t>
void pad(Tensor<feature_t> &output,
Tensor<feature_t> &input,
std::vector<int> paddings,
std::vector<feature_t> constant_values,
padding_mode_t mode,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief
*
* @tparam feature_t
* @param input as an input
* @param paddings number of values padded to the edges of each dim
* @param constant_values used in PADDING_CONSTANT, the values to set the padded values for each dim
* @param mode One of the following: PADDING_EMPTY, PADDING_CONSTANT, PADDING_EDGE, PADDING_REFLECT, PADDING_SYMMETRIC
* @param assign_core not effective yet
* @return Tensor<feature_t> the padded Tensor
*/
template <typename feature_t>
Tensor<feature_t> pad(Tensor<feature_t> &input,
std::vector<int> paddings,
std::vector<feature_t> constant_values,
padding_mode_t mode,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_NN_LATENCY_INIT();
DL_LOG_NN_LATENCY_START();
assert(paddings.size() > 0);
int input_dims = input.shape.size();
int padding_dims = input_dims * 2;
std::vector<int> _paddings(padding_dims, 0);
if (paddings.size() == 1)
{
for (int i = 0; i < padding_dims; ++i)
{
_paddings[i] = paddings[0];
}
}
else if (paddings.size() == 2)
{
for (int i = 0; i < input_dims; ++i)
{
_paddings[2 * i] = paddings[0];
_paddings[2 * i + 1] = paddings[1];
}
}
else
{
assert(paddings.size() == padding_dims);
_paddings = paddings;
}
std::vector<feature_t> _constant_values(padding_dims, 0);
if (mode == PADDING_CONSTANT)
{
if (constant_values.size() == 1)
{
for (int i = 0; i < padding_dims; ++i)
{
_constant_values[i] = constant_values[0];
}
}
else if (constant_values.size() == 2)
{
for (int i = 0; i < input_dims; ++i)
{
_constant_values[2 * i] = constant_values[0];
_constant_values[2 * i + 1] = constant_values[1];
}
}
else
{
assert(constant_values.size() == padding_dims);
_constant_values = constant_values;
}
}
std::vector<int> output_shape = input.shape;
for (int i = 0; i < input_dims; ++i)
{
output_shape[i] += (_paddings[2 * i] + _paddings[2 * i + 1]);
}
Tensor<feature_t> output;
output.set_exponent(input.exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
DL_LOG_NN_LATENCY_START();
pad(output, input, _paddings, _constant_values, mode, assign_core);
DL_LOG_NN_LATENCY_END("pad");
return output;
}
} // namespace nn
} // namespace dl
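
/*
 * Usage sketch (illustrative only, not part of the original header; shapes
 * and exponents are hypothetical). paddings may hold one value (applied to
 * both sides of every dim), two values (front/back of every dim), or
 * 2 * dims values. Here {1, 1, 2, 2, 0, 0} grows a [5, 5, 3] tensor to
 * [7, 9, 3], filling with the constant 0.
 */
inline void example_pad()
{
    dl::Tensor<int16_t> input;
    input.set_exponent(-8).set_shape({5, 5, 3}).malloc_element();

    dl::Tensor<int16_t> output = dl::nn::pad(input, {1, 1, 2, 2, 0, 0}, {0}, dl::PADDING_CONSTANT);
    output.print_shape(); // shape = (7, 9, 3)
}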

View File

@ -0,0 +1,82 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
/**
* @brief prelu(input).
*
* @param output as an output
* @param input as an input
* @param activation_element quantized alpha elements along channel axis
* @param activation_exponent exponent of quantized alpha elements
* @param assign_core not effective yet
*/
void prelu(Tensor<int16_t> &output,
Tensor<int16_t> &input,
const int16_t *activation_element,
const int activation_exponent,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief prelu(input).
*
* @param output as an output
* @param input as an input
* @param activation_element quantized alpha elements along channel axis
* @param activation_exponent exponent of quantized alpha elements
* @param assign_core not effective yet
*/
void prelu(Tensor<int8_t> &output,
Tensor<int8_t> &input,
const int8_t *activation_element,
const int activation_exponent,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief prelu(input)
*
* @tparam inplace whether to store the output directly into the input
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param input as an input
* @param activation_element quantized alpha elements along channel axis
* @param activation_exponent exponent of quantized alpha elements
* @param assign_core not effective yet
* @return prelu result, or no return (result stored in input)
*/
template <bool inplace = false, typename feature_t>
auto prelu(Tensor<feature_t> &input,
const feature_t *activation_element,
const int activation_exponent,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;
if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(input.exponent).set_shape(input.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
DL_LOG_NN_LATENCY_START();
prelu(output, input, activation_element, activation_exponent, assign_core);
DL_LOG_NN_LATENCY_END("prelu");
return output;
}
else
{
DL_LOG_NN_LATENCY_START();
prelu(input, input, activation_element, activation_exponent, assign_core);
DL_LOG_NN_LATENCY_END("prelu");
}
}
} // namespace nn
} // namespace dl
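
/*
 * Usage sketch (illustrative only, not part of the original header; the
 * alpha values, shapes, and exponents are hypothetical). PReLU takes one
 * quantized alpha per channel, here 8 channels.
 */
inline void example_prelu()
{
    static const int8_t alpha[8] = {13, 26, 13, 26, 13, 26, 13, 26}; // hypothetical per-channel alphas
    dl::Tensor<int8_t> x;
    x.set_exponent(-7).set_shape({4, 4, 8}).malloc_element();

    dl::Tensor<int8_t> y = dl::nn::prelu(x, alpha, -8); // out-of-place
    dl::nn::prelu<true>(x, alpha, -8);                  // in-place
}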

View File

@ -0,0 +1,70 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
/**
* @brief relu(input).
*
* @param output as an output
* @param input as an input
* @param assign_core not effective yet
*/
void relu(Tensor<int16_t> &output,
Tensor<int16_t> &input,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief relu(input).
*
* @param output as an output
* @param input as an input
* @param assign_core not effective yet
*/
void relu(Tensor<int8_t> &output,
Tensor<int8_t> &input,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief relu(input)
*
* @tparam inplace whether to store the output directly into the input
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param input as an input
* @param assign_core not effective yet
* @return relu result, or no return (result stored in input)
*/
template <bool inplace = false, typename feature_t>
auto relu(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;
if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(input.exponent).set_shape(input.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
DL_LOG_NN_LATENCY_START();
relu(output, input, assign_core);
DL_LOG_NN_LATENCY_END("relu");
return output;
}
else
{
DL_LOG_NN_LATENCY_START();
relu(input, input, assign_core);
DL_LOG_NN_LATENCY_END("relu");
}
}
} // namespace nn
} // namespace dl

View File

@ -0,0 +1,90 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
/**
* @brief activation(sub2d(input0, input1)).
*
* @param output as an output
* @param input0 as one input
* @param input1 as another input
* @param activation activation of sub2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
* @param output_exponent exponent of output; must be specified if, and only if, the operation is performed in place
*/
void sub2d(Tensor<int16_t> &output,
Tensor<int16_t> &input0,
Tensor<int16_t> &input1,
const Activation<int16_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
const int output_exponent = INT_MIN);
/**
* @brief activation(sub2d(input0, input1)).
*
* @param output as an output
* @param input0 as one input
* @param input1 as another input
* @param activation activation of sub2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
* @param output_exponent exponent of output; must be specified if, and only if, the operation is performed in place
*/
void sub2d(Tensor<int8_t> &output,
Tensor<int8_t> &input0,
Tensor<int8_t> &input1,
const Activation<int8_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
const int output_exponent = INT_MIN);
/**
* @brief activation(sub2d(input0, input1)).
*
* @tparam inplace whether to store the output directly into input0
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param output_exponent exponent of output
* @param input0 as one input
* @param input1 as another input
* @param activation activation of sub2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
* @return sub2d result, or no return (result stored in input0)
*/
template <bool inplace = false, typename feature_t>
auto sub2d(const int output_exponent,
Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const Activation<feature_t> *activation,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
assert(input0.is_same_shape(input1));
DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;
if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(output_exponent).set_shape(input0.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
DL_LOG_NN_LATENCY_START();
sub2d(output, input0, input1, activation, assign_core);
DL_LOG_NN_LATENCY_END("sub2d");
return output;
}
else
{
DL_LOG_NN_LATENCY_START();
sub2d(input0, input0, input1, activation, assign_core, output_exponent);
DL_LOG_NN_LATENCY_END("sub2d");
}
}
} // namespace nn
} // namespace dl

View File

@ -0,0 +1,427 @@
#pragma once
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "esp_system.h"
#include "esp_timer.h"
#include "freertos/FreeRTOS.h"
#include "dl_define.hpp"
extern "C"
{
#if CONFIG_TIE728_BOOST
void dl_tie728_memset_8b(void *ptr, const int value, const int n);
void dl_tie728_memset_16b(void *ptr, const int value, const int n);
void dl_tie728_memset_32b(void *ptr, const int value, const int n);
#endif
}
namespace dl
{
namespace tool
{
/**
* @brief Set memory zero.
*
* @param ptr pointer of memory
* @param n byte number
*/
void set_zero(void *ptr, const int n);
/**
* @brief Set array value.
*
* @tparam T supports all data types; when sizeof(T) equals 1, 2 or 4, the operation is accelerated by instructions
* @param ptr pointer of array
* @param value value to set
* @param len length of array
*/
template <typename T>
void set_value(T *ptr, const T value, const int len)
{
#if CONFIG_TIE728_BOOST
int *temp = (int *)&value;
if (sizeof(T) == 1)
dl_tie728_memset_8b(ptr, *temp, len);
else if (sizeof(T) == 2)
dl_tie728_memset_16b(ptr, *temp, len);
else if (sizeof(T) == 4)
dl_tie728_memset_32b(ptr, *temp, len);
else
#endif
for (size_t i = 0; i < len; i++)
ptr[i] = value;
}
/**
* @brief Copy memory.
*
* @param dst pointer of destination
* @param src pointer of source
* @param n byte number
*/
void copy_memory(void *dst, void *src, const int n);
/**
* @brief Allocate uninitialized memory. Use free_aligned() to free the memory.
*
* @param number number of elements
* @param size size of element
* @param align number of byte aligned, e.g., 16 means 16-byte aligned
* @return pointer of allocated memory. NULL for failed
*/
inline void *malloc_aligned(int number, int size, int align = 4)
{
assert((align > 0) && (((align & (align-1)) == 0)));
int total_size = number * size;
void *res = heap_caps_aligned_alloc(align, total_size, MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
#if DL_SPIRAM_SUPPORT
if (NULL == res)
res = heap_caps_aligned_alloc(align, total_size, MALLOC_CAP_SPIRAM);
#endif
if (NULL == res)
{
printf("Fail to malloc %d bytes from DRAM(%d bytyes) and PSRAM(%d bytes), PSRAM is %s.\n",
total_size,
heap_caps_get_free_size(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL),
heap_caps_get_free_size(MALLOC_CAP_SPIRAM),
DL_SPIRAM_SUPPORT ? "on" : "off");
return NULL;
}
return (void *)res;
}
/**
* @brief Allocate zero-initialized memory. Use free_aligned() to free the memory.
*
* @param number number of elements
* @param size size of element
* @param align number of byte aligned, e.g., 16 means 16-byte aligned
* @return pointer of allocated memory. NULL for failed
*/
inline void *calloc_aligned(int number, int size, int align = 4)
{
void *aligned = malloc_aligned(number, size, align);
if (aligned) // malloc_aligned() may return NULL, avoid zeroing a null pointer
    set_zero(aligned, number * size);
return (void *)aligned;
}
/**
* @brief Free the calloc_aligned() and malloc_aligned() memory
*
* @param address pointer of memory to free
*/
inline void free_aligned(void *address)
{
if (NULL == address)
return;
heap_caps_free(address);
}
/**
* @brief Allocate uninitialized memory, trying in preference order: internal aligned, internal, external aligned
*
* @param number number of elements
* @param size size of element
* @param align number of byte aligned, e.g., 16 means 16-byte aligned
* @return pointer of allocated memory. NULL for failed
*/
inline void *malloc_aligned_prefer(int number, int size, int align = 4)
{
assert((align > 0) && (((align & (align-1)) == 0)));
int total_size = number * size;
void *res = heap_caps_aligned_alloc(align, total_size, MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
if (NULL == res){
res = heap_caps_malloc(total_size, MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
}
#if DL_SPIRAM_SUPPORT
if (NULL == res){
res = heap_caps_aligned_alloc(align, total_size, MALLOC_CAP_SPIRAM);
}
#endif
if (NULL == res)
{
printf("Fail to malloc %d bytes from DRAM(%d bytyes) and PSRAM(%d bytes), PSRAM is %s.\n",
total_size,
heap_caps_get_free_size(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL),
heap_caps_get_free_size(MALLOC_CAP_SPIRAM),
DL_SPIRAM_SUPPORT ? "on" : "off");
return NULL;
}
return res;
}
/**
* @brief Allocate zero-initialized memory, trying in preference order: internal aligned, internal, external aligned
*
* @param number number of elements
* @param size size of element
* @param align number of byte aligned, e.g., 16 means 16-byte aligned
* @return pointer of allocated memory. NULL for failed
*/
inline void *calloc_aligned_prefer(int number, int size, int align = 4)
{
void *res = malloc_aligned_prefer(number, size, align);
if (res) // malloc_aligned_prefer() may return NULL, avoid zeroing a null pointer
    set_zero(res, number * size);
return (void *)res;
}
/**
* @brief Free the calloc_aligned_prefer() and malloc_aligned_prefer() memory
*
* @param address pointer of memory to free
*/
inline void free_aligned_prefer(void *address)
{
if (NULL == address)
return;
heap_caps_free(address);
}
/**
* @brief Truncate the input into int8_t range.
*
* @tparam T supports all integer types
* @param output as an output
* @param input as an input
*/
template <typename T>
void truncate(int8_t &output, T input)
{
if (input >= DL_Q8_MAX)
output = DL_Q8_MAX;
else if (input <= DL_Q8_MIN)
output = DL_Q8_MIN;
else
output = input;
}
/**
* @brief Truncate the input into int16_t range.
*
* @tparam T supports all integer types
* @param output as an output
* @param input as an input
*/
template <typename T>
void truncate(int16_t &output, T input)
{
if (input >= DL_Q16_MAX)
output = DL_Q16_MAX;
else if (input <= DL_Q16_MIN)
output = DL_Q16_MIN;
else
output = input;
}
/**
* @brief Calculate the exponent for quantizing 1/n into the max_value range.
*
* @param n the value whose reciprocal 1/n is to be quantized
* @param max_value the maximum representable value of the target range
*/
inline int calculate_exponent(int n, int max_value)
{
int exp = 0;
int tmp = 1 / n;
while (tmp < max_value)
{
exp += 1;
tmp = (1 << exp) / n;
}
exp -= 1;
return exp;
}
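/*
 * Worked example (illustrative): calculate_exponent(3, 32767) raises exp
 * until (1 << exp) / 3 reaches or exceeds 32767, then steps back one and
 * returns 16. 1/3 is thus quantized as 21845 * 2^-16, since
 * (1 << 16) / 3 = 21845, the largest approximation fitting int16_t range.
 */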
/**
* @brief Print vector in format "[x1, x2, ...]\n".
*
* @param array to print
*/
inline void print_vector(std::vector<int> &array, const char *message = NULL)
{
if (message)
printf("%s: ", message);
printf("[");
for (int i = 0; i < array.size(); i++)
{
printf(", %d" + (i ? 0 : 2), array[i]);
}
printf("]\n");
}
/**
* @brief Get the cycle object
*
* @return cycle count
*/
inline uint32_t get_cycle()
{
uint32_t ccount;
__asm__ __volatile__("rsr %0, ccount"
: "=a"(ccount)
:
: "memory");
return ccount;
}
class Latency
{
private:
const uint32_t size; /*<! size of queue */
uint32_t *queue; /*<! queue for storing history period */
uint32_t period; /*<! current period */
uint32_t sum; /*<! sum of period */
uint32_t count; /*<! the number of added period */
uint32_t next; /*<! point to next element in queue */
uint32_t timestamp; /*<! record the start timestamp >*/
public:
/**
* @brief Construct a new Latency object.
*
* @param size
*/
Latency(const uint32_t size = 1) : size(size),
period(0),
sum(0),
count(0),
next(0)
{
this->queue = (this->size > 1) ? (uint32_t *)calloc(this->size, sizeof(uint32_t)) : NULL;
}
/**
* @brief Destroy the Latency object.
*
*/
~Latency()
{
if (this->queue)
free(this->queue);
}
/**
* @brief Record the start timestamp.
*
*/
void start()
{
#if DL_LOG_LATENCY_UNIT
this->timestamp = get_cycle();
#else
this->timestamp = esp_timer_get_time();
#endif
}
/**
* @brief Record the period.
*
*/
void end()
{
#if DL_LOG_LATENCY_UNIT
this->period = get_cycle() - this->timestamp;
#else
this->period = esp_timer_get_time() - this->timestamp;
#endif
if (this->queue)
{
this->sum -= this->queue[this->next];
this->queue[this->next] = this->period;
this->sum += this->queue[this->next];
this->next++;
this->next = this->next % this->size;
if (this->count < this->size)
{
this->count++;
}
}
}
/**
* @brief Return the period.
*
* @return the latest recorded period
*/
uint32_t get_period()
{
return this->period;
}
/**
* @brief Get the average period.
*
* @return average latency
*/
uint32_t get_average_period()
{
return this->queue ? (this->sum / this->count) : this->period;
}
/**
* @brief Clear the period
*
*/
void clear_period()
{
this->period = 0;
}
/**
* @brief Print in format "latency: {this->period} {unit}\n".
*/
void print()
{
#if DL_LOG_LATENCY_UNIT
printf("latency: %15u cycle\n", this->get_average_period());
#else
printf("latency: %15u us\n", this->get_average_period());
#endif
}
/**
* @brief Print in format "{message}: {this->period} {unit}\n".
*
* @param message message of print
*/
void print(const char *message)
{
#if DL_LOG_LATENCY_UNIT
printf("%s: %15u cycle\n", message, this->get_average_period());
#else
printf("%s: %15u us\n", message, this->get_average_period());
#endif
}
/**
* @brief Print in format "{prefix}::{key}: {this->period} {unit}\n".
*
* @param prefix prefix of print
* @param key key of print
*/
void print(const char *prefix, const char *key)
{
#if DL_LOG_LATENCY_UNIT
printf("%s::%s: %u cycle\n", prefix, key, this->get_average_period());
#else
printf("%s::%s: %u us\n", prefix, key, this->get_average_period());
#endif
}
};
} // namespace tool
} // namespace dl
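
/*
 * Usage sketch (illustrative only, not part of the original header). A
 * Latency object constructed with size > 1 keeps a ring buffer of the most
 * recent periods and reports their average.
 */
inline void example_latency()
{
    dl::tool::Latency latency(10); // average over the last 10 measurements

    for (int i = 0; i < 10; i++)
    {
        latency.start();
        // ... code under measurement ...
        latency.end();
    }
    latency.print("measured_code"); // "measured_code: <average> us" (or cycles)
}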

View File

@ -0,0 +1,74 @@
#pragma once
#include <stdint.h>
#if CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/rom/cache.h"
#include "soc/extmem_reg.h"
#endif
namespace dl
{
namespace tool
{
namespace cache
{
/**
* @brief Initialize preload.
*
* @param preload One of 1 or 0,
* - 1: turn on the preload
* - 0: turn off the preload
* @return
* - 1: Initialize successfully
* - 0: Initialize successfully, autoload has been turned off
* - -1: Initialize failed, the chip does not support preload
*/
int8_t preload_init(uint8_t preload = 1);
/**
* @brief Preload memory.
*
* @param addr the start address of data to be preloaded
* @param size the size of the data in bytes to be preloaded
*/
void preload_func(uint32_t addr, uint32_t size);
/**
* @brief Initialize autoload.
*
* @param autoload One of 1 or 0,
* - 1: turn on the autoload
* - 0: turn off the autoload
* @param trigger One of 0 or 1 or 2,
* - 0: miss, TODO:@yuanjiong
* - 1: hit, TODO:@yuanjiong
* - 2: both,TODO:@yuanjiong
* @param line_size the number of cache lines to be autoloaded
* @return status,
* - 1: Initialize successfully
* - 0: Initialize successfully, preload has been turned off
* - -1: Initialize failed, the chip does not support autoload
*/
int8_t autoload_init(uint8_t autoload = 1, uint8_t trigger = 2, uint8_t line_size = 0);
/**
* @brief Autoload memory.
*
* @param addr1 the start address of data1 to be autoloaded
* @param size1 the size of data1 in bytes to be autoloaded
* @param addr2 the start address of data2 to be autoloaded
* @param size2 the size of data2 in bytes to be autoloaded
*/
void autoload_func(uint32_t addr1, uint32_t size1, uint32_t addr2, uint32_t size2);
/**
* @brief Autoload memory.
*
* @param addr1 the start address of data1 to be autoloaded
* @param size1 the size of data1 in bytes to be autoloaded
*/
void autoload_func(uint32_t addr1, uint32_t size1);
}
} // namespace tool
} // namespace dl
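
/*
 * Usage sketch (illustrative only, not part of the original header; the
 * weights buffer is hypothetical). Preloading pulls a known-hot region into
 * cache before use; preload_init() reports whether the chip supports it.
 */
inline void example_preload(const uint8_t *weights, uint32_t weights_size)
{
    if (dl::tool::cache::preload_init(1) == 1)
    {
        dl::tool::cache::preload_func((uint32_t)weights, weights_size);
    }
}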

View File

@ -0,0 +1,129 @@
#pragma once
#include "dl_define.hpp"
#include <vector>
#include <stdint.h>
namespace dl
{
/**
* @brief Base class of Filter, Bias, Activation.
*
* @tparam T supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize,
* - int8_t: stands for operation in int8_t quantize.
*/
template <typename T>
class Constant
{
public:
const T *element; /*<! point to element. >*/
const int exponent; /*<! exponent of element. >*/
const std::vector<int> shape; /*<! shape of element. >*/
/**
* @brief Construct a new Constant object.
*
* @param element point to element.
* @param exponent exponent of element.
* @param shape shape of Constant.
*/
Constant(const T *element, const int exponent, const std::vector<int> shape);
};
/**
* @brief Filter.
* NOTE: The shape format of filter is fixed, but the element sequence depends on the optimization method.
* - 1D: reserved
* - 2D: shape format is [filter_height, filter_width, input_channel, output_channel]. dilation format is [height, width]
*
* @tparam T supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize,
* - int8_t: stands for operation in int8_t quantize.
*/
template <typename T>
class Filter : public Constant<T>
{
public:
const std::vector<int> dilation; /*<! - 1D: reserved >*/
/*<! - 2D: [dilation_in_height, dilation_in_width] >*/
std::vector<int> shape_with_dilation; /*<! - 1D: reserved >*/
/*<! - 2D: [filter_height_with_dilation, filter_width_with_dilation, input_channel, output_channel] >*/
const int8_t* channel_exponent; /*<! exponent for per-channel >*/
const int channel_exponent_size;
/**
* @brief Construct a new Filter object.
*
* @param element point to element
* @param exponent exponent of element
* @param shape shape of Filter,
* - 1D: reserved
* - 2D: for convolution is [filter_height, filter_width, input_channel, output_channel],
* for depthwise convolution is [filter_height, filter_width, input_channel, 1]
* @param dilation dilation of Filter
* - 1D: reserved
* - 2D: [dilation_in_height, dilation_in_width]
*/
Filter(const T *element, const int exponent, const std::vector<int> shape, const std::vector<int> dilation = {1, 1});
/**
* @brief Construct a new Filter object. It is only available for int16_t.
*
* @param element point to element
* @param channel_exponent exponent for per-channel
* @param channel_exponent_size size of exponent
* @param shape shape of element
* @param dilation dilation of Filter
* - 1D: reserved
* - 2D: [dilation_in_height, dilation_in_width]
*/
Filter(const T *element, const int8_t* channel_exponent, const int channel_exponent_size, const std::vector<int> shape, const std::vector<int> dilation = {1, 1});
/**
* @brief Print the n-th filter.
*
* @param n index of output_channel
* @param message to print
*/
void print2d_n(const int n, const char *message) const;
};
/**
* @brief Bias.
*
* @tparam T supports int16_t and int8_t
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename T>
class Bias : public Constant<T>
{
public:
using Constant<T>::Constant;
};
/**
* @brief Activation.
*
* @tparam T supports int16_t and int8_t
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename T>
class Activation : public Constant<T>
{
public:
const activation_type_t type; /*<! One of Linear or ReLU or LeakyReLU or PReLU */
/**
* @brief Construct a new Activation object.
*
* @param type One of Linear or ReLU or LeakyReLU or PReLU
* @param element point to element of activation
* @param exponent exponent of element
* @param shape shape of element
*/
Activation(const activation_type_t type, const T *element = NULL, const int exponent = 0, const std::vector<int> shape = {0});
};
} // namespace dl
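
/*
 * Usage sketch (illustrative only, not part of the original header; all
 * element values, exponents, and shapes are hypothetical). A 3x3
 * convolution filter with 1 input channel and 2 output channels, its bias,
 * and a ReLU activation.
 */
inline void example_constants()
{
    static const int16_t filter_element[18] = {0}; // 3 * 3 * 1 * 2 elements, hypothetical
    static const int16_t bias_element[2] = {0};    // one per output channel, hypothetical

    const dl::Filter<int16_t> filter(filter_element, -12, {3, 3, 1, 2});
    const dl::Bias<int16_t> bias(bias_element, -12, {2});
    const dl::Activation<int16_t> relu(dl::ReLU);
}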

View File

@ -0,0 +1,553 @@
#pragma once
#include <stdio.h>
#include <vector>
#include <assert.h>
#include <iostream>
#include "dl_tool.hpp"
namespace dl
{
/**
* @brief Tensor
*
* @tparam T support uint8_t, int8_t, int16_t and float.
*/
template <typename T>
class Tensor
{
private:
int size; /*<! size of element including padding */
bool auto_free; /*<! free element when object destroy */
std::vector<int> axis_offset; /*<! element offset of each axis */
public:
T *element; /*<! point to element */
int exponent; /*<! exponent of element */
std::vector<int> shape; /*<! shape of Tensor */
/**
* @brief Construct a new Tensor object
*
*/
Tensor() : auto_free(true), element(NULL), exponent(0) { this->set_shape({0}); }
/**
* @brief Construct a new Tensor object by copying from input.
*
* @param input an input Tensor
* @param deep one of true or false
* - true: apply a new memory, copy value from input.element to this new memory
* - false: take over input.element to this->element
*/
Tensor(Tensor<T> &input, bool deep) : size(input.size),
auto_free(input.auto_free),
exponent(input.exponent)
{
this->set_shape(input.shape);
if (deep && (input.element != NULL))
{
int size_real = input.get_size();
T *new_element = (T *)tool::calloc_aligned_prefer(size_real, sizeof(T), 16);
tool::copy_memory(new_element, input.element, size_real * sizeof(T));
this->element = new_element;
}
else
{
this->element = input.element;
this->auto_free = false;
}
}
/**
* @brief Destroy the Tensor object
*
*/
~Tensor()
{
if (this->auto_free)
this->free_element();
}
/**
* @brief copy the element of the input Tensor.
*
* @param input an input Tensor
* @param deep one of true or false
* - true: apply a new memory, copy value from input.element to this new memory
* - false: take over input.element to this->element
* @return Tensor<T>& self
*/
Tensor<T> &copy_element(Tensor<T> &input, bool deep)
{
assert(this->get_size() == input.get_size());
assert(input.element != NULL);
this->malloc_element();
if (deep)
{
tool::copy_memory(this->element, input.element, this->get_size() * sizeof(T));
}
else
{
this->element = input.element;
this->auto_free = false;
}
return *this;
}
/**
* @brief Set the auto free object.
*
* @param auto_free one of true or false
* - true: free element when object destroyed
* - false: do not
* @return self
*/
Tensor<T> &set_auto_free(const bool auto_free)
{
this->auto_free = auto_free;
return *this;
}
/**
* @brief Set the element.
*
* @param element point to element memory
* @return self
*/
Tensor<T> &set_element(T *element, const bool auto_free = false)
{
assert(this->element == NULL);
this->element = element;
this->auto_free = auto_free;
return *this;
}
/**
* @brief Set the exponent.
*
* @param exponent exponent of element
* @return self
*/
Tensor<T> &set_exponent(const int exponent)
{
this->exponent = exponent;
return *this;
}
/**
* @brief Set the shape of Tensor.
*
* @param shape the target shape
*
* @return self
*/
Tensor<T> &set_shape(const std::vector<int> shape);
/**
* @brief print the shape of the Tensor
*
*/
void print_shape()
{
if (this->shape.size())
{
printf("shape = (");
for (int i = 0; i < this->shape.size() - 1; i++)
{
printf("%d, ", this->shape[i]);
}
printf("%d)\n", this->shape.back());
}
else
{
printf("shape = ()\n");
}
}
/**
* @brief flatten the Tensor
*
* @return Tensor<T>& self
*/
Tensor<T> &flatten();
/**
* @brief Change a new shape to the Tensor without changing its data.
*
* @param shape the target shape
* @return Tensor<T>& self
*/
Tensor<T> &reshape(std::vector<int> shape);
/**
* @brief Remove dims with length==1 from Tensor
*
* @param axis the dim to be removed. Make sure the length of the dim is equal to 1.
* if axis == INT32_MAX, all the dims with length==1 will be removed.
* @return Tensor<T>& self
*/
Tensor<T> &squeeze(int axis = INT32_MAX);
/**
* @brief Insert a new dim that will appear at the axis position in the expanded Tensor shape.
*
* @param axis the dim to be inserted
* @return Tensor<T>& self
*/
Tensor<T> &expand_dims(int axis);
/**
* @brief Insert a new dim that will appear at the axis position in the expanded Tensor shape.
*
* @param axis the dim to be inserted
* @return Tensor<T>& self
*/
Tensor<T> &expand_dims(std::vector<int> axis);
/**
* @brief Reverse or permute the axes of the Tensor
*
* @param perm the new arrangement of the dims. If perm == {}, the dims arrangement will be reversed.
* @return Tensor<T>& self
*/
Tensor<T> &transpose(std::vector<int> perm = {});
/**
* @brief Reverse or permute the axes of the input Tensor
*
* @param input the input Tensor
* @param perm the new arrangement of the dims. If perm == {}, the dims arrangement will be reversed.
* @return Tensor<T>& self
*/
Tensor<T> &transpose(Tensor<T> &input, std::vector<int> perm = {});
/**
* @brief Get the element pointer.
*
* @return pointer to memory
*/
T *get_element_ptr()
{
return this->element;
}
/**
* @brief Get the element value.
*
* @param index the index of each dim.
* @return T element value
*/
T get_element_value(const std::vector<int> index)
{
return this->element[this->get_element_index(index)];
}
/**
* @brief Get the element value.
*
* @param index the index of the element.
* @return T element value
*/
T get_element_value(int index)
{
return this->element[index];
}
/**
* @brief Set the all the element to value.
*
* @param value target value
* @return Tensor<T>& self
*/
Tensor<T> &set_value(T value);
/**
* @brief Set the the element to value
*
* @param value target value, it will be broadcast automatically.
* @return Tensor<T>& self
*/
Tensor<T> &set_value(Tensor<T> &value);
/**
* @brief Set the sliced element to value
*
* @param axis_index_range range of slices
* @param value target value
* @return Tensor<T>& self
*/
Tensor<T> &set_value(std::vector<int> axis_index_range, T value);
/**
* @brief Set the sliced element to value
*
* @param axis_index_range range of slices
* @param value target value, it will be broadcast automatically.
* @return Tensor<T>& self
*/
Tensor<T> &set_value(std::vector<int> axis_index_range, Tensor<T> &value);
/**
* @brief Extracts a slice from the Tensor.
*
* @param axis_index_range range of slices
* @return Tensor<T> output
*/
Tensor<T> slice(std::vector<int> axis_index_range);
/**
* @brief Reverses specific dims of the tensor.
*
* @param axis The dims to be reversed
* @return Tensor<T>&
*/
Tensor<T> &reverse(std::vector<int> axis);
/**
* @brief Get the size of Tensor.
*
* @return the size of Tensor.
*/
int get_size()
{
return this->size;
}
/**
* @brief Get the axis offset
*
* @return std::vector<int> the axis offset
*/
std::vector<int> get_axis_offset()
{
return this->axis_offset;
}
/**
* @brief Allocate zero-initialized memory, only if this->element is NULL.
*
* @param auto_free one of true or false
* - true: free element when object destroyed
* - false: do not
* @return
* - true: on success
* - false: if allocation failed
*/
bool calloc_element(const bool auto_free = true)
{
if (this->element != NULL)
return false;
this->element = (T *)dl::tool::calloc_aligned_prefer(this->get_size(), sizeof(T), 16);
this->auto_free = auto_free;
return true;
}
/**
* @brief Allocate uninitialized memory, only if this->element is NULL.
*
* @param auto_free one of true or false
* - true: free element when object destroyed
* - false: do not
* @return
* - true: on success
* - false: if allocation failed
*/
bool malloc_element(const bool auto_free = true)
{
if (this->element != NULL)
return false;
this->element = (T *)tool::malloc_aligned_prefer(this->get_size(), sizeof(T), 16);
this->auto_free = auto_free;
return true;
}
/**
 * @brief Free element if auto_free is set and this->element is not NULL,
 * then set this->element to NULL.
 */
void free_element()
{
if (this->auto_free && this->element)
{
tool::free_aligned_prefer(this->element);
this->element = NULL;
}
}
/**
* @brief print the element of the tensor
*
* @param axis_index_range the element range of each dims to be print. if axis_index_range == {}, all the element will be print.
* @param message to print
*/
void print(std::vector<int> axis_index_range = {}, const char *message = "");
/**
* @brief print all the element of the Tensor.
*
* @param message to print
*/
void print_all(const char *message = "")
{
std::cout << "\n"
<< message << " | ";
this->print_shape();
for (int i = 0; i < this->get_size(); i++)
{
std::cout << this->element[i] << " ";
}
std::cout << "\n";
return;
}
/**
* @brief Get the index of each dims
*
* @param element_index the index of the element
* @return std::vector<int> the index of each dims
*/
std::vector<int> get_axis_index(int element_index);
/**
* @brief Get the index of element
*
* @param axis_index the index of each dims
* @return int the index of element
*/
int get_element_index(const std::vector<int> axis_index);
/**
* @brief Check the element value with input ground-truth.
*
* @param gt_element ground-truth value of element
* @param bias permissible error
* @param info one of true or false
* - true: shape and result
* - false: do not
* @param failed_number maximum number of wrong element that will be printed
*
* @return
* - true: in permissible error
* - false: not
*/
bool check_element(T *gt_element, int bias = 2, bool info = true, int failed_number = 0)
{
int count = 0;
if (info)
this->print_shape();
int size = this->get_size();
for (int i = 0; i < size; i++)
{
if (DL_ABS(this->element[i] - gt_element[i]) > bias)
{
std::vector<int> index = get_axis_index(i);
std::cout << "element[";
for (int j = 0; j < index.size() - 1; j++)
{
std::cout << index[j] << ", ";
}
std::cout << index.back() << "]: ";
std::cout << +this->element[i] << " v.s. " << +gt_element[i] << "\n";
count++;
if (count > failed_number)
return false;
}
}
if (count)
return false;
if (info)
printf("PASS\n");
return true;
}
/**
* @brief Check the shape is the same as the shape of input.
*
* @param input an input tensor
* @return
* - true: same shape
* - false: not
*/
bool is_same_shape(Tensor<T> &input)
{
if (input.shape.size() != this->shape.size())
{
return false;
}
for (int i = 0; i < this->shape.size(); i++)
{
if (input.shape[i] != this->shape[i])
{
return false;
}
}
return true;
}
Tensor<T> &operator=(const Tensor<T> &input)
{
this->auto_free = input.auto_free;
this->exponent = input.exponent;
int size_real_tmp = this->size;
int size_input_real = input.size;
this->set_shape(input.shape);
if (input.element)
{
if (this->element)
{
if (size_real_tmp != size_input_real)
{
tool::free_aligned_prefer(this->element);
T *new_element = (T *)tool::malloc_aligned_prefer(size_input_real, sizeof(T), 16);
tool::copy_memory(new_element, input.element, size_input_real * sizeof(T));
this->element = new_element;
}
else
{
tool::copy_memory(this->element, input.element, size_input_real * sizeof(T));
}
}
else
{
T *new_element = (T *)tool::malloc_aligned_prefer(size_input_real, sizeof(T), 16);
tool::copy_memory(new_element, input.element, size_input_real * sizeof(T));
this->element = new_element;
}
return *this;
}
else
{
if (this->element)
{
tool::free_aligned_prefer(this->element);
this->element = NULL;
}
return *this;
}
}
static Tensor<T> arange(int size)
{
Tensor<T> output;
output.set_auto_free(true).set_exponent(0).set_shape({size}).malloc_element();
for (int i = 0; i < size; ++i)
{
output.element[i] = i;
}
return output;
}
};
} // namespace dl
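
/*
 * Usage sketch (illustrative only, not part of the original header). Build
 * a 2x3 tensor, fill it, and address an element by its per-axis index.
 */
inline void example_tensor()
{
    dl::Tensor<int16_t> t;
    t.set_exponent(0).set_shape({2, 3}).malloc_element();
    t.set_value(7); // every element becomes 7

    int16_t value = t.get_element_value({1, 2}); // element at row 1, column 2

    t.transpose();     // shape becomes (3, 2)
    t.print_shape();
}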