forked from espressif/arduino-esp32
IDF master b86fe0c66c
@@ -18,6 +18,11 @@
 # define ASIO_NO_TYPEID
 # endif // CONFIG_COMPILER_RTTI
 
+//
+// Suppress OpenSSL deprecation warning, when building ASIO
+//
+#define ESP_OPENSSL_SUPPRESS_LEGACY_WARNING
+
 //
 // LWIP compatibility inet and address macros/functions
 //
@@ -191,6 +191,9 @@ int coap_handle_response_get_block(coap_context_t *context,
 void coap_block_delete_lg_xmit(coap_session_t *session,
                                coap_lg_xmit_t *lg_xmit);
 
+coap_tick_t coap_block_check_lg_xmit_timeouts(coap_session_t *session,
+                                              coap_tick_t now);
+
 /**
  * The function that does all the work for the coap_add_data_large*()
  * functions.
@@ -27,6 +27,12 @@ typedef struct coap_dtls_pki_t coap_dtls_pki_t;
 #ifndef COAP_DTLS_HINT_LENGTH
 #define COAP_DTLS_HINT_LENGTH 128
 #endif
+#ifndef COAP_DTLS_MAX_PSK_IDENTITY
+#define COAP_DTLS_MAX_PSK_IDENTITY 64
+#endif
+#ifndef COAP_DTLS_MAX_PSK
+#define COAP_DTLS_MAX_PSK 64
+#endif
 
 typedef enum coap_dtls_role_t {
   COAP_DTLS_ROLE_CLIENT, /**< Internal function invoked for client */
@@ -24,34 +24,34 @@
  * Scalar type to represent different events, e.g. DTLS events or
  * retransmission timeouts.
  */
-typedef unsigned int coap_event_t;
-
+typedef enum coap_event_t {
   /**
    * (D)TLS events for COAP_PROTO_DTLS and COAP_PROTO_TLS
    */
-#define COAP_EVENT_DTLS_CLOSED 0x0000
-#define COAP_EVENT_DTLS_CONNECTED 0x01DE
-#define COAP_EVENT_DTLS_RENEGOTIATE 0x01DF
-#define COAP_EVENT_DTLS_ERROR 0x0200
+  COAP_EVENT_DTLS_CLOSED = 0x0000,
+  COAP_EVENT_DTLS_CONNECTED = 0x01DE,
+  COAP_EVENT_DTLS_RENEGOTIATE = 0x01DF,
+  COAP_EVENT_DTLS_ERROR = 0x0200,
 
   /**
    * TCP events for COAP_PROTO_TCP and COAP_PROTO_TLS
    */
-#define COAP_EVENT_TCP_CONNECTED 0x1001
-#define COAP_EVENT_TCP_CLOSED 0x1002
-#define COAP_EVENT_TCP_FAILED 0x1003
+  COAP_EVENT_TCP_CONNECTED = 0x1001,
+  COAP_EVENT_TCP_CLOSED = 0x1002,
+  COAP_EVENT_TCP_FAILED = 0x1003,
 
   /**
    * CSM exchange events for reliable protocols only
    */
-#define COAP_EVENT_SESSION_CONNECTED 0x2001
-#define COAP_EVENT_SESSION_CLOSED 0x2002
-#define COAP_EVENT_SESSION_FAILED 0x2003
+  COAP_EVENT_SESSION_CONNECTED = 0x2001,
+  COAP_EVENT_SESSION_CLOSED = 0x2002,
+  COAP_EVENT_SESSION_FAILED = 0x2003,
 
   /**
-   * BLOCK2 receive errors
+   * (Q-)BLOCK receive errors
    */
-#define COAP_EVENT_PARTIAL_BLOCK 0x3001
+  COAP_EVENT_PARTIAL_BLOCK = 0x3001
+} coap_event_t;
 
 /**
  * Type for event handler functions that can be registered with a CoAP
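Since coap_event_t is now a real enum rather than a plain unsigned int, handlers can switch over it. A minimal sketch (hedged: assumes libcoap's usual handler signature and coap_register_event_handler(); `ctx` is a placeholder for a coap_context_t created elsewhere):

static int my_event_handler(coap_session_t *session, const coap_event_t event) {
  switch (event) {
  case COAP_EVENT_DTLS_CONNECTED: /* (D)TLS handshake finished */ break;
  case COAP_EVENT_SESSION_CLOSED: /* reliable session closed   */ break;
  case COAP_EVENT_PARTIAL_BLOCK:  /* incomplete (Q-)BLOCK body  */ break;
  default: break;
  }
  return 0;
}
/* Registration, assuming a coap_context_t *ctx:
   coap_register_event_handler(ctx, my_event_handler); */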
@@ -88,7 +88,11 @@ COAP_STATIC_INLINE uint64_t coap_ticks_to_rt_us(coap_tick_t t) {
 #elif defined(RIOT_VERSION)
 #include <xtimer.h>
 
+#ifdef XTIMER_HZ
 #define COAP_TICKS_PER_SECOND (XTIMER_HZ)
+#else /* XTIMER_HZ */
+#define COAP_TICKS_PER_SECOND (XTIMER_HZ_BASE)
+#endif /* XTIMER_HZ */
 
 typedef uint64_t coap_tick_t;
 typedef int64_t coap_tick_diff_t;
@@ -15,6 +15,7 @@
#include <stdlib.h>
#include <string.h>
#ifndef _WIN32
#include <sys/select.h>
#include <sys/time.h>
#endif
#include <time.h>
@@ -299,7 +299,6 @@ typedef enum coap_pdu_code_t {
   COAP_REQUEST_CODE_PATCH = COAP_REQUEST_PATCH,
   COAP_REQUEST_CODE_IPATCH = COAP_REQUEST_IPATCH,
 
-  COAP_RESPONSE_CODE_OK = COAP_RESPONSE_CODE(200),
   COAP_RESPONSE_CODE_CREATED = COAP_RESPONSE_CODE(201),
   COAP_RESPONSE_CODE_DELETED = COAP_RESPONSE_CODE(202),
   COAP_RESPONSE_CODE_VALID = COAP_RESPONSE_CODE(203),
@@ -83,7 +83,8 @@ typedef void (*coap_method_handler_t)
  * variable of coap_str_const_t has to point to constant text, or point to data
  * within the allocated coap_str_const_t parameter.
  *
- * @param uri_path The string URI path of the new resource.
+ * @param uri_path The string URI path of the new resource. The leading '/' is
+ *                 not normally required - e.g. just "full/path/for/resource".
  * @param flags    Flags for memory management (in particular release of
  *                 memory). Possible values:@n
  *
@@ -29,6 +29,7 @@
 #define CONFIG_BOOT_ROM_LOG_ALWAYS_ON 1
 #define CONFIG_ESPTOOLPY_BAUD_OTHER_VAL 115200
 #define CONFIG_ESPTOOLPY_FLASHMODE_DIO 1
+#define CONFIG_ESPTOOLPY_FLASH_SAMPLE_MODE_STR 1
 #define CONFIG_ESPTOOLPY_FLASHMODE "dio"
 #define CONFIG_ESPTOOLPY_FLASHFREQ_80M 1
 #define CONFIG_ESPTOOLPY_FLASHFREQ "80m"
@@ -340,6 +341,7 @@
 #define CONFIG_LWIP_GARP_TMR_INTERVAL 60
 #define CONFIG_LWIP_TCPIP_RECVMBOX_SIZE 32
 #define CONFIG_LWIP_DHCP_DOES_ARP_CHECK 1
+#define CONFIG_LWIP_DHCP_OPTIONS_LEN 68
 #define CONFIG_LWIP_DHCPS 1
 #define CONFIG_LWIP_DHCPS_LEASE_UNIT 60
 #define CONFIG_LWIP_DHCPS_MAX_STATION_NUM 8
@@ -450,6 +452,7 @@
 #define CONFIG_MDNS_TASK_AFFINITY 0x0
 #define CONFIG_MDNS_SERVICE_ADD_TIMEOUT_MS 2000
 #define CONFIG_MDNS_TIMER_PERIOD_MS 100
+#define CONFIG_MDNS_MULTIPLE_INSTANCE 1
 #define CONFIG_MQTT_PROTOCOL_311 1
 #define CONFIG_MQTT_TRANSPORT_SSL 1
 #define CONFIG_MQTT_TRANSPORT_WEBSOCKET 1
@@ -622,5 +625,5 @@
 #define CONFIG_TIMER_TASK_STACK_SIZE CONFIG_ESP_TIMER_TASK_STACK_SIZE
 #define CONFIG_TOOLPREFIX CONFIG_SDK_TOOLPREFIX
 #define CONFIG_UDP_RECVMBOX_SIZE CONFIG_LWIP_UDP_RECVMBOX_SIZE
-#define CONFIG_ARDUINO_IDF_COMMIT "3e370c4296"
+#define CONFIG_ARDUINO_IDF_COMMIT "b86fe0c66c"
 #define CONFIG_ARDUINO_IDF_BRANCH "master"
@@ -856,16 +856,35 @@ esp_err_t rmt_remove_channel_from_group(rmt_channel_t channel);
 
 #if SOC_RMT_SUPPORT_TX_LOOP_COUNT
 /**
- * @brief Set loop count for RMT TX channel
+ * @brief Set loop count threshold value for RMT TX channel
+ *
+ * When tx loop count reaches this value, an ISR callback will notify user
  *
  * @param channel RMT channel
- * @param count loop count
+ * @param count loop count, 1 ~ 1023
  * @return
  *     - ESP_ERR_INVALID_ARG Parameter error
  *     - ESP_OK Success
  */
 esp_err_t rmt_set_tx_loop_count(rmt_channel_t channel, uint32_t count);
-#endif
+
+/**
+ * @brief Enable or disable the feature that when loop count reaches the threshold, RMT will stop transmitting.
+ *
+ * - When the loop auto-stop feature is enabled, it will halt RMT transmission after the loop count reaches a certain threshold
+ * - When disabled, the RMT transmission continues indefinitely until halted by the user
+ *
+ * @note The auto-stop feature is implemented in hardware on particular targets (i.e. those with SOC_RMT_SUPPORT_TX_LOOP_AUTOSTOP defined).
+ *       Otherwise, the auto-stop feature is implemented in software via the interrupt.
+ *
+ * @param channel RMT channel
+ * @param en enable bit
+ * @return
+ *     - ESP_ERR_INVALID_ARG Parameter error
+ *     - ESP_OK Success
+ */
+esp_err_t rmt_enable_tx_loop_autostop(rmt_channel_t channel, bool en);
+#endif // SOC_RMT_SUPPORT_TX_LOOP_COUNT
 
 /**
  * @brief Reset RMT TX/RX memory index.
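A minimal usage sketch of the new loop-count API (assumptions: a TX channel already set up via rmt_config()/rmt_driver_install(); the item buffer is supplied by the caller):

// Transmit a looped pattern and stop automatically after 100 repetitions.
void start_bounded_loop(const rmt_item32_t *items, int num_items)
{
    rmt_set_tx_loop_count(RMT_CHANNEL_0, 100);        // loop threshold: 100
    rmt_enable_tx_loop_autostop(RMT_CHANNEL_0, true); // halt TX at the threshold
    rmt_write_items(RMT_CHANNEL_0, items, num_items, false); // start, don't block
}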
@@ -0,0 +1,17 @@
#pragma once

#include <vector>

namespace dl
{
    namespace detect
    {
        typedef struct
        {
            int category;              /*<! category index */
            float score;               /*<! score of box */
            std::vector<int> box;      /*<! [left_up_x, left_up_y, right_down_x, right_down_y] */
            std::vector<int> keypoint; /*<! [x1, y1, x2, y2, ...] */
        } result_t;
    } // namespace detect
} // namespace dl
tools/sdk/esp32c3/include/esp-face/include/dl_define.hpp (new file, 99 lines)
@@ -0,0 +1,99 @@
#pragma once

#include <climits>
#include "sdkconfig.h"

#define DL_LOG_LATENCY_UNIT 0  /*<! - 1: cycle */
                               /*<! - 0: us */
#define DL_LOG_NN_LATENCY 0    /*<! - 1: print the latency of each part of nn */
                               /*<! - 0: mute */
#define DL_LOG_LAYER_LATENCY 0 /*<! - 1: print the latency of each part of layer */
                               /*<! - 0: mute */

#if CONFIG_SPIRAM_SUPPORT || CONFIG_ESP32_SPIRAM_SUPPORT || CONFIG_ESP32S2_SPIRAM_SUPPORT || CONFIG_ESP32S3_SPIRAM_SUPPORT
#define DL_SPIRAM_SUPPORT 1
#else
#define DL_SPIRAM_SUPPORT 0
#endif

#if CONFIG_IDF_TARGET_ESP32
#define CONFIG_DEFAULT_ASSIGN_CORE \
    {                              \
    } // TODO: change the default to 0,1 once multi-core tasks are done
#elif CONFIG_IDF_TARGET_ESP32S2
#define CONFIG_DEFAULT_ASSIGN_CORE \
    {                              \
    }
#elif CONFIG_IDF_TARGET_ESP32S3
#define CONFIG_DEFAULT_ASSIGN_CORE \
    {                              \
    } // TODO: change the default to 0,1 once multi-core tasks are done
#elif CONFIG_IDF_TARGET_ESP32C3
#define CONFIG_DEFAULT_ASSIGN_CORE \
    {                              \
    }
#else
#define CONFIG_DEFAULT_ASSIGN_CORE \
    {                              \
    }
#endif

#define DL_Q16_MIN (-32768)
#define DL_Q16_MAX (32767)
#define DL_Q8_MIN (-128)
#define DL_Q8_MAX (127)

#ifndef DL_MAX
#define DL_MAX(x, y) (((x) < (y)) ? (y) : (x))
#endif

#ifndef DL_MIN
#define DL_MIN(x, y) (((x) < (y)) ? (x) : (y))
#endif

#ifndef DL_CLIP
#define DL_CLIP(x, low, high) ((x) < (low)) ? (low) : (((x) > (high)) ? (high) : (x))
#endif

#ifndef DL_ABS
#define DL_ABS(x) ((x) < 0 ? (-(x)) : (x))
#endif

#ifndef DL_RIGHT_SHIFT
#define DL_RIGHT_SHIFT(x, shift) ((shift) > 0) ? ((x) >> (shift)) : ((x) << -(shift))
#endif

#ifndef DL_LEFT_SHIFT
#define DL_LEFT_SHIFT(x, shift) ((shift) > 0) ? ((x) << (shift)) : ((x) >> -(shift))
#endif

namespace dl
{
    typedef enum
    {
        Linear,    /*<! Linear >*/
        ReLU,      /*<! ReLU >*/
        LeakyReLU, /*<! LeakyReLU >*/
        PReLU,     /*<! PReLU >*/
        // TODO: Sigmoid, /*<! Sigmoid >*/
        // TODO: Softmax, /*<! Softmax */
        // TODO: TanH,
        // TODO: ReLU6
    } activation_type_t;

    typedef enum
    {
        PADDING_NOT_SET,
        PADDING_VALID,      /*<! no padding >*/
        PADDING_SAME_BEGIN, /*<! SAME in MXNET style >*/
        PADDING_SAME_END,   /*<! SAME in TensorFlow style >*/
    } padding_type_t;

    typedef enum
    {
        CONSTANT,
        EDGE,
        REFLECT,
        SYMMETRIC,
    } padding_mode_t;
} // namespace dl
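A small illustration of how the saturation helpers above combine when requantizing an accumulator to int8 (a sketch; `acc` and `shift` are hypothetical values, not names from this header). Note that DL_CLIP and DL_RIGHT_SHIFT expand to bare ternaries without outer parentheses, so the caller parenthesizes:

// Sketch: requantize an int32 accumulator to int8 with the helpers above.
inline int8_t requantize_q8(int32_t acc, int shift)
{
    int32_t shifted = DL_RIGHT_SHIFT(acc, shift); // sign-aware: negative shift means shift left
    return (int8_t)(DL_CLIP(shifted, DL_Q8_MIN, DL_Q8_MAX)); // saturate into [-128, 127]
}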
tools/sdk/esp32c3/include/esp-face/include/image/dl_image.hpp (new file, 439 lines)
@@ -0,0 +1,439 @@
#pragma once

#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include <vector>
#include "dl_define.hpp"
#include "dl_variable.hpp"
#include "dl_math_matrix.hpp"

namespace dl
{
    namespace image
    {
        typedef enum
        {
            IMAGE_RESIZE_BILINEAR = 0, /*<! Resize image by taking bilinear of four pixels */
            IMAGE_RESIZE_MEAN = 1,     /*<! Resize image by taking mean of four pixels */
            IMAGE_RESIZE_NEAREST = 2   /*<! Resize image by taking the nearest pixel */
        } resize_type_t;

        /**
         * @brief Convert RGB888 pixel to Gray.
         *
         * @param red   red value
         * @param green green value
         * @param blue  blue value
         * @return gray value
         */
        inline uint8_t convert_pixel_rgb888_to_gray(int red, int green, int blue)
        {
            int temp = (red * 38 + green * 75 + blue * 15) >> 7;
            return DL_CLIP(temp, 0, 255);
        }

        /**
         * @brief Convert RGB565 pixel to RGB888.
         *
         * @tparam T supports all integer types
         * @param input  pixel value in RGB565
         * @param output pixel value in RGB888
         */
        template <typename T>
        inline void convert_pixel_rgb565_to_rgb888(uint16_t input, T *output)
        {
            output[0] = (input & 0x1F00) >> 5;                            // blue
            output[1] = ((input & 0x7) << 5) | ((input & 0xE000) >> 11);  // green
            output[2] = input & 0xF8;                                     // red
        }

        /**
         * @brief Convert RGB565 pixel to Gray.
         *
         * @param input pixel value in RGB565
         * @return pixel value in Gray
         */
        inline uint8_t convert_pixel_rgb565_to_gray(uint16_t input)
        {
            int blue = (input & 0x1F00) >> 5;                            // blue
            int green = ((input & 0x7) << 5) | ((input & 0xE000) >> 11); // green
            int red = input & 0xF8;                                      // red

            return convert_pixel_rgb888_to_gray(red, green, blue);
        }

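The bit layout above assumes a byte-swapped RGB565 word, as typical camera drivers deliver it. A quick sketch of a whole-frame conversion built from the pixel helper (the function and its names are illustrative, not part of this header):

// Sketch: convert a byte-swapped RGB565 frame to an 8-bit grayscale buffer.
void frame_to_gray(const uint16_t *frame, uint8_t *gray, int height, int width)
{
    for (int i = 0; i < height * width; i++)
        gray[i] = dl::image::convert_pixel_rgb565_to_gray(frame[i]);
}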
        /**
         * @brief Crop a patch from the image, resize it, and store it to the destination image.
         *        If the cropping box is out of the image, the destination image will be padded with the edge.
         *
         *        The outer rectangle is the entire output image.
         *        The inner rectangle is where the resized image will be stored.
         *        In other words, this function can do padding while resizing the image.
         *         ___________________________(dst_w)__________________
         *        |   ___________________________                      |
         *        |  |(x_start, y_start)         |                     |
         *        |  |                           |                     |
         *        |  |                           |                     |
         * (dst_h)|  |                           |                     |
         *        |  |                           |                     |
         *        |  |                           |                     |
         *        |  |___________________________|(x_end, y_end)       |
         *        |____________________________________________________|
         *
         * @tparam T support all integer types
         * @param dst_image   pointer of destination (output) image
         * @param dst_width   destination image width
         * @param dst_channel destination image channel number
         * @param dst_y_start start y of resized image in destination image
         * @param dst_y_end   end y of resized image in destination image
         * @param dst_x_start start x of resized image in destination image
         * @param dst_x_end   end x of resized image in destination image
         * @param src_image   pointer of source image
         * @param src_height  source image height
         * @param src_width   source image width
         * @param src_channel source image channel
         * @param src_y_start start y of resized image in source image
         * @param src_y_end   end y of resized image in source image
         * @param src_x_start start x of resized image in source image
         * @param src_x_end   end x of resized image in source image
         * @param resize_type one of IMAGE_RESIZE_BILINEAR or IMAGE_RESIZE_MEAN or IMAGE_RESIZE_NEAREST
         * @param shift_left  bit left shift number implemented on output
         */
        template <typename T>
        void crop_and_resize(T *dst_image,
                             int dst_width,
                             int dst_channel,
                             int dst_y_start, int dst_y_end,
                             int dst_x_start, int dst_x_end,
                             uint16_t *src_image,
                             int src_height,
                             int src_width,
                             int src_channel,
                             int src_y_start, int src_y_end,
                             int src_x_start, int src_x_end,
                             resize_type_t resize_type = IMAGE_RESIZE_NEAREST,
                             int shift_left = 0);

        /**
         * @brief Crop a patch from the image, resize it, and store it to the destination image.
         *        If the cropping box is out of the image, the destination image will be padded with the edge.
         *
         *        The outer rectangle is the entire output image.
         *        The inner rectangle is where the resized image will be stored.
         *        In other words, this function can do padding while resizing the image.
         *         ___________________________(dst_w)__________________
         *        |   ___________________________                      |
         *        |  |(x_start, y_start)         |                     |
         *        |  |                           |                     |
         *        |  |                           |                     |
         * (dst_h)|  |                           |                     |
         *        |  |                           |                     |
         *        |  |                           |                     |
         *        |  |___________________________|(x_end, y_end)       |
         *        |____________________________________________________|
         *
         * @tparam T support all integer types
         * @param dst_image   pointer of destination (output) image
         * @param dst_width   destination image width
         * @param dst_channel destination image channel number
         * @param dst_y_start start y of resized image in destination image
         * @param dst_y_end   end y of resized image in destination image
         * @param dst_x_start start x of resized image in destination image
         * @param dst_x_end   end x of resized image in destination image
         * @param src_image   pointer of source image
         * @param src_height  source image height
         * @param src_width   source image width
         * @param src_channel source image channel
         * @param src_y_start start y of resized image in source image
         * @param src_y_end   end y of resized image in source image
         * @param src_x_start start x of resized image in source image
         * @param src_x_end   end x of resized image in source image
         * @param resize_type one of IMAGE_RESIZE_BILINEAR or IMAGE_RESIZE_MEAN or IMAGE_RESIZE_NEAREST
         * @param shift_left  bit left shift number implemented on output
         */
        template <typename T>
        void crop_and_resize(T *dst_image,
                             int dst_width,
                             int dst_channel,
                             int dst_y_start, int dst_y_end,
                             int dst_x_start, int dst_x_end,
                             uint8_t *src_image,
                             int src_height,
                             int src_width,
                             int src_channel,
                             int src_y_start, int src_y_end,
                             int src_x_start, int src_x_end,
                             resize_type_t resize_type = IMAGE_RESIZE_NEAREST,
                             int shift_left = 0);

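A sketch of a typical call: center-cropping a 96x96 patch out of a 240x240 RGB565 frame into an RGB888 destination. All sizes and names here are hypothetical, chosen only to show how the destination and source boxes pair up:

// Sketch: center-crop a 96x96 patch from a 240x240 RGB565 frame into RGB888.
void center_crop(uint16_t *src_frame, uint8_t *dst /* 96*96*3 bytes */)
{
    dl::image::crop_and_resize<uint8_t>(dst, 96, 3,
                                        0, 96, 0, 96,           // destination box: the whole output
                                        src_frame, 240, 240, 3, // source frame and its shape
                                        72, 168, 72, 168,       // 96x96 box centered in the source
                                        dl::image::IMAGE_RESIZE_NEAREST);
}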
        /**
         * @brief Draw a filled rectangle on RGB888 image.
         *
         * @param image        pointer of input image
         * @param image_height height of input image
         * @param image_width  width of input image
         * @param x1           left up corner x
         * @param y1           left up corner y
         * @param x2           right bottom corner x
         * @param y2           right bottom corner y
         * @param color        0x    00 |        00 |        00 |        00
         *                    reserved | channel 0 | channel 1 | channel 2
         */
        void draw_filled_rectangle(uint8_t *image, const uint32_t image_height, const uint32_t image_width,
                                   uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2,
                                   const uint32_t color = 0x00FF0000);

        /**
         * @brief Draw a filled rectangle on RGB565 image.
         *
         * @param image        pointer of input image
         * @param image_height height of input image
         * @param image_width  width of input image
         * @param x1           left up corner x
         * @param y1           left up corner y
         * @param x2           right bottom corner x
         * @param y2           right bottom corner y
         * @param color        0b       000 |     00000 |     00000 |            000
         *                  channel 1[2:0] | channel 0 | channel 2 | channel 1[5:3]
         */
        void draw_filled_rectangle(uint16_t *image, const uint32_t image_height, const uint32_t image_width,
                                   uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2,
                                   const uint16_t color = 0b0001111100000000);

        /**
         * @brief Draw a point on RGB888 image.
         *
         * @param image        pointer of input image
         * @param image_height height of input image
         * @param image_width  width of input image
         * @param x            point x
         * @param y            point y
         * @param size         size of point
         * @param color        0x    00 |        00 |        00 |        00
         *                    reserved | channel 0 | channel 1 | channel 2
         */
        void draw_point(uint8_t *image, const uint32_t image_height, const uint32_t image_width,
                        const uint32_t x, const uint32_t y, const uint32_t size,
                        const uint32_t color = 0x00FF0000);

        /**
         * @brief Draw a point on RGB565 image.
         *
         * @param image        pointer of input image
         * @param image_height height of input image
         * @param image_width  width of input image
         * @param x            point x
         * @param y            point y
         * @param size         size of point
         * @param color        0b       000 |     00000 |     00000 |            000
         *                  channel 1[2:0] | channel 0 | channel 2 | channel 1[5:3]
         */
        void draw_point(uint16_t *image, const uint32_t image_height, const uint32_t image_width,
                        const uint32_t x, const uint32_t y, const uint32_t size,
                        uint16_t color = 0b0001111100000000);

        /**
         * @brief Draw a hollow rectangle on RGB888 image.
         *
         * @param image        pointer of input image
         * @param image_height height of input image
         * @param image_width  width of input image
         * @param x1           left up corner x
         * @param y1           left up corner y
         * @param x2           right bottom corner x
         * @param y2           right bottom corner y
         * @param color        0x    00 |        00 |        00 |        00
         *                    reserved | channel 0 | channel 1 | channel 2
         */
        void draw_hollow_rectangle(uint8_t *image, const uint32_t image_height, const uint32_t image_width,
                                   uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2,
                                   uint32_t color = 0x00FF0000);

        /**
         * @brief Draw a hollow rectangle on RGB565 image.
         *
         * @param image        pointer of input image
         * @param image_height height of input image
         * @param image_width  width of input image
         * @param x1           left up corner x
         * @param y1           left up corner y
         * @param x2           right bottom corner x
         * @param y2           right bottom corner y
         * @param color        0b       000 |     00000 |     00000 |            000
         *                  channel 1[2:0] | channel 0 | channel 2 | channel 1[5:3]
         */
        void draw_hollow_rectangle(uint16_t *image, const uint32_t image_height, const uint32_t image_width,
                                   uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2,
                                   const uint16_t color = 0b0001111100000000);

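A short sketch tying the drawing overloads together, e.g. to annotate a detection result on an RGB888 frame (the function, buffer size, and colors are placeholders):

// Sketch: draw a red hollow box and a green center point on a 240x240 RGB888 buffer.
void annotate(uint8_t *img /* 240*240*3 bytes */)
{
    dl::image::draw_hollow_rectangle(img, 240, 240, 60, 60, 180, 180, 0x00FF0000);
    dl::image::draw_point(img, 240, 240, 120, 120, 4, 0x0000FF00);
}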
        /**
         * @brief Detect target moving by the number of activated detection points. Each cross in the figure below is a detection point.
         *        Once abs(frame_1_detection_point[i] - frame_2_detection_point[i]) > threshold, this detection point is activated.
         *        This function returns the number of activated detection points.
         *
         *         __stride__________________________
         *        |        |        |        |   |
         * stride |        |        |        |   |
         *        |        |        |        |   |
         *        |________|________|________|   |
         *        |        |        |        |   |
         *        |        |        |        |   |
         *        |        |        |        |   |
         *        |________|________|________| height
         *        |        |        |        |   |
         *        |        |        |        |   |
         *        |        |        |        |   |
         *        |________|________|________|   |
         *        |        |        |        |   |
         *        |        |        |        |   |
         *        |        |        |        |   |
         *        |________|________|________|___|___
         *        |                          |
         *        |__________width___________|
         *
         *        Time consumption:
         *        Frame shape = (240, 240)
         *        Both frames are in PSRAM
         *        On ESP32-S3 with CPU 240MHz, QSPI 80MHz
         *
         *        stride   latency
         *             1   28316us
         *             2    8770us
         *             4    3622us
         *             8    1990us
         *            16     880us
         *            32     260us
         *
         *        In an application, a threshold on the number of activated detection points is needed outside this function.
         *        Once the number of activated detection points > number_threshold, the two frames are judged as "target moved".
         *        How to determine the number_threshold?
         *        Let's assume that the minimum shape of the target is (target_min_height, target_min_width).
         *        Then, number_threshold = [target_min_height / stride] * [target_min_width / stride] * ratio,
         *        where ratio is in (0, 1); the smaller the ratio is, the more sensitive the detector is, and the more false detections occur.
         *
         * @param f1        one frame in RGB565
         * @param f2        another frame in RGB565
         * @param height    height of frame
         * @param width     width of frame
         * @param stride    stride of detection points; the smaller the stride is, the more reliable the detector is.
         * @param threshold activation threshold of each detection point
         * @return number of activated detection points
         */
        uint32_t get_moving_point_number(uint16_t *f1, uint16_t *f2, const uint32_t height, const uint32_t width, const uint32_t stride, const uint32_t threshold = 5);

        /**
         * @brief Detect target moving by the number of activated detection points. Each cross in the figure below is a detection point.
         *        Once abs(frame_1_detection_point[i] - frame_2_detection_point[i]) > threshold, this detection point is activated.
         *        This function returns the number of activated detection points.
         *
         *         __stride__________________________
         *        |        |        |        |   |
         * stride |        |        |        |   |
         *        |        |        |        |   |
         *        |________|________|________|   |
         *        |        |        |        |   |
         *        |        |        |        |   |
         *        |        |        |        |   |
         *        |________|________|________| height
         *        |        |        |        |   |
         *        |        |        |        |   |
         *        |        |        |        |   |
         *        |________|________|________|   |
         *        |        |        |        |   |
         *        |        |        |        |   |
         *        |        |        |        |   |
         *        |________|________|________|___|___
         *        |                          |
         *        |__________width___________|
         *
         *        In an application, a threshold on the number of activated detection points is needed outside this function.
         *        Once the number of activated detection points > number_threshold, the two frames are judged as "target moved".
         *        How to determine the number_threshold?
         *        Let's assume that the minimum shape of the target is (target_min_height, target_min_width).
         *        Then, number_threshold = [target_min_height / stride] * [target_min_width / stride] * ratio,
         *        where ratio is in (0, 1); the smaller the ratio is, the more sensitive the detector is, and the more false detections occur.
         *
         * @param f1        one frame in RGB888
         * @param f2        another frame in RGB888
         * @param height    height of frame
         * @param width     width of frame
         * @param stride    stride of detection points; the smaller the stride is, the more reliable the detector is.
         * @param threshold activation threshold of each detection point
         * @return number of activated detection points
         */
        uint32_t get_moving_point_number(uint8_t *f1, uint8_t *f2, const uint32_t height, const uint32_t width, const uint32_t stride, const uint32_t threshold = 5);

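A sketch of the decision logic the comment describes, using the number_threshold formula with hypothetical numbers (240x240 frames, stride 16, smallest target 48x48, ratio 0.5):

// Sketch: decide whether the target moved between two RGB565 frames.
bool target_moved(uint16_t *frame_prev, uint16_t *frame_curr)
{
    uint32_t active = dl::image::get_moving_point_number(frame_prev, frame_curr,
                                                         240, 240, /*stride=*/16,
                                                         /*threshold=*/5);
    // (48 / 16) * (48 / 16) * 0.5 = 4.5 -> truncated to 4
    const uint32_t number_threshold = 4;
    return active > number_threshold;
}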
        /**
         * @brief Apply an affine transformation to an image.
         *
         * @tparam T
         * @param input  the input image.
         * @param output the output image.
         * @param M_inv  the inverse transformation matrix.
         */
        template <typename T>
        void warp_affine(dl::Tensor<T> *input, dl::Tensor<T> *output, dl::math::Matrix<float> *M_inv);

        /**
         * @brief Apply an affine transformation to an image.
         *
         * @tparam T
         * @param input  the pointer of the input image.
         * @param shape  the shape of the input image.
         * @param output the output image.
         * @param M_inv  the inverse transformation matrix.
         */
        template <typename T>
        void warp_affine(uint16_t *input, std::vector<int> shape, dl::Tensor<T> *output, dl::math::Matrix<float> *M_inv);

        /**
         * @brief Get the Otsu threshold.
         *
         * @param image the gray image.
         * @return uint8_t the Otsu threshold.
         */
        uint8_t get_otsu_thresh(Tensor<uint8_t> &image);

        /**
         * @brief Convert RGB image to gray image
         *
         * @param image input image
         * @param bgr   true: the image is in BGR format
         *              false: the image is in RGB format
         * @return Tensor<uint8_t>* output image in gray format
         */
        Tensor<uint8_t> *rgb2gray(Tensor<uint8_t> &image, bool bgr = false);

        /**
         * @brief Convert RGB image to LAB image
         *
         * @param image input image
         * @param bgr   true: the image is in BGR format
         *              false: the image is in RGB format
         * @param fast  true: use the fast algorithm, but the accuracy will be reduced
         *              false: do not use the fast algorithm
         * @return Tensor<uint8_t>* output image in LAB format
         */
        Tensor<uint8_t> *rgb2lab(Tensor<uint8_t> &image, bool bgr = false, bool fast = true);

        /**
         * @brief Convert RGB image to HSV image
         *
         * @param image input image
         * @param bgr   true: the image is in BGR format
         *              false: the image is in RGB format
         * @param fast  true: use the fast algorithm, but the accuracy will be reduced
         *              false: do not use the fast algorithm
         * @return Tensor<uint8_t>* output image in HSV format
         */
        Tensor<uint8_t> *rgb2hsv(Tensor<uint8_t> &image, bool bgr = false, bool fast = true);

    } // namespace image
} // namespace dl
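A sketch chaining the color-space helpers, e.g. grayscale plus Otsu binarization threshold (`rgb` is a placeholder tensor; the returned pointer appears to be heap-allocated, so this sketch assumes the caller frees it):

// Sketch: grayscale conversion followed by a global Otsu threshold.
void gray_and_thresh(dl::Tensor<uint8_t> &rgb)
{
    dl::Tensor<uint8_t> *gray = dl::image::rgb2gray(rgb);   // RGB channel order assumed
    uint8_t thresh = dl::image::get_otsu_thresh(*gray);     // threshold for binarizing *gray
    (void)thresh;
    delete gray;                                             // assumed caller-owned
}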
@@ -0,0 +1,145 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_add2d.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
    namespace layer
    {
        /**
         * @brief Activation(Add2D(input0, input1)).
         *        NOTE: addition is element-wise, i.e., output[i,j,k] = input0[i,j,k] + input1[i,j,k]
         *
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantize
         *         - int8_t: stands for operation in int8_t quantize
         */
        template <typename feature_t>
        class Add2D : public Layer
        {
        private:
            const Activation<feature_t> *activation; /*<! activation of add2d, if you don't specify anything, no activation is applied >*/
            const int output_exponent;               /*<! exponent of output >*/
            Tensor<feature_t> *output;               /*<! output ptr of add2d >*/
            bool inplace;                            /*<! true: the output will be stored to input0
                                                          false: the output will be stored to separate memory >*/
            std::vector<int> output_shape;           /*<! output shape of add2d >*/

        public:
            /**
             * @brief Construct a new Add2D object.
             *
             * @param output_exponent exponent of output
             * @param activation      activation of add2d, if you don't specify anything, no activation is applied
             * @param name            name of add2d
             * @param inplace         true: the output will be stored to input0
             *                        false: the output will be stored to separate memory
             */
            Add2D(const int output_exponent, const Activation<feature_t> *activation = NULL, const char *name = "Add2D", bool inplace = false)
                : Layer(name),
                  activation(activation),
                  output_exponent(output_exponent),
                  output(NULL),
                  inplace(inplace),
                  output_shape({}) {}

            /**
             * @brief Destroy the Add2D object
             */
            ~Add2D()
            {
                if ((!this->inplace) && (this->output != NULL))
                {
                    delete this->output;
                }
            }

            /**
             * @brief Update output shape.
             *        NOTE: input0.shape must equal input1.shape.
             *
             * @param input0      as one input
             * @param input1      as another input
             * @param print_shape whether to print the output shape.
             */
            void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
            {
                assert(input0.is_same_shape(input1));
                this->output_shape = input0.shape;

                if (!this->inplace)
                {
                    if (this->output == NULL)
                    {
                        this->output = new Tensor<feature_t>;
                    }
                    this->output->set_exponent(this->output_exponent);
                    this->output->set_shape(input0.shape);
                    this->output->free_element();
                }
                else
                {
                    this->output = &input0;
                }
                if (print_shape)
                {
                    std::cout << this->name << " | ";
                    this->output->print_shape();
                }
            }

            /**
             * @brief Get the output
             *
             * @return Tensor<feature_t>& Add2D result
             */
            Tensor<feature_t> &get_output()
            {
                return *this->output;
            }

            /**
             * @brief Call Add2D operation.
             *
             * @param input0      as one input
             * @param input1      as another input
             * @param assign_core not effective yet
             * @return Tensor<feature_t>& added result
             */
            Tensor<feature_t> &call(Tensor<feature_t> &input0, Tensor<feature_t> &input1, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
            {
                DL_LOG_LAYER_LATENCY_INIT();

                if (!this->inplace)
                {
                    DL_LOG_LAYER_LATENCY_START();
                    if (this->output->shape != this->output_shape)
                    {
                        this->output->set_shape(this->output_shape);
                    }
                    this->output->malloc_element();
                    this->output->set_exponent(this->output_exponent);
                    DL_LOG_LAYER_LATENCY_END(this->name, "apply");

                    DL_LOG_LAYER_LATENCY_START();
                    nn::add2d(*this->output, input0, input1, this->activation, assign_core);
                    DL_LOG_LAYER_LATENCY_END(this->name, "add2d");
                }
                else
                {
                    DL_LOG_LAYER_LATENCY_START();
                    if (this->output->shape != this->output_shape)
                    {
                        this->output->set_shape(this->output_shape);
                    }
                    nn::add2d(*this->output, input0, input1, this->activation, assign_core, this->output_exponent);
                    DL_LOG_LAYER_LATENCY_END(this->name, "add2d");
                }

                return *this->output;
            }
        };
    } // namespace layer
} // namespace dl
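A sketch of the typical build/call lifecycle of this layer (hedged: assumes two quantized int16 tensors `a` and `b` of identical shape, already filled; the output exponent is a placeholder value):

// Sketch: element-wise addition of two quantized tensors.
void add_example(dl::Tensor<int16_t> &a, dl::Tensor<int16_t> &b)
{
    dl::layer::Add2D<int16_t> add(/*output_exponent=*/-10);  // hypothetical exponent
    add.build(a, b);                             // shape inference, done once
    dl::Tensor<int16_t> &sum = add.call(a, b);   // Activation(a + b), here no activation
    (void)sum;
}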
@@ -0,0 +1,161 @@
#pragma once

#include <vector>
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_avg_pool2d.hpp"

namespace dl
{
    namespace layer
    {
        /**
         * @brief AvgPool2D(input).
         *
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantize
         *         - int8_t: stands for operation in int8_t quantize
         */
        template <typename feature_t>
        class AvgPool2D : public Layer
        {
        private:
            const int output_exponent;         /*<! exponent of output >*/
            std::vector<int> filter_shape;     /*<! filter shape in [filter_height, filter_width] >*/
            const int stride_y;                /*<! stride in height >*/
            const int stride_x;                /*<! stride in width >*/
            const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
            std::vector<int> padding;          /*<! padding size needed in [top, bottom, left, right] of this operation >*/
            Tensor<feature_t> *output;         /*<! output ptr of AvgPool2D >*/
            std::vector<int> output_shape;     /*<! output shape of AvgPool2D >*/

        public:
            /**
             * @brief Construct a new AvgPool2D object.
             *
             * @param output_exponent exponent of output
             * @param filter_shape    filter shape in [filter_height, filter_width]
             * @param padding_type    one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
             *                        - PADDING_VALID means no padding
             *                        - PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
             *                          such that output has the same height/width dimension as the input;
             *                          PADDING_SAME_END pads in TensorFlow style, PADDING_SAME_BEGIN in MXNET style
             *                        - PADDING_NOT_SET means padding with the specific "padding" value below.
             * @param padding         if padding_type is PADDING_NOT_SET, this value will be used as padding size.
             *                        the shape must be 4; the value of each position is: [padding top, padding bottom, padding left, padding right]
             * @param stride_y        stride in height
             * @param stride_x        stride in width
             * @param name            name of layer
             */
            AvgPool2D(const int output_exponent,
                      const std::vector<int> filter_shape,
                      const padding_type_t padding_type = PADDING_VALID,
                      std::vector<int> padding = {},
                      const int stride_y = 1,
                      const int stride_x = 1,
                      const char *name = "AvgPool2D")
                : Layer(name),
                  output_exponent(output_exponent),
                  filter_shape(filter_shape),
                  padding_type(padding_type),
                  padding(padding),
                  stride_y(stride_y),
                  stride_x(stride_x),
                  output_shape({})
            {
                this->output = new Tensor<feature_t>;
                if (this->padding_type == PADDING_NOT_SET)
                {
                    assert(this->padding.size() == 4);
                }
            }

            /**
             * @brief Destroy the AvgPool2D object.
             *
             */
            ~AvgPool2D()
            {
                if (this->output != NULL)
                {
                    delete this->output;
                }
            }

            /**
             * @brief Update output shape and padding.
             *
             * @param input       as an input
             * @param print_shape whether to print the output shape.
             */
            void build(Tensor<feature_t> &input, bool print_shape = false)
            {
                assert(input.shape[0] > 0);
                assert(input.shape[1] > 0);
                assert(input.shape.size() == 3);

                this->output_shape = nn::get_output_shape(input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type, false, this->padding);
                this->output->set_shape(this->output_shape);
                this->output->set_exponent(this->output_exponent);

                if (this->padding_type != PADDING_NOT_SET)
                {
                    this->padding = nn::get_pad_size(this->output_shape, input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
                }

                this->output->free_element();

                if (print_shape)
                {
                    std::cout << this->name << " | ";
                    this->output->print_shape();
                }
            }

            /**
             * @brief Get the output
             *
             * @return Tensor<feature_t>& AvgPool2D result
             */
            Tensor<feature_t> &get_output()
            {
                return *this->output;
            }

            /**
             * @brief Call AvgPool2D operation
             *
             * @param input           as an input
             * @param autoload_enable one of true or false,
             *                        - true: load input and output from PSRAM to CACHE automatically
             *                        - false: do not
             * @return AvgPool2D result
             */
            Tensor<feature_t> &call(Tensor<feature_t> &input, uint8_t autoload_enable = 0)
            {
                DL_LOG_LAYER_LATENCY_INIT();

                DL_LOG_LAYER_LATENCY_START();
                if (this->output->shape != this->output_shape)
                {
                    this->output->set_shape(this->output_shape);
                }
                this->output->malloc_element();
                this->output->set_exponent(this->output_exponent);
                DL_LOG_LAYER_LATENCY_END(this->name, "apply");

                if (autoload_enable)
                {
                    dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
                                                   (uint32_t)(input.element), input.get_size() * sizeof(feature_t));
                }

                DL_LOG_LAYER_LATENCY_START();
                nn::avg_pool2d(*this->output, input, this->padding, this->filter_shape, this->stride_y, this->stride_x);
                DL_LOG_LAYER_LATENCY_END(this->name, "avg_pool2d");

                return *this->output;
            }
        };
    } // namespace layer
} // namespace dl
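The same build/call pattern as Add2D applies; a sketch with a 2x2, stride-2 pooling window (hedged: `x` is a placeholder int8 tensor of shape [H, W, C]; the exponent is hypothetical):

// Sketch: 2x2 average pooling with stride 2, no padding.
void pool_example(dl::Tensor<int8_t> &x)
{
    dl::layer::AvgPool2D<int8_t> pool(/*output_exponent=*/-7,
                                      /*filter_shape=*/{2, 2},
                                      dl::PADDING_VALID,
                                      /*padding=*/{},
                                      /*stride_y=*/2, /*stride_x=*/2);
    pool.build(x);
    dl::Tensor<int8_t> &y = pool.call(x);  // output is roughly [H/2, W/2, C]
    (void)y;
}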
@@ -0,0 +1,56 @@
#pragma once
#include "dl_tool.hpp"
#include "dl_tool_cache.hpp"
#include <iostream>

namespace dl
{
    namespace layer
    {
        /**
         * @brief Base class for layer.
         *
         */
        class Layer
        {
        public:
            char *name; /*<! name of layer >*/

            /**
             * @brief Construct a new Layer object.
             *
             * @param name name of layer.
             */
            Layer(const char *name = NULL);

            /**
             * @brief Destroy the Layer object. Return resource.
             *
             */
            ~Layer();
        };
    } // namespace layer
} // namespace dl

#if DL_LOG_LAYER_LATENCY
/**
 * @brief Initialize.
 */
#define DL_LOG_LAYER_LATENCY_INIT() dl::tool::Latency latency

/**
 * @brief Time starts.
 */
#define DL_LOG_LAYER_LATENCY_START() latency.start()

/**
 * @brief Time ends and is printed.
 */
#define DL_LOG_LAYER_LATENCY_END(prefix, key) \
    latency.end();                            \
    latency.print(prefix, key)
#else
#define DL_LOG_LAYER_LATENCY_INIT()
#define DL_LOG_LAYER_LATENCY_START()
#define DL_LOG_LAYER_LATENCY_END(prefix, key)
#endif
@@ -0,0 +1,139 @@
#pragma once

#include <assert.h>
#include <vector>

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"
#include "dl_nn_concat.hpp"

namespace dl
{
    namespace layer
    {
        /**
         * @brief Concat(input1, input2, input3, ...).
         *
         * @tparam feature_t supports all kinds of integer and float data types
         */
        template <typename feature_t>
        class Concat : Layer
        {
        private:
            int output_exponent;           /*<! exponent of output >*/
            int axis;                      /*<! The axis along which the Tensors will be concatenated. >*/
            Tensor<feature_t> *output;     /*<! output ptr of Concat >*/
            std::vector<int> output_shape; /*<! output shape of Concat >*/
        public:
            /**
             * @brief Construct a new Concat object.
             *
             * @param name name of layer
             * @param axis The axis along which the Tensors will be concatenated.
             */
            Concat(int axis, const char *name = "Concat") : Layer(name), axis(axis), output_shape({})
            {
                this->output = new Tensor<feature_t>;
            }

            /**
             * @brief Destroy the Concat object
             */
            ~Concat()
            {
                if (this->output != NULL)
                {
                    delete this->output;
                }
            }

            /**
             * @brief Collect inputs' channel and memory offset, called in Model.build().
             *
             * @param args        pointers of concatenated Tensors
             * @param print_shape whether to print the output shape.
             */
            void build(std::vector<Tensor<feature_t> *> args, bool print_shape = false)
            {
                assert(args.size() > 1);
                int shape_size = args[0]->shape.size();

                if (this->axis < 0)
                {
                    this->axis = shape_size + this->axis;
                }
                assert((this->axis < shape_size) && (this->axis > -1));

                int output_shape_axis = args[0]->shape[this->axis];

                for (int i = 1; i < args.size(); i++)
                {
                    assert(shape_size == args[i]->shape.size());
                    assert(args[i]->exponent == args[i - 1]->exponent);
                    output_shape_axis += args[i]->shape[this->axis];

                    for (int j = 0; j < shape_size; j++)
                    {
                        if (j != this->axis)
                        {
                            assert(args[i]->shape[j] == args[i - 1]->shape[j]);
                        }
                    }
                }

                this->output_exponent = args[0]->exponent;
                this->output_shape = args[0]->shape;
                this->output_shape[this->axis] = output_shape_axis;

                this->output->set_shape(this->output_shape);
                this->output->set_exponent(this->output_exponent);
                this->output->free_element();

                if (print_shape)
                {
                    std::cout << this->name << " | ";
                    this->output->print_shape();
                }
            }

            /**
             * @brief Call Concat operation
             *
             * @param inputs      the pointers of inputs
             * @param free_inputs true: free the inputs after the call
             *                    false: do not free the inputs
             * @return Tensor<feature_t>& concat result
             */
            Tensor<feature_t> &call(std::vector<Tensor<feature_t> *> inputs, bool free_inputs = false)
            {
                DL_LOG_LAYER_LATENCY_INIT();

                DL_LOG_LAYER_LATENCY_START();
                if (this->output->shape != this->output_shape)
                {
                    this->output->set_shape(this->output_shape);
                }
                this->output->malloc_element();
                this->output->set_exponent(this->output_exponent);
                DL_LOG_LAYER_LATENCY_END(this->name, "apply");

                DL_LOG_LAYER_LATENCY_START();
                nn::concat(*this->output, inputs, this->axis, free_inputs);
                DL_LOG_LAYER_LATENCY_END(this->name, "concat");
                return *this->output;
            }

            /**
             * @brief Get the output
             *
             * @return Tensor<feature_t>& Concat result
             */
            Tensor<feature_t> &get_output()
            {
                return *this->output;
            }
        };
    } // namespace layer
} // namespace dl
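A sketch of channel-wise concatenation using a negative axis, which build() normalizes (hedged: `t0` and `t1` are placeholder tensors that agree on every dimension except the last):

// Sketch: concatenate two tensors along the channel (last) axis.
void concat_example(dl::Tensor<int16_t> &t0, dl::Tensor<int16_t> &t1)
{
    dl::layer::Concat<int16_t> concat(/*axis=*/-1);
    concat.build({&t0, &t1}, /*print_shape=*/true);
    dl::Tensor<int16_t> &out = concat.call({&t0, &t1});
    (void)out;
}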
@@ -0,0 +1,179 @@
#pragma once

#include <assert.h>
#include <vector>

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
    namespace layer
    {
        /**
         * @brief Concat2D(input1, input2, input3, ...).
         *
         * @tparam feature_t supports all kinds of integer and float data types
         */
        template <typename feature_t>
        class Concat2D : Layer
        {
        private:
            std::vector<Tensor<feature_t> *> output_vec; /*<! pointers of concatenated inputs >*/
            std::vector<int> offset;                     /*<! memory offset of each concatenated input in the entire element >*/
            std::vector<int> channel;                    /*<! channel of concatenated inputs >*/
            Tensor<feature_t> *output;                   /*<! output ptr of Concat2D >*/
            int output_exponent;                         /*<! exponent of output >*/
        public:

            /**
             * @brief Construct a new Concat2D object.
             *
             * @param name name of layer
             */
            Concat2D(const char *name = NULL) : Layer(name)
            {
                this->output = new Tensor<feature_t>;
            }

            /**
             * @brief Destroy the Concat2D object
             */
            ~Concat2D()
            {
                if (this->output != NULL)
                {
                    delete this->output;
                }
            }

            /**
             * @brief Collect inputs' channel and memory offset, called in Model.build().
             *
             * @param args pointers of concatenated Tensors
             */
            void build(std::vector<Tensor<feature_t> *> args)
            {
                assert(args.size() > 0);

                this->output_vec = args;

                this->offset = std::vector<int>(args.size());
                this->channel = std::vector<int>(args.size());

                this->output_exponent = args[0]->exponent;
                this->offset[0] = 0;
                this->channel[0] = args[0]->shape[2];
                std::vector<int> output_shape = args[0]->shape;

                for (int i = 1; i < args.size(); i++)
                {
                    assert(output_shape[0] == args[i]->shape[0]); // height
                    assert(output_shape[1] == args[i]->shape[1]); // width
                    // assert(this->output_exponent == args[i]->exponent); // exponent

                    this->offset[i] = output_shape[2];
                    this->channel[i] = args[i]->shape[2];
                    output_shape[2] += args[i]->shape[2];
                }
                this->output->set_shape(output_shape);
                this->output->set_exponent(this->output_exponent);
                this->output->free_element();
            }

            /**
             * @brief Get the output
             *
             * @return Tensor<feature_t>& Concat2D result
             */
            Tensor<feature_t> &get_output()
            {
                return *this->output;
            }

            /**
             * @brief Get the maximum padding among inputs and output. Then set it to this->output. Called at the end of Model.build().
             *        NOTE: Consider special situations like C = Concat2D_1(A, B), E = Concat2D_2(C, D), where A, B, C, D, E are Tensors.
             *        To avoid memory copies, we apply an entire element for E and take it apart for A, B, D.
             *        A, B, C, D and E will become other layers' inputs, which results in different padding sizes.
             *        To get the maximum padding, we should call at the end of Model.build():
             *            Concat2D_1.backward(); // max_padding_temp = get_max_padding(A, B, C); padding of A, B and C is set to max_padding_temp.
             *            Concat2D_2.backward(); // max_padding = get_max_padding(max_padding_temp, get_max_padding(D, E)); padding of C, D and E is set to max_padding.
             *                                   // However, padding of A and B is still max_padding_temp.
             *            Concat2D_1.backward(); // padding of A and B is set to max_padding.
             *        Or,
             *            Concat2D_2.backward();
             *            Concat2D_1.backward();
             *            Concat2D_2.backward();
             */
            void backward()
            {
                std::vector<int> max_padding = this->output->padding;
                int max_channel_with_padding = this->output->shape_with_padding[2];
                for (int i = 0; i < this->output_vec.size(); i++)
                {
                    for (int j = 0; j < max_padding.size(); j++)
                    {
                        max_padding[j] = DL_MAX(max_padding[j], this->output_vec[i]->padding[j]);
                    }
                    max_channel_with_padding = DL_MAX(max_channel_with_padding, this->output_vec[i]->shape_with_padding[2]);
                }

                this->output->set_padding_size(max_padding);
                this->output->shape_with_padding[2] = max_channel_with_padding;
                for (int i = 0; i < this->output_vec.size(); i++)
                {
                    this->output_vec[i]->set_padding_size(max_padding);
                    this->output_vec[i]->shape_with_padding[2] = max_channel_with_padding;
#if CONFIG_DEBUG_MODE
                    assert(this->output->shape_with_padding[0] == this->output_vec[i]->shape_with_padding[0]);
                    assert(this->output->shape_with_padding[1] == this->output_vec[i]->shape_with_padding[1]);
                    assert(this->output->shape_with_padding[2] == this->output_vec[i]->shape_with_padding[2]);
#endif
                }
            }

            /**
             * @brief Calloc an entire element for the concatenated result. Take the entire element apart and deliver element pointers to the concatenated layers.
             *        NOTE: For example, C = Concat2D(A, B). We apply an entire element for C and deliver two element pointers to A and B.
             *        Let's assume that the A result is produced first. We should call Concat2D.calloc_element() just before the A result is produced
             *        to make sure the element of A is ready and can be filled.
             */
            void calloc_element()
            {
                DL_LOG_LAYER_LATENCY_INIT();

                DL_LOG_LAYER_LATENCY_START();
                this->output->calloc_element();
                DL_LOG_LAYER_LATENCY_END(this->name, "apply");

                DL_LOG_LAYER_LATENCY_START();
                for (int i = 0; i < this->offset.size(); i++)
                {
                    this->output_vec[i]->element = this->output->element + this->offset[i];
                    this->output_vec[i]->set_auto_free(false);
                }
                DL_LOG_LAYER_LATENCY_END(this->name, "deliver");
            }

            void apply_element()
            {
                DL_LOG_LAYER_LATENCY_INIT();

                DL_LOG_LAYER_LATENCY_START();
                this->output->apply_element();
                this->output->set_exponent(this->output_exponent);
                DL_LOG_LAYER_LATENCY_END(this->name, "apply");

                DL_LOG_LAYER_LATENCY_START();
                for (int i = 0; i < this->offset.size(); i++)
                {
                    this->output_vec[i]->element = this->output->element + this->offset[i];
                    this->output_vec[i]->set_auto_free(false);
                }
                DL_LOG_LAYER_LATENCY_END(this->name, "deliver");
            }
        };
    } // namespace layer
} // namespace dl
@ -0,0 +1,186 @@
|
||||
#pragma once
|
||||
|
||||
#include "dl_nn_conv2d.hpp"
|
||||
#include "dl_layer_base.hpp"
|
||||
|
||||
namespace dl
|
||||
{
|
||||
namespace layer
|
||||
{
|
||||
/**
|
||||
* @brief Activation(Conv2D(input, filter) + bias).
|
||||
*
|
||||
* @tparam feature_t supports int16_t and int8_t,
|
||||
* - int16_t: stands for operation in int16_t quantize
|
||||
* - int8_t: stands for operation in int8_t quantize
|
||||
* @tparam bias_t supports int16_t and int8_t, must specify when using int8 per-channel quantization
|
||||
* - int16_t: for int16 quantization and int8 per-channel quantization
|
||||
* - int8_t: for int8 per-tensor quantization
|
||||
*/
|
||||
template <typename feature_t, typename bias_t = feature_t>
|
||||
class Conv2D : public Layer
|
||||
{
|
||||
private:
|
||||
const int output_exponent; /*<! exponent of output >*/
|
||||
const Filter<feature_t> *filter; /*<! filter of Conv2D >*/
|
||||
const int stride_y; /*<! stride in height >*/
|
||||
const int stride_x; /*<! stride in width >*/
|
||||
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
|
||||
const Bias<bias_t> *bias; /*<! bias of Conv2D, if you don't specify anything, no bias is added >*/
|
||||
const Activation<feature_t> *activation; /*<! activation of Conv2D, if you don't specify anything, no activation is applied >*/
|
||||
std::vector<int> padding; /*<! padding size needed in [top, bottom, left, right] of this operation >*/
|
||||
Tensor<feature_t> *output; /*<! output ptr of Conv2D >*/
|
||||
std::vector<int> output_shape; /*<! output shape of Conv2D >*/
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Construct a new Conv2D object.
|
||||
*
|
||||
* @param output_exponent exponent of output
|
||||
* @param filter filter of Conv2D
|
||||
* @param bias bias of Conv2D; if you don't specify anything, no bias is added
* @param activation activation of Conv2D; if you don't specify anything, no activation is applied
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
*                     - PADDING_VALID means no padding
*                     PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
*                     such that the output has the same height/width dimension as the input,
*                     - PADDING_SAME_END results in TensorFlow-style padding
*                     - PADDING_SAME_BEGIN results in MXNet-style padding
*                     - PADDING_NOT_SET means padding with the specific "padding" value below
* @param padding if padding_type is PADDING_NOT_SET, this value will be used as the padding size.
*                Its size must be 4; the values are: [padding top, padding bottom, padding left, padding right]
* @param stride_y stride in height
* @param stride_x stride in width
* @param name name of layer
*/
    Conv2D(const int output_exponent,
           const Filter<feature_t> *filter,
           const Bias<bias_t> *bias = NULL,
           const Activation<feature_t> *activation = NULL,
           const padding_type_t padding_type = PADDING_VALID,
           std::vector<int> padding = {},
           const int stride_y = 1,
           const int stride_x = 1,
           const char *name = "Conv2D") : Layer(name),
                                          output_exponent(output_exponent),
                                          filter(filter),
                                          stride_y(stride_y),
                                          stride_x(stride_x),
                                          padding_type(padding_type),
                                          bias(bias),
                                          activation(activation),
                                          padding(padding),
                                          output_shape({})
    {
        this->output = new Tensor<feature_t>;
        if (this->padding_type == PADDING_NOT_SET)
        {
            assert(this->padding.size() == 4);
        }
    }

    /**
     * @brief Destroy the Conv2D object.
     */
    ~Conv2D()
    {
        if (this->output != NULL)
        {
            delete this->output;
        }
    }

    /**
     * @brief Update output shape and padding.
     *
     * @param input as an input
     * @param print_shape whether to print the output shape
     */
    void build(Tensor<feature_t> &input, bool print_shape = false)
    {
        assert(input.shape[0] > 0);
        assert(input.shape[1] > 0);
        assert(input.shape.size() == 3);
        assert(this->filter->shape.size() == 4);
        assert(input.shape[2] == this->filter->shape[2]);

        this->output_shape = nn::get_output_shape(input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type, true, this->padding);
        this->output->set_shape(this->output_shape);
        this->output->set_exponent(this->output_exponent);
        this->output->free_element();
        if (this->padding_type != PADDING_NOT_SET)
        {
            this->padding = nn::get_pad_size(this->output_shape, input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
        }

        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& Conv2D result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call Conv2D operation
     *
     * @param input as an input
     * @param autoload_enable one of true or false,
     *                        - true: load input and output from PSRAM to CACHE automatically
     *                        - false: do not
     * @param assign_core not effective yet
     * @return Conv2D result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input, bool autoload_enable = false, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        DL_LOG_LAYER_LATENCY_START();
        if (this->output->shape != this->output_shape)
        {
            this->output->set_shape(this->output_shape);
        }
        this->output->malloc_element();
        this->output->set_exponent(this->output_exponent);
        DL_LOG_LAYER_LATENCY_END(this->name, "apply");

        if (autoload_enable)
        {
            dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
                                           (uint32_t)(input.element), input.get_size() * sizeof(feature_t));
        }

        DL_LOG_LAYER_LATENCY_START();
        nn::conv2d(*this->output, input, this->padding, *(this->filter), this->stride_y, this->stride_x, this->bias, this->activation, assign_core);
        DL_LOG_LAYER_LATENCY_END(this->name, "conv2d");
        return *this->output;
    }

    /**
     * @brief Preload the filter to Cache.
     * NOTE: Call this layer's preload() before the previous layer's call(), so that the filter can be loaded while the previous layer is still calculating.
     */
    void preload()
    {
        size_t size = sizeof(feature_t);
        int shape_size = this->filter->shape.size();
        for (int i = 0; i < shape_size; ++i)
        {
            size *= filter->shape[i];
        }
        dl::tool::cache::preload_func((uint32_t)(this->filter->element), size);
    }
};

} // namespace layer
} // namespace dl
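
A minimal usage sketch for the class above (illustrative only, not part of this commit; `filter`, `bias`, and `input` are assumed to be defined elsewhere, e.g. exported by a model conversion step):

using namespace dl;
extern const Filter<int16_t> filter; // assumed quantized weights, shape [H_f, W_f, C_in, C_out]
extern const Bias<int16_t> bias;     // assumed quantized bias
extern Tensor<int16_t> input;        // assumed quantized [H, W, C_in] feature map

layer::Conv2D<int16_t> conv(/*output_exponent=*/-7, &filter, &bias,
                            NULL, PADDING_SAME_END);
conv.build(input);                       // computes output shape and padding once
Tensor<int16_t> &out = conv.call(input); // runs the quantized convolution
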
@ -0,0 +1,188 @@
#pragma once

#include "dl_nn_depthwise_conv2d.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief Activation(DepthwiseConv2D(filter, input) + bias).
 *
 * @tparam feature_t supports int16_t and int8_t,
 *         - int16_t: stands for operation in int16_t quantize
 *         - int8_t: stands for operation in int8_t quantize
 * @tparam bias_t supports int16_t and int8_t, must specify when using int8 per-channel quantization
 *         - int16_t: for int16 quantization and int8 per-channel quantization
 *         - int8_t: for int8 per-tensor quantization
 */
template <typename feature_t, typename bias_t = feature_t>
class DepthwiseConv2D : public Layer
{
private:
    const int output_exponent;               /*<! exponent of output >*/
    const Filter<feature_t> *filter;         /*<! filter of DepthwiseConv2D >*/
    const int stride_y;                      /*<! stride in height >*/
    const int stride_x;                      /*<! stride in width >*/
    const padding_type_t padding_type;       /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
    const Bias<bias_t> *bias;                /*<! bias of DepthwiseConv2D, if you don't specify anything, no bias is added >*/
    const Activation<feature_t> *activation; /*<! activation of DepthwiseConv2D, if you don't specify anything, no activation is applied >*/
    std::vector<int> padding;                /*<! padding size needed in [top, bottom, left, right] of this operation >*/
    Tensor<feature_t> *output;               /*<! output ptr of DepthwiseConv2D >*/
    std::vector<int> output_shape;           /*<! output shape of DepthwiseConv2D >*/

public:
    /**
     * @brief Construct a new DepthwiseConv2D object.
     *
     * @param output_exponent exponent of output
     * @param filter filter of DepthwiseConv2D
     * @param bias bias of DepthwiseConv2D; if you don't specify anything, no bias is added
     * @param activation activation of DepthwiseConv2D; if you don't specify anything, no activation is applied
     * @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
     *                     - PADDING_VALID means no padding
     *                     PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
     *                     such that the output has the same height/width dimension as the input,
     *                     - PADDING_SAME_END results in TensorFlow-style padding
     *                     - PADDING_SAME_BEGIN results in MXNet-style padding
     *                     - PADDING_NOT_SET means padding with the specific "padding" value below
     * @param padding if padding_type is PADDING_NOT_SET, this value will be used as the padding size.
     *                Its size must be 4; the values are: [padding top, padding bottom, padding left, padding right]
     * @param stride_y stride in height
     * @param stride_x stride in width
     * @param name name of layer
     */
    DepthwiseConv2D(const int output_exponent,
                    const Filter<feature_t> *filter,
                    const Bias<bias_t> *bias = NULL,
                    const Activation<feature_t> *activation = NULL,
                    const padding_type_t padding_type = PADDING_VALID,
                    std::vector<int> padding = {},
                    const int stride_y = 1,
                    const int stride_x = 1,
                    const char *name = "DepthwiseConv2D") : Layer(name),
                                                            output_exponent(output_exponent),
                                                            filter(filter),
                                                            stride_y(stride_y),
                                                            stride_x(stride_x),
                                                            padding_type(padding_type),
                                                            bias(bias),
                                                            activation(activation),
                                                            padding(padding),
                                                            output_shape({})
    {
        this->output = new Tensor<feature_t>;
        if (this->padding_type == PADDING_NOT_SET)
        {
            assert(this->padding.size() == 4);
        }
    }

    /**
     * @brief Destroy the DepthwiseConv2D object.
     */
    ~DepthwiseConv2D()
    {
        if (this->output != NULL)
        {
            delete this->output;
        }
    }

    /**
     * @brief Update output shape and padding.
     *
     * @param input as an input
     * @param print_shape whether to print the output shape
     */
    void build(Tensor<feature_t> &input, bool print_shape = false)
    {
        assert(input.shape[0] > 0);
        assert(input.shape[1] > 0);
        assert(input.shape.size() == 3);
        assert(this->filter->shape.size() == 4);
        assert(input.shape[2] == this->filter->shape[2]);

        this->output_shape = nn::get_output_shape(input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type, false, this->padding);
        this->output->set_shape(this->output_shape);
        this->output->set_exponent(this->output_exponent);

        if (this->padding_type != PADDING_NOT_SET)
        {
            this->padding = nn::get_pad_size(this->output_shape, input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
        }
        this->output->free_element();

        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& DepthwiseConv2D result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call DepthwiseConv2D operation.
     *
     * @param input as an input
     * @param autoload_enable one of true or false,
     *                        - true: load input and output from PSRAM to CACHE automatically
     *                        - false: do not
     * @param assign_core not effective yet
     * @return DepthwiseConv2D result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input, bool autoload_enable = false, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        DL_LOG_LAYER_LATENCY_START();
        if (this->output->shape != this->output_shape)
        {
            this->output->set_shape(this->output_shape);
        }

        this->output->malloc_element();
        this->output->set_exponent(this->output_exponent);
        DL_LOG_LAYER_LATENCY_END(this->name, "apply");

        if (autoload_enable)
        {
            dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
                                           (uint32_t)(input.element), input.get_size() * sizeof(feature_t));
        }

        DL_LOG_LAYER_LATENCY_START();
        nn::depthwise_conv2d(*this->output, input, this->padding, *(this->filter), this->stride_y, this->stride_x, this->bias, this->activation, assign_core);
        DL_LOG_LAYER_LATENCY_END(this->name, "depthwise_conv2d");

        return *this->output;
    }

    /**
     * @brief Preload the filter to Cache.
     * NOTE: Call this layer's preload() before the previous layer's call(), so that the filter can be loaded while the previous layer is still calculating.
     */
    void preload()
    {
        size_t size = sizeof(feature_t);
        int shape_size = this->filter->shape.size();
        for (int i = 0; i < shape_size; ++i)
        {
            size *= filter->shape[i];
        }
        dl::tool::cache::preload_func((uint32_t)(this->filter->element), size);
    }
};
} // namespace layer
} // namespace dl
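
The preload() note above suggests a simple pipelining pattern. A hedged sketch (the layer objects `conv1` and `dw2` are assumed to be built already, and preload_func is assumed to be asynchronous, as the NOTE implies):

// Fetch dw2's filter into cache so the load can overlap conv1's computation.
dw2.preload();
dl::Tensor<int16_t> &t1 = conv1.call(input);
dl::Tensor<int16_t> &t2 = dw2.call(t1);
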
@ -0,0 +1,128 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief ExpandDims(input): insert dimension(s) of size 1 into the input's shape at the given axis position(s).
 *
 * @tparam feature_t data type of the tensor elements
 */
template <typename feature_t>
class ExpandDims : public Layer
{
private:
    std::vector<int> output_shape; /*<! output shape of ExpandDims >*/
    std::vector<int> axis;         /*<! position where the new axis is placed >*/
    Tensor<feature_t> *output;     /*<! output ptr of ExpandDims >*/
    bool inplace;                  /*<! true: the output will store to input0
                                        false: the output will store to a separate memory >*/

public:
    int output_exponent;

    /**
     * @brief Construct a new ExpandDims object
     *
     * @param axis position where the new axis is placed
     * @param name name of layer
     * @param inplace true: the output will store to input
     *                false: the output will store to a separate memory
     */
    ExpandDims(std::vector<int> axis, const char *name = "ExpandDims", bool inplace = false) : Layer(name),
                                                                                               output_shape({}),
                                                                                               axis(axis),
                                                                                               output(NULL),
                                                                                               inplace(inplace)
    {
    }

    /**
     * @brief Destroy the ExpandDims object
     */
    ~ExpandDims()
    {
        if ((!this->inplace) && (this->output != NULL))
        {
            delete this->output;
        }
    }

    /**
     * @brief Update output shape.
     *
     * @param input as an input
     * @param print_shape whether to print the output shape
     */
    void build(Tensor<feature_t> &input, bool print_shape = false)
    {
        this->output_exponent = input.exponent;
        if (!this->inplace)
        {
            if (this->output == NULL)
            {
                this->output = new Tensor<feature_t>;
            }
            this->output->set_exponent(this->output_exponent);
            this->output->set_shape(input.shape);
            this->output->expand_dims(this->axis);
            this->output->free_element();
        }
        else
        {
            this->output = &input;
            this->output->set_shape(input.shape);
            this->output->expand_dims(this->axis);
        }
        this->output_shape = this->output->shape;

        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& ExpandDims result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call ExpandDims operation.
     *
     * @param input as an input
     * @return Tensor<feature_t>& ExpandDims result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        if (!this->inplace)
        {
            DL_LOG_LAYER_LATENCY_START();
            this->output->set_exponent(input.exponent);
            this->output->set_shape(this->output_shape);
            this->output->copy_element(input, true);
            DL_LOG_LAYER_LATENCY_END(this->name, "ExpandDims");
        }
        else
        {
            DL_LOG_LAYER_LATENCY_START();
            this->output->set_shape(this->output_shape);
            DL_LOG_LAYER_LATENCY_END(this->name, "ExpandDims");
        }
        return *this->output;
    }
};
} // namespace layer
} // namespace dl
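
A short sketch of the two modes (the shapes are assumptions for illustration):

using namespace dl;
extern Tensor<int16_t> x; // assumed shape [H, W]

layer::ExpandDims<int16_t> ed({2}); // copies into its own output tensor
ed.build(x);
Tensor<int16_t> &y = ed.call(x);    // y has shape [H, W, 1]; x is untouched

layer::ExpandDims<int16_t> ed_ip({2}, "ExpandDims", /*inplace=*/true);
ed_ip.build(x);
ed_ip.call(x);                      // x itself now has shape [H, W, 1]
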
@ -0,0 +1,120 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief Flatten(input): flatten the input into one dimension.
 *
 * @tparam feature_t data type of the tensor elements
 */
template <typename feature_t>
class Flatten : public Layer
{
private:
    int output_exponent;           /*<! exponent of output >*/
    Tensor<feature_t> *output;     /*<! output ptr of Flatten >*/
    bool inplace;                  /*<! true: the output will store to input0
                                        false: the output will store to a separate memory >*/
    std::vector<int> output_shape; /*<! output shape of Flatten >*/

public:
    /**
     * @brief Construct a new Flatten object
     *
     * @param name name of layer
     * @param inplace true: the output will store to input0
     *                false: the output will store to a separate memory
     */
    Flatten(const char *name = "Flatten", bool inplace = false) : Layer(name), output(NULL), inplace(inplace), output_shape({})
    {}

    /**
     * @brief Destroy the Flatten object
     */
    ~Flatten()
    {
        if ((!this->inplace) && (this->output != NULL))
        {
            delete this->output;
        }
    }

    /**
     * @brief Update output shape.
     *
     * @param input as an input
     * @param print_shape whether to print the output shape
     */
    void build(Tensor<feature_t> &input, bool print_shape = false)
    {
        this->output_exponent = input.exponent;
        this->output_shape = {input.get_size()};
        if (!this->inplace)
        {
            if (this->output == NULL)
            {
                this->output = new Tensor<feature_t>;
            }
            this->output->set_exponent(this->output_exponent);
            this->output->set_shape(this->output_shape);
            this->output->free_element();
        }
        else
        {
            this->output = &input;
            this->output->set_shape(this->output_shape);
        }
        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& Flatten result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call Flatten operation.
     *
     * @param input as an input
     * @return Tensor<feature_t>& Flatten result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        if (!this->inplace)
        {
            DL_LOG_LAYER_LATENCY_START();
            this->output->set_exponent(input.exponent);
            this->output->flatten();
            this->output->copy_element(input, true);
            DL_LOG_LAYER_LATENCY_END(this->name, "flatten");
        }
        else
        {
            DL_LOG_LAYER_LATENCY_START();
            this->output->flatten();
            DL_LOG_LAYER_LATENCY_END(this->name, "flatten");
        }
        return *this->output;
    }
};
} // namespace layer
} // namespace dl
@ -0,0 +1,167 @@
#pragma once

#include "dl_nn_fully_connected.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief Activation(FullyConnected(input, filter) + bias).
 *
 * @tparam feature_t supports int16_t and int8_t,
 *         - int16_t: stands for operation in int16_t quantize
 *         - int8_t: stands for operation in int8_t quantize
 * @tparam bias_t supports int16_t and int8_t, must specify when using int8 per-channel quantization
 *         - int16_t: for int16 quantization and int8 per-channel quantization
 *         - int8_t: for int8 per-tensor quantization
 */
template <typename feature_t, typename bias_t = feature_t>
class FullyConnected : public Layer
{
private:
    const int output_exponent;               /*<! exponent of output >*/
    const bool flatten;                      /*<! true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
                                                  false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ..., xn, output_dim] >*/
    const Filter<feature_t> *filter;         /*<! filter of FullyConnected >*/
    const Bias<bias_t> *bias;                /*<! bias of FullyConnected, if you don't specify anything, no bias is added >*/
    const Activation<feature_t> *activation; /*<! activation of FullyConnected, if you don't specify anything, no activation is applied >*/
    Tensor<feature_t> *output;               /*<! output ptr of FullyConnected >*/
    std::vector<int> output_shape;           /*<! output shape of FullyConnected >*/

public:
    /**
     * @brief Construct a new FullyConnected object.
     *
     * @param output_exponent exponent of output
     * @param filter filter of FullyConnected
     * @param bias bias of FullyConnected; if you don't specify anything, no bias is added
     * @param activation activation of FullyConnected; if you don't specify anything, no activation is applied
     * @param flatten true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
     *                false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ..., xn, output_dim]
     * @param name name of layer
     */
    FullyConnected(const int output_exponent,
                   const Filter<feature_t> *filter,
                   const Bias<bias_t> *bias = NULL,
                   const Activation<feature_t> *activation = NULL,
                   const bool flatten = true,
                   const char *name = "FullyConnected") : Layer(name),
                                                          output_exponent(output_exponent),
                                                          flatten(flatten),
                                                          filter(filter),
                                                          bias(bias),
                                                          activation(activation),
                                                          output_shape({})
    {
        this->output = new Tensor<feature_t>;
    }

    /**
     * @brief Destroy the FullyConnected object.
     */
    ~FullyConnected()
    {
        if (this->output != NULL)
        {
            delete this->output;
        }
    }

    /**
     * @brief Update output shape.
     *
     * @param input as an input
     * @param print_shape whether to print the output shape
     */
    void build(Tensor<feature_t> &input, bool print_shape = false)
    {
        assert(this->filter->shape.size() == 4);
        assert(this->filter->shape[0] == 1);
        assert(this->filter->shape[1] == 1);
        if (this->flatten)
        {
            assert(input.get_size() == this->filter->shape[2]);
            this->output_shape = {this->filter->shape[3]};
        }
        else
        {
            assert(input.shape.back() == this->filter->shape[2]);
            this->output_shape = input.shape;
            this->output_shape[this->output_shape.size() - 1] = this->filter->shape[3];
        }
        this->output->set_shape(this->output_shape);
        this->output->set_exponent(this->output_exponent);
        this->output->free_element();

        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& FullyConnected result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call FullyConnected operation
     *
     * @param input as an input
     * @param autoload_enable one of true or false,
     *                        - true: load input and output from PSRAM to CACHE automatically
     *                        - false: do not
     * @param assign_core not effective yet
     * @return FullyConnected result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input, bool autoload_enable = false, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        DL_LOG_LAYER_LATENCY_START();
        if (this->output->shape != this->output_shape)
        {
            this->output->set_shape(this->output_shape);
        }
        this->output->malloc_element();
        this->output->set_exponent(this->output_exponent);
        DL_LOG_LAYER_LATENCY_END(this->name, "apply");

        if (autoload_enable)
        {
            dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
                                           (uint32_t)(input.element), input.get_size() * sizeof(feature_t));
        }

        DL_LOG_LAYER_LATENCY_START();
        nn::fully_connected(*this->output, input, *(this->filter), this->bias, this->activation, this->flatten, assign_core);
        DL_LOG_LAYER_LATENCY_END(this->name, "fully_connected");
        return *this->output;
    }

    /**
     * @brief Preload the filter to Cache.
     * NOTE: Call this layer's preload() before the previous layer's call(), so that the filter can be loaded while the previous layer is still calculating.
     */
    void preload()
    {
        size_t size = sizeof(feature_t);
        int shape_size = this->filter->shape.size();
        for (int i = 0; i < shape_size; ++i)
        {
            size *= filter->shape[i];
        }
        dl::tool::cache::preload_func((uint32_t)(this->filter->element), size);
    }
};
} // namespace layer
} // namespace dl
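
A hedged classifier-head sketch (the weights, the exponent, and `feature_map` are assumptions for illustration):

using namespace dl;
extern const Filter<int16_t> fc_filter; // assumed shape [1, 1, input_dim, output_dim]
extern const Bias<int16_t> fc_bias;
extern Tensor<int16_t> feature_map;

layer::FullyConnected<int16_t> fc(/*output_exponent=*/-6, &fc_filter, &fc_bias);
fc.build(feature_map);                          // flatten = true: any input whose size matches input_dim
Tensor<int16_t> &logits = fc.call(feature_map); // shape [output_dim]
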
@ -0,0 +1,126 @@
#pragma once

#include <vector>
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_global_avg_pool2d.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief GlobalAveragePool2D(input).
 *
 * @tparam feature_t supports int16_t and int8_t,
 *         - int16_t: stands for operation in int16_t quantize
 *         - int8_t: stands for operation in int8_t quantize
 */
template <typename feature_t>
class GlobalAveragePool2D : public Layer
{
private:
    const int output_exponent;     /*<! exponent of output >*/
    std::vector<int> output_shape; /*<! output shape of GlobalAveragePool2D >*/
    Tensor<feature_t> *output;     /*<! output ptr of GlobalAveragePool2D >*/

public:
    /**
     * @brief Construct a new GlobalAveragePool2D object.
     *
     * @param output_exponent exponent of output
     * @param name name of layer
     */
    GlobalAveragePool2D(const int output_exponent, const char *name = "GlobalAveragePool2D") : Layer(name),
                                                                                               output_exponent(output_exponent),
                                                                                               output_shape({})
    {
        this->output = new Tensor<feature_t>;
    }

    /**
     * @brief Destroy the GlobalAveragePool2D object.
     */
    ~GlobalAveragePool2D()
    {
        if (this->output != NULL)
        {
            delete this->output;
        }
    }

    /**
     * @brief Update output shape.
     *
     * @param input as an input
     * @param print_shape whether to print the output shape
     */
    void build(Tensor<feature_t> &input, bool print_shape = false)
    {
        assert(input.shape[0] > 0);
        assert(input.shape[1] > 0);
        assert(input.shape.size() == 3);

        std::vector<int> output_shape(input.shape.size(), 1);
        output_shape[2] = input.shape[2];
        this->output_shape = output_shape;
        this->output->set_shape(this->output_shape);
        this->output->set_exponent(this->output_exponent);
        this->output->free_element();

        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& GlobalAveragePool2D result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call GlobalAveragePool2D operation
     *
     * @param input as an input
     * @param autoload_enable one of true or false,
     *                        - true: load input and output from PSRAM to CACHE automatically
     *                        - false: do not
     * @return GlobalAveragePool2D result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input, uint8_t autoload_enable = 0)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        DL_LOG_LAYER_LATENCY_START();
        if (this->output->shape != this->output_shape)
        {
            this->output->set_shape(this->output_shape);
        }
        this->output->malloc_element();
        this->output->set_exponent(this->output_exponent);
        DL_LOG_LAYER_LATENCY_END(this->name, "apply");

        if (autoload_enable)
        {
            dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
                                           (uint32_t)(input.element), input.get_size() * sizeof(feature_t));
        }

        DL_LOG_LAYER_LATENCY_START();
        nn::global_avg_pool2d(*this->output, input);
        DL_LOG_LAYER_LATENCY_END(this->name, "global_avg_pool2d");

        return *this->output;
    }
};
} // namespace layer
} // namespace dl
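
For a [height, width, channel] input this pool reduces over height and width only, so the result is a [1, 1, channel] tensor. A hedged sketch (the exponent and input are assumptions):

using namespace dl;
extern Tensor<int16_t> feature_map; // assumed [H, W, C]

layer::GlobalAveragePool2D<int16_t> gap(/*output_exponent=*/-7);
gap.build(feature_map);
Tensor<int16_t> &pooled = gap.call(feature_map); // shape [1, 1, C]
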
@ -0,0 +1,121 @@
#pragma once

#include <vector>
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_global_max_pool2d.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief GlobalMaxPool2D(input).
 *
 * @tparam feature_t supports int16_t and int8_t,
 *         - int16_t: stands for operation in int16_t quantize
 *         - int8_t: stands for operation in int8_t quantize
 */
template <typename feature_t>
class GlobalMaxPool2D : public Layer
{
private:
    Tensor<feature_t> *output;     /*<! output ptr of GlobalMaxPool2D >*/
    std::vector<int> output_shape; /*<! output shape of GlobalMaxPool2D >*/

public:
    /**
     * @brief Construct a new GlobalMaxPool2D object.
     *
     * @param name name of layer
     */
    GlobalMaxPool2D(const char *name = "GlobalMaxPool2D") : Layer(name), output_shape({})
    {
        this->output = new Tensor<feature_t>;
    }

    /**
     * @brief Destroy the GlobalMaxPool2D object.
     */
    ~GlobalMaxPool2D()
    {
        if (this->output != NULL)
        {
            delete this->output;
        }
    }

    /**
     * @brief Update output shape and exponent.
     *
     * @param input as an input
     * @param print_shape whether to print the output shape
     */
    void build(Tensor<feature_t> &input, bool print_shape = false)
    {
        assert(input.shape[0] > 0);
        assert(input.shape[1] > 0);
        assert(input.shape.size() == 3);
        this->output->set_exponent(input.exponent);

        std::vector<int> output_shape(input.shape.size(), 1);
        output_shape[2] = input.shape[2];
        this->output_shape = output_shape;
        this->output->set_shape(this->output_shape);
        this->output->free_element();

        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& GlobalMaxPool2D result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call GlobalMaxPool2D operation
     *
     * @param input as an input
     * @param autoload_enable one of true or false,
     *                        - true: load input and output from PSRAM to CACHE automatically
     *                        - false: do not
     * @return GlobalMaxPool2D result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input, uint8_t autoload_enable = 0)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        DL_LOG_LAYER_LATENCY_START();
        if (this->output->shape != this->output_shape)
        {
            this->output->set_shape(this->output_shape);
        }
        this->output->malloc_element();
        this->output->set_exponent(input.exponent);
        DL_LOG_LAYER_LATENCY_END(this->name, "apply");

        if (autoload_enable)
        {
            dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
                                           (uint32_t)(input.element), input.get_size() * sizeof(feature_t));
        }

        DL_LOG_LAYER_LATENCY_START();
        nn::global_max_pool2d(*this->output, input);
        DL_LOG_LAYER_LATENCY_END(this->name, "global_max_pool2d");

        return *this->output;
    }
};
} // namespace layer
} // namespace dl
@ -0,0 +1,141 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_leakyrelu.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief LeakyReLU(input).
 *
 * @tparam feature_t supports int16_t and int8_t,
 *         - int16_t: stands for operation in int16_t quantize
 *         - int8_t: stands for operation in int8_t quantize
 */
template <typename feature_t>
class LeakyReLU : public Layer
{
private:
    feature_t activation_alpha;    /*<! quantized alpha >*/
    int activation_exponent;       /*<! exponent of quantized alpha >*/
    Tensor<feature_t> *output;     /*<! output ptr of leakyrelu >*/
    bool inplace;                  /*<! true: the output will store to input0
                                        false: the output will store to a separate memory >*/
    std::vector<int> output_shape; /*<! output shape of leakyrelu >*/

public:
    /**
     * @brief Construct a new LeakyReLU object
     *
     * @param activation_alpha quantized alpha
     * @param activation_exponent exponent of quantized alpha
     * @param name name of leakyrelu
     * @param inplace true: the output will store to input0
     *                false: the output will store to a separate memory
     */
    LeakyReLU(const int activation_alpha, const int activation_exponent, const char *name = "LeakyReLU", bool inplace = false) : Layer(name), output(NULL), output_shape({})
    {
        this->activation_alpha = activation_alpha;
        this->activation_exponent = activation_exponent;
        this->inplace = inplace;
    }

    /**
     * @brief Destroy the LeakyReLU object
     */
    ~LeakyReLU()
    {
        if ((!this->inplace) && (this->output != NULL))
        {
            delete this->output;
        }
    }

    /**
     * @brief Update output shape and exponent.
     *
     * @param input as an input
     * @param print_shape whether to print the output shape
     */
    void build(Tensor<feature_t> &input, bool print_shape = false)
    {
        this->output_shape = input.shape;
        if (!this->inplace)
        {
            if (this->output == NULL)
            {
                this->output = new Tensor<feature_t>;
            }
            this->output->set_shape(this->output_shape);
            this->output->set_exponent(input.exponent);
            this->output->free_element();
        }
        else
        {
            this->output = &input;
            this->output->set_shape(this->output_shape);
        }

        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& LeakyReLU result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call LeakyReLU operation.
     *
     * @param input as an input
     * @param assign_core not effective yet
     * @return LeakyReLU result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        if (!this->inplace)
        {
            DL_LOG_LAYER_LATENCY_START();
            if (this->output->shape != this->output_shape)
            {
                this->output->set_shape(this->output_shape);
            }
            this->output->malloc_element();
            this->output->set_exponent(input.exponent);
            DL_LOG_LAYER_LATENCY_END(this->name, "apply");

            DL_LOG_LAYER_LATENCY_START();
            nn::leakyrelu(*this->output, input, this->activation_alpha, this->activation_exponent, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
        }
        else
        {
            DL_LOG_LAYER_LATENCY_START();
            if (this->output->shape != this->output_shape)
            {
                this->output->set_shape(this->output_shape);
            }
            nn::leakyrelu<true>(*this->output, input, this->activation_alpha, this->activation_exponent, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
        }

        return *this->output;
    }
};
} // namespace layer
} // namespace dl
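
The constructor takes alpha already quantized: the effective slope is approximately activation_alpha * 2^activation_exponent. A hedged sketch of deriving it from a float slope (the 0.1 slope and the exponent are assumptions):

#include <cmath>

using namespace dl;
extern Tensor<int16_t> x;

const int activation_exponent = -7;
const int activation_alpha = (int)std::round(0.1 / std::pow(2.0, activation_exponent)); // 12.8 -> 13

layer::LeakyReLU<int16_t> lrelu(activation_alpha, activation_exponent);
lrelu.build(x);
Tensor<int16_t> &y = lrelu.call(x);
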
@ -0,0 +1,143 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_nn_max2d.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief Max2D(input0, input1).
 * NOTE: maximum is element-wise, i.e., output[i,j,k] = max(input0[i,j,k], input1[i,j,k])
 *
 * @tparam feature_t supports int16_t and int8_t,
 *         - int16_t: stands for operation in int16_t quantize
 *         - int8_t: stands for operation in int8_t quantize
 */
template <typename feature_t>
class Max2D : public Layer
{
private:
    Tensor<feature_t> *output;     /*<! output ptr of max2d >*/
    bool inplace;                  /*<! true: the output will store to input0
                                        false: the output will store to a separate memory >*/
    std::vector<int> output_shape; /*<! output shape of max2d >*/

public:
    /**
     * @brief Construct a new Max2D object.
     *
     * @param name name of max2d
     * @param inplace true: the output will store to input0
     *                false: the output will store to a separate memory
     */
    Max2D(const char *name = "Max2D", bool inplace = false) : Layer(name),
                                                              output(NULL), inplace(inplace), output_shape({})
    {
    }

    /**
     * @brief Destroy the Max2D object
     */
    ~Max2D()
    {
        if ((!this->inplace) && (this->output != NULL))
        {
            delete this->output;
        }
    }

    /**
     * @brief Update output shape and exponent.
     * NOTE: input0.shape must equal input1.shape.
     *       input0.exponent must equal input1.exponent.
     *
     * @param input0 as one input
     * @param input1 as another input
     * @param print_shape whether to print the output shape
     */
    void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
    {
        assert(input0.is_same_shape(input1));
        assert(input0.exponent == input1.exponent);
        this->output_shape = input0.shape;

        if (!this->inplace)
        {
            if (this->output == NULL)
            {
                this->output = new Tensor<feature_t>;
            }
            this->output->set_exponent(input0.exponent);
            this->output->set_shape(this->output_shape);
            this->output->free_element();
        }
        else
        {
            this->output = &input0;
        }

        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& Max2D result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call Max2D operation.
     *
     * @param input0 as one input
     * @param input1 as another input
     * @param assign_core not effective yet
     * @return Max2D result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input0, Tensor<feature_t> &input1, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        if (!this->inplace)
        {
            DL_LOG_LAYER_LATENCY_START();
            if (this->output->shape != this->output_shape)
            {
                this->output->set_shape(this->output_shape);
            }
            this->output->malloc_element();
            this->output->set_exponent(input0.exponent);
            DL_LOG_LAYER_LATENCY_END(this->name, "apply");

            DL_LOG_LAYER_LATENCY_START();
            nn::max2d(*this->output, input0, input1, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "max2d");
        }
        else
        {
            DL_LOG_LAYER_LATENCY_START();
            if (this->output->shape != this->output_shape)
            {
                this->output->set_shape(this->output_shape);
            }
            nn::max2d<true>(*this->output, input0, input1, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "max2d");
        }

        return *this->output;
    }
};
} // namespace layer
} // namespace dl
@ -0,0 +1,157 @@
#pragma once

#include <vector>
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_max_pool2d.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief MaxPool2D(input).
 *
 * @tparam feature_t supports int16_t and int8_t,
 *         - int16_t: stands for operation in int16_t quantize
 *         - int8_t: stands for operation in int8_t quantize
 */
template <typename feature_t>
class MaxPool2D : public Layer
{
private:
    std::vector<int> filter_shape;     /*<! filter shape in [filter_height, filter_width] >*/
    const int stride_y;                /*<! stride in height >*/
    const int stride_x;                /*<! stride in width >*/
    const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
    std::vector<int> padding;          /*<! padding size needed in [top, bottom, left, right] of this operation >*/
    Tensor<feature_t> *output;         /*<! output ptr of MaxPool2D >*/
    std::vector<int> output_shape;     /*<! output shape of MaxPool2D >*/

public:
    /**
     * @brief Construct a new MaxPool2D object.
     *
     * @param filter_shape filter shape in [filter_height, filter_width]
     * @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
     *                     - PADDING_VALID means no padding
     *                     PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
     *                     such that the output has the same height/width dimension as the input,
     *                     - PADDING_SAME_END results in TensorFlow-style padding
     *                     - PADDING_SAME_BEGIN results in MXNet-style padding
     *                     - PADDING_NOT_SET means padding with the specific "padding" value below
     * @param padding if padding_type is PADDING_NOT_SET, this value will be used as the padding size.
     *                Its size must be 4; the values are: [padding top, padding bottom, padding left, padding right]
     * @param stride_y stride in height
     * @param stride_x stride in width
     * @param name name of layer
     */
    MaxPool2D(const std::vector<int> filter_shape,
              const padding_type_t padding_type = PADDING_VALID,
              std::vector<int> padding = {},
              const int stride_y = 1,
              const int stride_x = 1,
              const char *name = "MaxPool2D") : Layer(name),
                                                filter_shape(filter_shape),
                                                stride_y(stride_y),
                                                stride_x(stride_x),
                                                padding_type(padding_type),
                                                padding(padding),
                                                output_shape({})
    {
        this->output = new Tensor<feature_t>;
        if (this->padding_type == PADDING_NOT_SET)
        {
            assert(this->padding.size() == 4);
        }
    }

    /**
     * @brief Destroy the MaxPool2D object.
     */
    ~MaxPool2D()
    {
        if (this->output != NULL)
        {
            delete this->output;
        }
    }

    /**
     * @brief Update output shape and padding.
     *
     * @param input as an input
     * @param print_shape whether to print the output shape
     */
    void build(Tensor<feature_t> &input, bool print_shape = false)
    {
        assert(input.shape[0] > 0);
        assert(input.shape[1] > 0);
        assert(input.shape.size() == 3);

        this->output->set_exponent(input.exponent);
        this->output_shape = nn::get_output_shape(input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type, false, this->padding);
        this->output->set_shape(this->output_shape);

        if (this->padding_type != PADDING_NOT_SET)
        {
            this->padding = nn::get_pad_size(this->output_shape, input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
        }
        this->output->free_element();

        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& MaxPool2D result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call MaxPool2D operation
     *
     * @param input as an input
     * @param autoload_enable one of true or false,
     *                        - true: load input and output from PSRAM to CACHE automatically
     *                        - false: do not
     * @return MaxPool2D result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input, uint8_t autoload_enable = 0)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        DL_LOG_LAYER_LATENCY_START();
        if (this->output->shape != this->output_shape)
        {
            this->output->set_shape(this->output_shape);
        }
        this->output->malloc_element();
        this->output->set_exponent(input.exponent);
        DL_LOG_LAYER_LATENCY_END(this->name, "apply");

        if (autoload_enable)
        {
            dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
                                           (uint32_t)(input.element), input.get_size() * sizeof(feature_t));
        }

        DL_LOG_LAYER_LATENCY_START();
        nn::max_pool2d(*this->output, input, this->padding, this->filter_shape, this->stride_y, this->stride_x);
        DL_LOG_LAYER_LATENCY_END(this->name, "max_pool2d");

        return *this->output;
    }
};
} // namespace layer
} // namespace dl
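
A hedged sketch of explicit padding (values assumed): with PADDING_NOT_SET the four-element vector is used verbatim instead of being derived in build().

using namespace dl;
extern Tensor<int16_t> feature_map; // assumed [H, W, C]

layer::MaxPool2D<int16_t> pool({2, 2}, PADDING_NOT_SET,
                               /*padding=*/{0, 1, 0, 1},
                               /*stride_y=*/2, /*stride_x=*/2);
pool.build(feature_map);
Tensor<int16_t> &p = pool.call(feature_map);
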
@ -0,0 +1,143 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_nn_min2d.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief Min2D(input0, input1).
 * NOTE: minimum is element-wise, i.e., output[i,j,k] = min(input0[i,j,k], input1[i,j,k])
 *
 * @tparam feature_t supports int16_t and int8_t,
 *         - int16_t: stands for operation in int16_t quantize
 *         - int8_t: stands for operation in int8_t quantize
 */
template <typename feature_t>
class Min2D : public Layer
{
private:
    Tensor<feature_t> *output;     /*<! output ptr of min2d >*/
    bool inplace;                  /*<! true: the output will store to input0
                                        false: the output will store to a separate memory >*/
    std::vector<int> output_shape; /*<! output shape of min2d >*/

public:
    /**
     * @brief Construct a new Min2D object
     *
     * @param name name of min2d
     * @param inplace true: the output will store to input0
     *                false: the output will store to a separate memory
     */
    Min2D(const char *name = "Min2D", bool inplace = false) : Layer(name),
                                                              output(NULL),
                                                              inplace(inplace),
                                                              output_shape({}) {}

    /**
     * @brief Destroy the Min2D object
     */
    ~Min2D()
    {
        if ((!this->inplace) && (this->output != NULL))
        {
            delete this->output;
        }
    }

    /**
     * @brief Update output shape and exponent.
     * NOTE: input0.shape must equal input1.shape.
     *       input0.exponent must equal input1.exponent.
     *
     * @param input0 as one input
     * @param input1 as another input
     * @param print_shape whether to print the output shape
     */
    void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
    {
        assert(input0.is_same_shape(input1));
        assert(input0.exponent == input1.exponent);
        this->output_shape = input0.shape;

        if (!this->inplace)
        {
            if (this->output == NULL)
            {
                this->output = new Tensor<feature_t>;
            }
            this->output->set_shape(this->output_shape);
            this->output->set_exponent(input0.exponent);
            this->output->free_element();
        }
        else
        {
            this->output = &input0;
        }

        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& Min2D result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call Min2D operation
     *
     * @param input0 as one input
     * @param input1 as another input
     * @param assign_core not effective yet
     * @return Min2D result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input0, Tensor<feature_t> &input1, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        if (!this->inplace)
        {
            DL_LOG_LAYER_LATENCY_START();
            if (this->output->shape != this->output_shape)
            {
                this->output->set_shape(this->output_shape);
            }
            this->output->malloc_element();
            this->output->set_exponent(input0.exponent);
            DL_LOG_LAYER_LATENCY_END(this->name, "apply");

            DL_LOG_LAYER_LATENCY_START();
            nn::min2d(*this->output, input0, input1, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "min2d");
        }
        else
        {
            DL_LOG_LAYER_LATENCY_START();
            if (this->output->shape != this->output_shape)
            {
                this->output->set_shape(this->output_shape);
            }
            nn::min2d<true>(*this->output, input0, input1, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "min2d");
        }

        return *this->output;
    }
};
} // namespace layer
} // namespace dl
@ -0,0 +1,52 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief Neural Network Model.
 *
 * @tparam feature_t supports int16_t and int8_t,
 *         - int16_t: stands for operation in int16_t quantize
 *         - int8_t: stands for operation in int8_t quantize
 */
template <typename feature_t>
class Model
{
private:
    std::vector<int> input_shape; /*<! input shape in [height, width, channel] >*/

public:
    /**
     * @brief Destroy the Model object.
     */
    virtual ~Model() {}

    /**
     * @brief Build the model: update the output shape and padding of each layer.
     *
     * @param input as an input
     */
    virtual void build(Tensor<feature_t> &input) = 0;

    /**
     * @brief Call the model layer by layer.
     *
     * @param input as an input
     */
    virtual void call(Tensor<feature_t> &input) = 0;

    /**
     * @brief If input.shape has changed, call Model.build() first; otherwise skip it. Then call Model.call().
     *
     * @param input as an input
     */
    void forward(Tensor<feature_t> &input);
};
} // namespace layer
} // namespace dl
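
Model is abstract; a concrete network subclasses it, owns its layers, and wires them up in build() and call(). A hedged two-layer sketch (the filter, bias, and exponent are assumptions for illustration):

using namespace dl;
extern const Filter<int16_t> f0; // assumed
extern const Bias<int16_t> b0;   // assumed

class MyModel : public layer::Model<int16_t>
{
public:
    layer::Conv2D<int16_t> conv;
    layer::ReLU<int16_t> relu;

    MyModel() : conv(/*output_exponent=*/-7, &f0, &b0, NULL, PADDING_SAME_END), relu("relu") {}

    void build(Tensor<int16_t> &input)
    {
        this->conv.build(input);
        this->relu.build(this->conv.get_output());
    }

    void call(Tensor<int16_t> &input)
    {
        this->conv.call(input);
        this->relu.call(this->conv.get_output());
    }
};

// forward() rebuilds only when input.shape changes, then runs call():
// MyModel model; model.forward(input);
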
@ -0,0 +1,151 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_mul2d.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief Activation(Multiply2D(input0, input1)).
 * NOTE: multiplication is element-wise, i.e., output[i,j,k] = input0[i,j,k] * input1[i,j,k]
 *
 * @tparam feature_t supports int16_t and int8_t,
 *         - int16_t: stands for operation in int16_t quantize
 *         - int8_t: stands for operation in int8_t quantize
 */
template <typename feature_t>
class Mul2D : public Layer
{
private:
    const int output_exponent;               /*<! exponent of output >*/
    const Activation<feature_t> *activation; /*<! activation of Mul2D, if you don't specify anything, no activation is applied >*/
    Tensor<feature_t> *output;               /*<! output ptr of Mul2D >*/
    bool inplace;                            /*<! true: the output will store to input0
                                                  false: the output will store to a separate memory >*/
    std::vector<int> output_shape;           /*<! output shape of Mul2D >*/

public:
    /**
     * @brief Construct a new Mul2D object.
     *
     * @param output_exponent exponent of output
     * @param activation activation of Mul2D, if you don't specify anything, no activation is applied
     * @param name name of layer
     * @param inplace true: the output will store to input0
     *                false: the output will store to a separate memory
     */
    Mul2D(const int output_exponent,
          const Activation<feature_t> *activation = NULL,
          const char *name = "Mul2D",
          bool inplace = false) : Layer(name),
                                  output_exponent(output_exponent),
                                  activation(activation),
                                  output(NULL),
                                  inplace(inplace),
                                  output_shape({})
    {
    }

    /**
     * @brief Destroy the Mul2D object.
     */
    ~Mul2D()
    {
        if ((!this->inplace) && (this->output != NULL))
        {
            delete this->output;
        }
    }

    /**
     * @brief Update output shape.
     * NOTE: input0.shape must equal input1.shape.
     *
     * @param input0 as one input
     * @param input1 as another input
     * @param print_shape whether to print the output shape
     */
    void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
    {
        assert(input0.is_same_shape(input1));
        this->output_shape = input0.shape;

        if (!this->inplace)
        {
            if (this->output == NULL)
            {
                this->output = new Tensor<feature_t>;
            }
            this->output->set_exponent(this->output_exponent);
            this->output->set_shape(this->output_shape);
            this->output->free_element();
        }
        else
        {
            this->output = &input0;
        }

        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& Mul2D result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call Mul2D operation.
     *
     * @param input0 as one input
     * @param input1 as another input
     * @param assign_core not effective yet
     * @return Mul2D result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input0, Tensor<feature_t> &input1, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        if (!this->inplace)
        {
            DL_LOG_LAYER_LATENCY_START();
            if (this->output->shape != this->output_shape)
            {
                this->output->set_shape(this->output_shape);
            }
            this->output->malloc_element();
            this->output->set_exponent(this->output_exponent);
            DL_LOG_LAYER_LATENCY_END(this->name, "apply");

            DL_LOG_LAYER_LATENCY_START();
            nn::mul2d(*this->output, input0, input1, this->activation, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "mul2d");
        }
        else
        {
            DL_LOG_LAYER_LATENCY_START();
            if (this->output->shape != this->output_shape)
            {
                this->output->set_shape(this->output_shape);
            }
            nn::mul2d<true>(*this->output, input0, input1, this->activation, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "mul2d");
        }

        return *this->output;
    }
};
} // namespace layer
} // namespace dl
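
Since values here are fixed-point (value = element * 2^exponent), the raw element-wise product carries exponent input0.exponent + input1.exponent; output_exponent picks the scale the result is requantized to. A hedged sketch (tensors and exponent assumed):

using namespace dl;
extern Tensor<int16_t> a, b; // assumed same shape

layer::Mul2D<int16_t> mul(/*output_exponent=*/-7);
mul.build(a, b);
Tensor<int16_t> &prod = mul.call(a, b);
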
@ -0,0 +1,145 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_prelu.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
 * @brief PReLU(input).
 *
 * @tparam feature_t supports int16_t and int8_t,
 *         - int16_t: stands for operation in int16_t quantize
 *         - int8_t: stands for operation in int8_t quantize
 */
template <typename feature_t>
class PReLU : public Layer
{
private:
    const feature_t *activation_element; /*<! quantized alpha elements along channel axis >*/
    int activation_exponent;             /*<! exponent of quantized alpha elements >*/
    Tensor<feature_t> *output;           /*<! output ptr of prelu >*/
    bool inplace;                        /*<! true: the output will store to input0
                                              false: the output will store to a separate memory >*/
    std::vector<int> output_shape;       /*<! output shape of prelu >*/

public:
    /**
     * @brief Construct a new PReLU object
     *
     * @param activation_element quantized alpha elements along channel axis
     * @param activation_exponent exponent of quantized alpha elements
     * @param name name of prelu
     * @param inplace true: the output will store to input0
     *                false: the output will store to a separate memory
     */
    PReLU(const feature_t *activation_element,
          const int activation_exponent = 0,
          const char *name = "PReLU",
          bool inplace = false) : Layer(name),
                                  activation_element(activation_element),
                                  activation_exponent(activation_exponent),
                                  output(NULL),
                                  inplace(inplace),
                                  output_shape({})
    {
    }

    /**
     * @brief Destroy the PReLU object
     */
    ~PReLU()
    {
        if ((!this->inplace) && (this->output != NULL))
        {
            delete this->output;
        }
    }

    /**
     * @brief Update output shape and exponent.
     *
     * @param input as an input
     * @param print_shape whether to print the output shape
     */
    void build(Tensor<feature_t> &input, bool print_shape = false)
    {
        this->output_shape = input.shape;
        if (!this->inplace)
        {
            if (this->output == NULL)
            {
                this->output = new Tensor<feature_t>;
            }
            this->output->set_exponent(input.exponent);
            this->output->set_shape(this->output_shape);
            this->output->free_element();
        }
        else
        {
            this->output = &input;
        }

        if (print_shape)
        {
            std::cout << this->name << " | ";
            this->output->print_shape();
        }
    }

    /**
     * @brief Get the output
     *
     * @return Tensor<feature_t>& PReLU result
     */
    Tensor<feature_t> &get_output()
    {
        return *this->output;
    }

    /**
     * @brief Call PReLU operation.
     *
     * @param input as an input
     * @param assign_core not effective yet
     * @return PReLU result
     */
    Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
    {
        DL_LOG_LAYER_LATENCY_INIT();

        if (!this->inplace)
        {
            DL_LOG_LAYER_LATENCY_START();
            if (this->output->shape != this->output_shape)
            {
                this->output->set_shape(this->output_shape);
            }
            this->output->set_exponent(input.exponent);
            this->output->malloc_element();
            DL_LOG_LAYER_LATENCY_END(this->name, "apply");

            DL_LOG_LAYER_LATENCY_START();
            nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "prelu");
        }
        else
        {
            DL_LOG_LAYER_LATENCY_START();
            if (this->output->shape != this->output_shape)
            {
                this->output->set_shape(this->output_shape);
            }
            nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
            DL_LOG_LAYER_LATENCY_END(this->name, "prelu");
        }

        return *this->output;
    }
};
} // namespace layer
} // namespace dl
|
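For orientation, a usage sketch of the layer declared above; the input tensor, the channel count, and the quantized alpha values are illustrative assumptions, not part of this header:

// Hedged sketch: per-channel slopes for an assumed 3-channel int16 tensor.
const int16_t alpha[3] = {1638, 1638, 3277};      // alpha * 2^-15 ~= 0.05, 0.05, 0.1
dl::layer::PReLU<int16_t> prelu(alpha, -15, "prelu1");
prelu.build(input, true);                         // input: dl::Tensor<int16_t>
dl::Tensor<int16_t> &output = prelu.call(input);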
@ -0,0 +1,135 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_nn_relu.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
    namespace layer
    {
        /**
         * @brief ReLU(input).
         *
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantize
         *         - int8_t: stands for operation in int8_t quantize
         */
        template <typename feature_t>
        class ReLU : public Layer
        {
        private:
            Tensor<feature_t> *output;     /*<! output ptr of relu >*/
            bool inplace;                  /*<! true: the output will store to input0
                                                false: the output will store to a separate memory >*/
            std::vector<int> output_shape; /*<! output shape of relu >*/

        public:
            /**
             * @brief Construct a new ReLU object
             *
             * @param name    name of relu
             * @param inplace true: the output will store to input0
             *                false: the output will store to a separate memory
             */
            ReLU(const char *name = "ReLU", bool inplace = false) : Layer(name),
                                                                    output(NULL), inplace(inplace), output_shape({})
            {
            }

            /**
             * @brief Destroy the ReLU object
             *
             */
            ~ReLU()
            {
                if ((!this->inplace) && (this->output != NULL))
                {
                    delete this->output;
                }
            }

            /**
             * @brief Update output shape and exponent
             *
             * @param input       as an input
             * @param print_shape whether to print the output shape.
             */
            void build(Tensor<feature_t> &input, bool print_shape = false)
            {
                this->output_shape = input.shape;
                if (!this->inplace)
                {
                    if (this->output == NULL)
                    {
                        this->output = new Tensor<feature_t>;
                    }
                    this->output->set_exponent(input.exponent);
                    this->output->set_shape(this->output_shape);
                    this->output->free_element();
                }
                else
                {
                    this->output = &input;
                }

                if (print_shape)
                {
                    std::cout << this->name << " | ";
                    this->output->print_shape();
                }
            }

            /**
             * @brief Get the output
             *
             * @return Tensor<feature_t>& ReLU result
             */
            Tensor<feature_t> &get_output()
            {
                return *this->output;
            }

            /**
             * @brief Call ReLU operation.
             *
             * @param input       as an input
             * @param assign_core not effective yet
             * @return ReLU result
             */
            Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
            {
                DL_LOG_LAYER_LATENCY_INIT();

                if (!this->inplace)
                {
                    DL_LOG_LAYER_LATENCY_START();
                    if (this->output->shape != this->output_shape)
                    {
                        this->output->set_shape(this->output_shape);
                    }
                    this->output->malloc_element();
                    this->output->set_exponent(input.exponent);
                    DL_LOG_LAYER_LATENCY_END(this->name, "apply");

                    DL_LOG_LAYER_LATENCY_START();
                    nn::relu(*this->output, input, assign_core);
                    DL_LOG_LAYER_LATENCY_END(this->name, "relu");
                }
                else
                {
                    DL_LOG_LAYER_LATENCY_START();
                    if (this->output->shape != this->output_shape)
                    {
                        this->output->set_shape(this->output_shape);
                    }
                    nn::relu(*this->output, input, assign_core);
                    DL_LOG_LAYER_LATENCY_END(this->name, "relu");
                }

                return *this->output;
            }
        };
    } // namespace layer
} // namespace dl
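A minimal in-place usage sketch of the ReLU layer above (`input` is an assumed, pre-existing tensor):

// Hedged sketch: in-place mode overwrites the input tensor's elements.
dl::layer::ReLU<int8_t> relu("relu1", true);
relu.build(input);                             // input: dl::Tensor<int8_t>
dl::Tensor<int8_t> &output = relu.call(input); // &output == &input in this mode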
@ -0,0 +1,124 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
    namespace layer
    {
        /**
         * @brief Reshape(input)
         *
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantize
         *         - int8_t: stands for operation in int8_t quantize
         */
        template <typename feature_t>
        class Reshape : public Layer
        {
        private:
            int output_exponent;           /*<! exponent of output >*/
            Tensor<feature_t> *output;     /*<! output ptr of Reshape >*/
            bool inplace;                  /*<! true: the output will store to input0
                                                false: the output will store to a separate memory >*/
            std::vector<int> output_shape; /*<! output shape of Reshape >*/

        public:
            /**
             * @brief Construct a new Reshape object
             *
             * @param shape   the target shape
             * @param name    name of Reshape layer
             * @param inplace true: the output will store to input0
             *                false: the output will store to a separate memory
             */
            Reshape(std::vector<int> shape, const char *name = "Reshape", bool inplace = false) : Layer(name),
                                                                                                  output(NULL), inplace(inplace), output_shape(shape)
            {
            }

            /**
             * @brief Destroy the Reshape object
             *
             */
            ~Reshape()
            {
                if ((!this->inplace) && (this->output != NULL))
                {
                    delete this->output;
                }
            }

            /**
             * @brief Update output shape and exponent
             *
             * @param input       as an input
             * @param print_shape whether to print the output shape.
             */
            void build(Tensor<feature_t> &input, bool print_shape = false)
            {
                this->output_exponent = input.exponent;
                if (!this->inplace)
                {
                    if (this->output == NULL)
                    {
                        this->output = new Tensor<feature_t>;
                    }
                    this->output->set_exponent(this->output_exponent);
                    this->output->set_shape(this->output_shape);
                    this->output->free_element();
                }
                else
                {
                    this->output = &input;
                    this->output->set_shape(this->output_shape);
                }

                if (print_shape)
                {
                    std::cout << this->name << " | ";
                    this->output->print_shape();
                }
            }

            /**
             * @brief Get the output
             *
             * @return Tensor<feature_t>& Reshape result
             */
            Tensor<feature_t> &get_output()
            {
                return *this->output;
            }

            /**
             * @brief Call Reshape operation.
             *
             * @param input as an input
             * @return Tensor<feature_t>& Reshape result
             */
            Tensor<feature_t> &call(Tensor<feature_t> &input)
            {
                DL_LOG_LAYER_LATENCY_INIT();

                if (!this->inplace)
                {
                    DL_LOG_LAYER_LATENCY_START();
                    this->output->set_exponent(input.exponent);
                    this->output->reshape(this->output_shape);
                    this->output->copy_element(input, true);
                    DL_LOG_LAYER_LATENCY_END(this->name, "reshape");
                }
                else
                {
                    DL_LOG_LAYER_LATENCY_START();
                    this->output->reshape(this->output_shape);
                    DL_LOG_LAYER_LATENCY_END(this->name, "reshape");
                }
                return *this->output;
            }
        };
    } // namespace layer
} // namespace dl
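A usage sketch of the Reshape layer (shapes are illustrative, not from this commit):

// Hedged sketch: flatten an assumed {4, 4, 8} feature map to {1, 1, 128}.
dl::layer::Reshape<int16_t> reshape({1, 1, 128});
reshape.build(input);                            // input: dl::Tensor<int16_t>
dl::Tensor<int16_t> &flat = reshape.call(input);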
@ -0,0 +1,127 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
    namespace layer
    {
        /**
         * @brief Squeeze(input).
         *
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantize
         *         - int8_t: stands for operation in int8_t quantize
         */
        template <typename feature_t>
        class Squeeze : public Layer
        {
        private:
            int output_exponent;           /*<! exponent of output >*/
            Tensor<feature_t> *output;     /*<! output ptr of Squeeze >*/
            bool inplace;                  /*<! true: the output will store to input0
                                                false: the output will store to a separate memory >*/
            int axis;                      /*<! the dim to be removed. make sure the length of the dim is equal to 1.
                                                if axis == INT32_MAX, all the dims with length==1 will be removed. >*/
            std::vector<int> output_shape; /*<! output shape of Squeeze >*/

        public:
            /**
             * @brief Construct a new Squeeze object
             *
             * @param axis    the dim to be removed. make sure the length of the dim is equal to 1.
             *                if axis == INT32_MAX, all the dims with length==1 will be removed.
             * @param name    name of Squeeze layer
             * @param inplace true: the output will store to input0
             *                false: the output will store to a separate memory
             */
            Squeeze(int axis = INT32_MAX, const char *name = "Squeeze", bool inplace = false) : Layer(name), output(NULL), inplace(inplace), axis(axis), output_shape({})
            {
            }

            /**
             * @brief Destroy the Squeeze object
             *
             */
            ~Squeeze()
            {
                if ((!this->inplace) && (this->output != NULL))
                {
                    delete this->output;
                }
            }

            /**
             * @brief Update output shape and exponent
             *
             * @param input       as an input
             * @param print_shape whether to print the output shape.
             */
            void build(Tensor<feature_t> &input, bool print_shape = false)
            {
                this->output_exponent = input.exponent;
                if (!this->inplace)
                {
                    if (this->output == NULL)
                    {
                        this->output = new Tensor<feature_t>;
                    }
                    this->output->set_exponent(this->output_exponent);
                    this->output->set_shape(input.shape);
                    this->output->squeeze(this->axis);
                    this->output->free_element();
                }
                else
                {
                    this->output = &input;
                    this->output->set_shape(input.shape);
                    this->output->squeeze(this->axis);
                }
                this->output_shape = this->output->shape;

                if (print_shape)
                {
                    std::cout << this->name << " | ";
                    this->output->print_shape();
                }
            }

            /**
             * @brief Get the output
             *
             * @return Tensor<feature_t>& Squeeze result
             */
            Tensor<feature_t> &get_output()
            {
                return *this->output;
            }

            /**
             * @brief Call Squeeze operation.
             *
             * @param input as an input
             * @return Tensor<feature_t>& Squeeze result
             */
            Tensor<feature_t> &call(Tensor<feature_t> &input)
            {
                DL_LOG_LAYER_LATENCY_INIT();

                if (!this->inplace)
                {
                    DL_LOG_LAYER_LATENCY_START();
                    this->output->set_exponent(input.exponent);
                    this->output->set_shape(this->output_shape);
                    this->output->copy_element(input, true);
                    DL_LOG_LAYER_LATENCY_END(this->name, "Squeeze");
                }
                else
                {
                    DL_LOG_LAYER_LATENCY_START();
                    this->output->set_shape(this->output_shape);
                    DL_LOG_LAYER_LATENCY_END(this->name, "Squeeze");
                }
                return *this->output;
            }
        };
    } // namespace layer
} // namespace dl
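A usage sketch for Squeeze (shapes are illustrative assumptions):

// Hedged sketch: with the default axis (INT32_MAX), every length-1 dim is
// removed, e.g. an assumed {1, 1, 128} tensor becomes {128}.
dl::layer::Squeeze<int16_t> squeeze;
squeeze.build(input);                               // input: dl::Tensor<int16_t>
dl::Tensor<int16_t> &squeezed = squeeze.call(input);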
@ -0,0 +1,141 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn_sub2d.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
    namespace layer
    {
        /**
         * @brief Activation(Sub2D(input0, input1)).
         *        NOTE: subtraction is element-wise, i.e., output[i,j,k] = input0[i,j,k] - input1[i,j,k]
         *
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantize
         *         - int8_t: stands for operation in int8_t quantize
         */
        template <typename feature_t>
        class Sub2D : public Layer
        {
        private:
            const int output_exponent;               /*<! exponent of output >*/
            const Activation<feature_t> *activation; /*<! activation of Sub2D, if you don't specify anything, no activation is applied >*/
            Tensor<feature_t> *output;               /*<! output ptr of Sub2D >*/
            bool inplace;                            /*<! true: the output will store to input0
                                                          false: the output will store to a separate memory >*/
            std::vector<int> output_shape;           /*<! output shape of Sub2D >*/

        public:
            /**
             * @brief Construct a new Sub2D object.
             *
             * @param output_exponent exponent of output
             * @param activation      activation of Sub2D, if you don't specify anything, no activation is applied
             * @param name            name of layer
             * @param inplace         true: the output will store to input0
             *                        false: the output will store to a separate memory
             */
            Sub2D(const int output_exponent, const Activation<feature_t> *activation = NULL, const char *name = "Sub2D", bool inplace = false) : Layer(name),
                                                                                                                                                 output_exponent(output_exponent), activation(activation), output(NULL), inplace(inplace), output_shape({})
            {
            }

            /**
             * @brief Destroy the Sub2D object.
             */
            ~Sub2D()
            {
                if ((!this->inplace) && (this->output != NULL))
                {
                    delete this->output;
                }
            }

            /**
             * @brief Update output shape.
             *        NOTE: input0.shape must equal to input1.shape.
             *
             * @param input0      as one input
             * @param input1      as another input
             * @param print_shape whether to print the output shape.
             */
            void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
            {
                assert(input0.is_same_shape(input1));
                this->output_shape = input0.shape;
                if (!this->inplace)
                {
                    if (this->output == NULL)
                    {
                        this->output = new Tensor<feature_t>;
                    }
                    this->output->set_exponent(this->output_exponent);
                    this->output->set_shape(this->output_shape);
                    this->output->free_element();
                }
                else
                {
                    this->output = &input0;
                }

                if (print_shape)
                {
                    std::cout << this->name << " | ";
                    this->output->print_shape();
                }
            }

            /**
             * @brief Get the output
             *
             * @return Tensor<feature_t>& Sub2D result
             */
            Tensor<feature_t> &get_output()
            {
                return *this->output;
            }

            /**
             * @brief Call Sub2D operation.
             *
             * @param input0      as one input
             * @param input1      as another input
             * @param assign_core not effective yet
             * @return Sub2D result
             */
            Tensor<feature_t> &call(Tensor<feature_t> &input0, Tensor<feature_t> &input1, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
            {
                DL_LOG_LAYER_LATENCY_INIT();

                if (!this->inplace)
                {
                    DL_LOG_LAYER_LATENCY_START();
                    if (this->output->shape != this->output_shape)
                    {
                        this->output->set_shape(this->output_shape);
                    }
                    this->output->malloc_element();
                    this->output->set_exponent(input0.exponent);
                    DL_LOG_LAYER_LATENCY_END(this->name, "apply");

                    DL_LOG_LAYER_LATENCY_START();
                    nn::sub2d(*this->output, input0, input1, this->activation, assign_core);
                    DL_LOG_LAYER_LATENCY_END(this->name, "sub2d");
                }
                else
                {
                    DL_LOG_LAYER_LATENCY_START();
                    if (this->output->shape != this->output_shape)
                    {
                        this->output->set_shape(this->output_shape);
                    }
                    nn::sub2d<true>(*this->output, input0, input1, this->activation, assign_core, this->output_exponent);
                    DL_LOG_LAYER_LATENCY_END(this->name, "sub2d");
                }
                return *this->output;
            }
        };
    } // namespace layer
} // namespace dl
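A usage sketch for Sub2D (the exponent and the two same-shape tensors are assumptions):

// Hedged sketch: element-wise difference of two quantized tensors, no activation.
dl::layer::Sub2D<int16_t> sub(-10);
sub.build(input0, input1);                         // both: dl::Tensor<int16_t>, same shape
dl::Tensor<int16_t> &diff = sub.call(input0, input1);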
@ -0,0 +1,126 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
    namespace layer
    {
        /**
         * @brief Transpose(input).
         *
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantize
         *         - int8_t: stands for operation in int8_t quantize
         */
        template <typename feature_t>
        class Transpose : public Layer
        {
        private:
            int output_exponent;           /*<! exponent of output >*/
            Tensor<feature_t> *output;     /*<! output ptr of Transpose >*/
            bool inplace;                  /*<! true: the output will store to input0
                                                false: the output will store to a separate memory >*/
            std::vector<int> perm;         /*<! the new arrangement of the dims. if perm == {}, the dims arrangement will be reversed. >*/
            std::vector<int> output_shape; /*<! output shape of Transpose >*/

        public:
            /**
             * @brief Construct a new Transpose object
             *
             * @param perm    the new arrangement of the dims. if perm == {}, the dims arrangement will be reversed.
             * @param name    name of Transpose layer
             * @param inplace true: the output will store to input
             *                false: the output will store to a separate memory
             */
            Transpose(std::vector<int> perm = {}, const char *name = "Transpose", bool inplace = false) : Layer(name), output(NULL), inplace(inplace), perm(perm), output_shape({})
            {
            }

            /**
             * @brief Destroy the Transpose object
             *
             */
            ~Transpose()
            {
                if ((!this->inplace) && (this->output != NULL))
                {
                    delete this->output;
                }
            }

            /**
             * @brief Update output shape and exponent
             *
             * @param input       as an input
             * @param print_shape whether to print the output shape.
             */
            void build(Tensor<feature_t> &input, bool print_shape = false)
            {
                this->output_exponent = input.exponent;
                this->output_shape = input.shape;
                for (int i = 0; i < (int)this->perm.size(); i++)
                {
                    this->output_shape[i] = input.shape[this->perm[i]];
                }
                if (!this->inplace)
                {
                    if (this->output == NULL)
                    {
                        this->output = new Tensor<feature_t>;
                    }
                    this->output->set_exponent(this->output_exponent);
                    this->output->set_shape(this->output_shape);
                    this->output->free_element();
                }
                else
                {
                    this->output = &input;
                    this->output->set_shape(this->output_shape);
                }

                if (print_shape)
                {
                    std::cout << this->name << " | ";
                    this->output->print_shape();
                }
            }

            /**
             * @brief Get the output
             *
             * @return Tensor<feature_t>& Transpose result
             */
            Tensor<feature_t> &get_output()
            {
                return *this->output;
            }

            /**
             * @brief Call Transpose operation.
             *
             * @param input as an input.
             * @return Tensor<feature_t>& Transpose result.
             */
            Tensor<feature_t> &call(Tensor<feature_t> &input)
            {
                DL_LOG_LAYER_LATENCY_INIT();

                if (!this->inplace)
                {
                    DL_LOG_LAYER_LATENCY_START();
                    this->output->set_exponent(input.exponent);
                    this->output->transpose(input, this->perm);
                    DL_LOG_LAYER_LATENCY_END(this->name, "transpose");
                }
                else
                {
                    DL_LOG_LAYER_LATENCY_START();
                    this->output->transpose(this->perm);
                    DL_LOG_LAYER_LATENCY_END(this->name, "transpose");
                }
                return *this->output;
            }
        };
    } // namespace layer
} // namespace dl
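A usage sketch for Transpose (the HWC-to-CHW permutation and input shape are illustrative):

// Hedged sketch: permute an assumed {32, 32, 3} HWC tensor into {3, 32, 32} CHW.
dl::layer::Transpose<int8_t> transpose({2, 0, 1});
transpose.build(input);                          // input: dl::Tensor<int8_t>
dl::Tensor<int8_t> &chw = transpose.call(input);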
188
tools/sdk/esp32c3/include/esp-face/include/math/dl_math.hpp
Normal file
@ -0,0 +1,188 @@
#pragma once

#include "dl_define.hpp"

namespace dl
{
    namespace math
    {
        /**
         * @brief x^a.
         *
         * @param x as a base
         * @param a as an exponent
         * @return x^a
         */
        inline float power(float x, int a)
        {
            if (a > 0)
            {
                return x * power(x, a - 1);
            }
            else if (a < 0)
            {
                return 1 / (x * power(x, -a - 1));
            }
            else
            {
                return 1.f;
            }
        }

        /**
         * @brief sqrt(x).
         *
         * @param x as a base
         * @return sqrt(x)
         */
        inline float sqrt_quick(float x)
        {
            // bit-level approximation via type punning, in the style of the classic fast sqrt trick
            const int result = 0x1fbb4000 + (*(int *)&x >> 1);
            return *(float *)&result;
        }

        /**
         * @brief 1/sqrt(x).
         *
         * @param x as a base
         * @return 1/sqrt(x)
         */
        inline float sqrt_reciprocal_quick(float x)
        {
            float xhalf = 0.5f * x;
            int i = *(int *)&x;             // get bits for floating value
            i = 0x5f375a86 - (i >> 1);      // gives initial guess y0
            x = *(float *)&i;               // convert bits back to float
            x = x * (1.5f - xhalf * x * x); // Newton step, repeating increases accuracy
            return x;
        }

        static const float EN = 0.00001f;

        /**
         * @brief sqrt(x).
         *
         * @param x as a base
         * @return sqrt(x)
         */
        inline float sqrt_newton(float x)
        {
            /**
             * Use the Newton iteration method to find the square root
             * */
            if (x == 0.f)
                return 0.f;
            float result = x;
            float last_value;
            do
            {
                last_value = result;
                result = (last_value + x / last_value) * 0.5;
            } while (DL_ABS(result - last_value) > EN);
            return result;
        }

        /**
         * @brief n-th root of x.
         *
         * @param x as a base
         * @param n root times
         * @return n-th root of x
         */
        inline float root_newton(float x, int n)
        {
            if (n == 2)
                return sqrt_newton(x);
            if (n == 0)
                return 1.f;
            if (n == 1)
                return x;
            if (x == 0.f)
                return 0.f;
            float result = x;
            float last_value;
            float _n = (float)(n - 1) / (float)n; // Newton step: r' = ((n-1)/n)*r + x / (n * r^(n-1))
            do
            {
                last_value = result;
                result = _n * last_value + x / (n * power(last_value, n - 1));
            } while (DL_ABS(result - last_value) > EN);
            return result;
        }

        /**
         * @brief atan(x).
         *
         * @param x as an input
         * @return atan(x) in range [-pi/2, pi/2]
         */
        inline float atan(float x)
        {
            return x * (0.78539816 - (DL_ABS(x) - 1) * (0.2447 + 0.0663 * DL_ABS(x)));
            // float s = x*x;
            // return ((-0.0464964749 * s + 0.15931422) * s - 0.327622764) * s * x + x;
        }

        // TODO:@yuanjiong
        /**
         * @brief
         *
         * @param x
         * @param y
         * @return in range [-pi, pi]
         */
        inline float atan2(float x, float y)
        {
            float ax = DL_ABS(x);
            float ay = DL_ABS(y);
            float eps = 1e-8;
            float a = DL_MIN(ax, ay) / (DL_MAX(ax, ay) + eps);
            float r = atan(a); //[0, pi/2]
            if (ay > ax)
                r = 1.57079633 - r;
            if (x < 0)
                r = 3.14159265 - r;
            if (y < 0)
                r = -r;

            return r;
        }

        /**
         * @brief acos(x).
         *
         * @param x as an input
         * @return acos(x) in range [0, pi]
         */
        inline float acos(float x)
        {
            return atan2(x, sqrt_newton(1.0 - x * x));
        }

        /**
         * @brief asin(x).
         *
         * @param x as an input
         * @return asin(x) in range [-pi/2, pi/2]
         */
        inline float asin(float x)
        {
            return atan2(sqrt_newton(1.0 - x * x), x);
        }

        /**
         * @brief e^x
         *
         * @param x     exponent
         * @param steps iteration steps
         * @return e^x
         */
        inline float exp_fast(double x, int steps)
        {
            x = 1.0 + x / (1 << steps);
            for (int i = 0; i < steps; i++)
                x *= x;
            return x;
        }
    } // namespace math
} // namespace dl
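A quick host-side sanity check of the approximations above (expected values are approximate):

#include <cstdio>
#include "dl_math.hpp"

int main()
{
    printf("sqrt_newton(2)    = %f\n", dl::math::sqrt_newton(2.0f));      // ~1.414214
    printf("root_newton(27,3) = %f\n", dl::math::root_newton(27.0f, 3));  // ~3.0
    printf("exp_fast(1, 16)   = %f\n", dl::math::exp_fast(1.0, 16));      // ~2.71828
    return 0;
}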
@ -0,0 +1,397 @@
#pragma once

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h> // added: calloc/free/rand used below
#include <math.h>
#include <vector>
#include "dl_define.hpp"
#include "dl_tool.hpp"
#include "dl_variable.hpp"
#include "dl_math.hpp" // added: F_norm() below relies on sqrt_newton()
#include "esp_timer.h"

namespace dl
{
    namespace math
    {
        /**
         * @brief the Matrix class
         *
         * @tparam T
         */
        template <typename T>
        class Matrix
        {
        public:
            T **array;
            int h;
            int w;

            Matrix() : h(0), w(0)
            {
                this->array = NULL;
            }

            Matrix(int h, int w) : h(h), w(w)
            {
                this->calloc_element();
            }

            Matrix(int h, int w, T s) : h(h), w(w)
            {
                this->calloc_element();
                this->set_value(s);
            }

            Matrix(const Matrix<T> &mat) : h(mat.h), w(mat.w)
            {
                this->calloc_element();
                this->set_value(mat);
            }

            virtual ~Matrix()
            {
                if (this->array != NULL)
                {
                    for (int i = 0; i < this->h; i++)
                    {
                        free(this->array[i]);
                    }
                    free(this->array);
                    this->array = NULL;
                }
            }

            /**
             * @brief calloc the matrix element
             *
             */
            void calloc_element()
            {
                if ((this->h > 0) && (this->w > 0))
                {
                    this->array = (T **)calloc(this->h, sizeof(T *));
                    for (int i = 0; i < this->h; i++)
                    {
                        this->array[i] = (T *)calloc(this->w, sizeof(T));
                    }
                }
                else
                {
                    this->array = NULL;
                }
            }

            /**
             * @brief Set the matrix element to random number.
             *
             * @param thresh the max abs value of the element.
             */
            void set_random(T thresh = 1)
            {
                unsigned int seed = esp_timer_get_time();
                srand(seed);
                for (int i = 0; i < this->h; i++)
                {
                    for (int j = 0; j < this->w; j++)
                    {
                        this->array[i][j] = ((T)rand()) / (T)(RAND_MAX)*thresh;
                    }
                }
            }

            /**
             * @brief Set the small value to zero
             *
             * @param thresh the threshold of small value
             */
            void set_zero(T thresh = 1e-8)
            {
                for (int i = 0; i < this->h; i++)
                {
                    for (int j = 0; j < this->w; j++)
                    {
                        if (DL_ABS(this->array[i][j]) < thresh)
                        {
                            this->array[i][j] = 0;
                        }
                    }
                }
            }

            /**
             * @brief Set the matrix value from a vector
             *
             * @tparam TT
             * @param mat the input vector
             */
            template <typename TT>
            void set_value(std::vector<TT> mat)
            {
                int area = this->w * this->h;
                assert(area == mat.size());
                int index = 0;
                for (int i = 0; i < this->h; i++)
                {
                    for (int j = 0; j < this->w; j++)
                    {
                        this->array[i][j] = (T)(mat[index++]);
                    }
                }
            }

            /**
             * @brief Set the matrix value from another matrix.
             *
             * @tparam TT
             * @param mat the input matrix.
             */
            template <typename TT>
            void set_value(const Matrix<TT> &mat)
            {
                assert((this->h == mat.h) && (this->w == mat.w));
                for (int i = 0; i < this->h; i++)
                {
                    for (int j = 0; j < this->w; j++)
                    {
                        this->array[i][j] = (T)(mat.array[i][j]);
                    }
                }
            }

            /**
             * @brief Set a part of the matrix value from another matrix.
             *
             * @param h_start the start index of height
             * @param h_end   the end index of height
             * @param w_start the start index of width
             * @param w_end   the end index of width
             * @param mat     the input matrix
             */
            void set_value(int h_start, int h_end, int w_start, int w_end, const Matrix<T> &mat)
            {
                int h = h_end - h_start;
                int w = w_end - w_start;

                assert((h == mat.h) && (w == mat.w));
                assert((h_end <= this->h) && (w_end <= this->w) && (h_start >= 0) && (w_start >= 0));
                for (int i = 0; i < h; i++)
                {
                    for (int j = 0; j < w; j++)
                    {
                        this->array[i + h_start][j + w_start] = mat.array[i][j];
                    }
                }
            }

            /**
             * @brief Set the matrix value to a constant.
             *
             * @tparam TT
             * @param s the input value.
             */
            template <typename TT>
            void set_value(TT s)
            {
                for (int i = 0; i < this->h; i++)
                {
                    for (int j = 0; j < this->w; j++)
                    {
                        this->array[i][j] = (T)s;
                    }
                }
            }

            /**
             * @brief print the matrix element.
             *
             */
            void print_value() const
            {
                printf("h: %d, w: %d\n", this->h, this->w);
                for (int i = 0; i < this->h; i++)
                {
                    for (int j = 0; j < this->w; j++)
                    {
                        printf("%f ", (float)(this->array[i][j]));
                    }
                    printf("\n");
                }
            }

            /**
             * @brief do matrix multiply
             *
             * @param input the input matrix
             * @return Matrix<T> the output matrix
             */
            Matrix<T> matmul(const Matrix<T> &input) const;

            /**
             * @brief transpose the matrix
             *
             * @return Matrix<T> the transposed matrix
             */
            Matrix<T> transpose() const;

            /**
             * @brief get the inverse matrix
             *
             * @return Matrix<T> the output matrix
             */
            Matrix<T> inverse() const;

            /**
             * @brief get the diagonal of the matrix
             *
             * @return Matrix<T> the diagonal
             */
            Matrix<T> diagonal() const;

            /**
             * @brief slice the matrix
             *
             * @param h_start the start index of height
             * @param h_end   the end index of height
             * @param w_start the start index of width
             * @param w_end   the end index of width
             * @return Matrix<T> the output.
             */
            Matrix<T> slice(int h_start, int h_end, int w_start, int w_end) const;

            /**
             * @brief get an identity matrix
             *
             * @param n the dim of the identity matrix
             * @return Matrix<T> the output
             */
            static Matrix<T> identity(int n)
            {
                Matrix<T> A(n, n);
                for (int i = 0; i < n; ++i)
                {
                    A.array[i][i] = 1;
                }
                return A;
            }

            /**
             * @brief get a diag matrix
             *
             * @param d the diagonal value.
             * @return Matrix<T> the output
             */
            static Matrix<T> diag(const Matrix<T> &d)
            {
                assert(d.h == 1);
                Matrix<T> A(d.w, d.w);
                for (int i = 0; i < d.w; ++i)
                {
                    A.array[i][i] = d.array[0][i];
                }
                return A;
            }

            static Matrix<T> arange(uint32_t n)
            {
                Matrix<T> A(1, n);
                for (int i = 0; i < (int)n; ++i)
                {
                    A.array[0][i] = i;
                }
                return A;
            }

            static Matrix<T> arange(uint32_t n1, uint32_t n2)
            {
                int len = n2 - n1;
                assert(len > 0);
                Matrix<T> A(1, len);
                for (int i = 0; i < len; ++i)
                {
                    A.array[0][i] = n1 + i;
                }

                return A;
            }

            /**
             * @brief get the F_norm of the matrix
             *
             * @return T the output F_norm
             */
            T F_norm() const
            {
                T f_n = 0.0;
                for (int i = 0; i < this->h; ++i)
                {
                    for (int j = 0; j < this->w; ++j)
                    {
                        f_n += (this->array[i][j] * this->array[i][j]);
                    }
                }
                f_n = sqrt_newton(f_n);
                return f_n;
            }

            Matrix<T> &operator=(const Matrix<T> &A)
            {
                if ((A.h == this->h) && (A.w == this->w))
                {
                    for (int i = 0; i < A.h; ++i)
                    {
                        for (int j = 0; j < A.w; ++j)
                        {
                            this->array[i][j] = A.array[i][j];
                        }
                    }
                }
                else
                {
                    if (this->array != NULL)
                    {
                        for (int i = 0; i < this->h; ++i)
                        {
                            free(this->array[i]);
                        }
                        free(this->array);
                        this->array = NULL;
                    }
                    this->h = A.h;
                    this->w = A.w;
                    if ((A.h > 0) && (A.w > 0))
                    {
                        this->calloc_element();
                        this->set_value(A);
                    }
                }
                return *this;
            }
        };

        /**
         * @brief Get the affine transform matrix
         *
         * @param source_coord the source coordinates
         * @param dest_coord   the target coordinates
         * @return Matrix<float> the output matrix
         */
        Matrix<float> get_affine_transform(Matrix<float> &source_coord, Matrix<float> &dest_coord);

        /**
         * @brief Get the similarity transform matrix
         *
         * @param source_coord the source coordinates
         * @param dest_coord   the target coordinates
         * @return Matrix<float> the output matrix
         */
        Matrix<float> get_similarity_transform(Matrix<float> &source_coord, Matrix<float> &dest_coord);

        /**
         * @brief Get the perspective transform matrix
         *
         * @param source_coord the source coordinates
         * @param dest_coord   the target coordinates
         * @return Matrix<float> the output matrix
         */
        Matrix<float> get_perspective_transform(Matrix<float> &source_coord, Matrix<float> &dest_coord);
    } // namespace math
} // namespace dl
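A usage sketch of the Matrix API above; it assumes the out-of-line definitions (matmul() and friends) from the accompanying library link in, and the values are arbitrary:

dl::math::Matrix<float> A(2, 2);
A.set_value(std::vector<float>{1, 2, 3, 4});         // row-major fill
dl::math::Matrix<float> I = dl::math::Matrix<float>::identity(2);
dl::math::Matrix<float> B = A.matmul(I);             // B equals A
B.print_value();
printf("F-norm: %f\n", A.F_norm());                  // sqrt(1+4+9+16) ~= 5.4772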
@ -0,0 +1,47 @@
#pragma once

#include <stdint.h>
#include <vector>
#include <list>
#include "dl_detect_define.hpp"

/**
 * @brief Hardware Requirement.
 *        - flash 310kB
 */

class CatFaceDetectMN03
{
private:
    void *model;

public:
    /**
     * @brief Construct a new Cat Face Detect MN03 object.
     *
     * @param score_threshold predicted boxes with score lower than the threshold will be filtered out
     * @param nms_threshold   predicted boxes with IoU higher than the threshold will be filtered out
     * @param top_k           only the k highest-score boxes will be kept
     * @param resize_scale    resize scale to implement on input image
     */
    CatFaceDetectMN03(const float score_threshold, const float nms_threshold, const int top_k, const float resize_scale);

    /**
     * @brief Destroy the Cat Face Detect MN03 object.
     *
     */
    ~CatFaceDetectMN03();

    /**
     * @brief Inference.
     *
     * @tparam T supports uint8_t and uint16_t
     *         - uint8_t: input image is RGB888
     *         - uint16_t: input image is RGB565
     * @param input_element pointer of input image
     * @param input_shape   shape of input image
     * @return detection result
     */
    template <typename T>
    std::list<dl::detect::result_t> &infer(T *input_element, std::vector<int> input_shape);
};
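A usage sketch of the detector above; the thresholds, the RGB565 frame buffer, its shape, and the field names on result_t (taken to follow dl_detect_define.hpp) are assumptions:

// Hedged sketch: run cat-face detection on an assumed 320x240 RGB565 frame.
CatFaceDetectMN03 detector(0.4F, 0.3F, 10, 0.3F);
std::list<dl::detect::result_t> &results = detector.infer(frame, {240, 320, 3}); // frame: uint16_t*
for (const auto &res : results)
    printf("cat face, score %f\n", res.score);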
@ -0,0 +1,68 @@
#pragma once

#include "dl_image.hpp"

typedef struct
{
    int area;                /*!< Area of connected domains >*/
    std::vector<int> center; /*<! centroid of connected domains [x, y] >*/
    std::vector<int> box;    /*<! [left_up_x, left_up_y, right_down_x, right_down_y] >*/
} components_stats_t;

class ColorDetector
{
private:
    std::vector<std::vector<components_stats_t>> results; /*!< detection results >*/

public:
    std::vector<std::vector<uint8_t>> color_thresh; /*!< thresholds of colors; the threshold of each color is composed of 6 numbers >*/
    std::vector<int> area_thresh;                   /*!< the area threshold of each color;
                                                         components with an area smaller than the threshold are filtered out >*/
    bool bgr;                                       /*!< true: the input image is in BGR format
                                                         false: the input image is in RGB format >*/

    /**
     * @brief get the color threshold of a rectangular region in the image
     *
     * @param image the input image
     * @param box   the coordinates of the rectangular region: [left_up_x, left_up_y, right_down_x, right_down_y]
     * @return std::vector<uint8_t> the threshold.
     */
    std::vector<uint8_t> cal_color_thresh(dl::Tensor<uint8_t> &image, std::vector<int> box);

    /**
     * @brief detect the colors based on the color thresholds
     *
     * @param image the input image.
     * @return std::vector<std::vector<components_stats_t>>& detection result.
     */
    std::vector<std::vector<components_stats_t>> &detect(dl::Tensor<uint8_t> &image);

    /**
     * @brief Construct a new Color Detector object
     *
     * @param color_thresh thresholds of colors; the threshold of each color is composed of 6 numbers
     * @param area_thresh  the area threshold of each color; components with an area smaller than the threshold are filtered out
     * @param bgr          true: the input image is in BGR format
     *                     false: the input image is in RGB format
     */
    ColorDetector(std::vector<std::vector<uint8_t>> color_thresh, std::vector<int> area_thresh, bool bgr = false) : color_thresh(color_thresh), area_thresh(area_thresh), bgr(bgr)
    {
    }

    /**
     * @brief Destroy the Color Detector object
     *
     */
    ~ColorDetector() {}

    /**
     * @brief Get the results object
     *
     * @return std::vector<std::vector<components_stats_t>>& the detection result.
     */
    std::vector<std::vector<components_stats_t>> &get_results()
    {
        return this->results;
    }
};
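A usage sketch of ColorDetector; the header does not state what the 6 threshold numbers mean, so the values below (and the min/max-pair layout) are illustrative assumptions:

// Hedged sketch: one tracked color with an assumed {min, max} x 3-channel layout.
std::vector<std::vector<uint8_t>> thresh = {{0, 10, 70, 255, 90, 255}};
std::vector<int> area = {256};                     // drop components smaller than 256 px
ColorDetector detector(thresh, area);
auto &results = detector.detect(image);            // image: dl::Tensor<uint8_t>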
@ -0,0 +1,30 @@
#pragma once

#include "dl_variable.hpp"
#include "face_recognition_tool.hpp"
#include "face_recognizer.hpp"
#include <vector>

using namespace dl;

/**
 * @brief face recognition model v1
 *        input size: 112 x 112 x 3
 *        quantization mode: S16
 *
 */
class FaceRecognition112V1S16 : public FaceRecognizer<int16_t>
{
public:
    /**
     * @brief Construct a new Face_Recognition_112_V1_S16 object
     *
     */
    FaceRecognition112V1S16();

    /**
     * @brief Destroy the Face_Recognition_112_V1_S16 object
     *
     */
    ~FaceRecognition112V1S16();
};
@ -0,0 +1,30 @@
#pragma once

#include "dl_variable.hpp"
#include "face_recognition_tool.hpp"
#include "face_recognizer.hpp"
#include <vector>

using namespace dl;

/**
 * @brief face recognition model v1
 *        input size: 112 x 112 x 3
 *        quantization mode: S8
 *
 */
class FaceRecognition112V1S8 : public FaceRecognizer<int8_t>
{
public:
    /**
     * @brief Construct a new Face_Recognition_112_V1_S8 object
     *
     */
    FaceRecognition112V1S8();

    /**
     * @brief Destroy the Face_Recognition_112_V1_S8 object
     *
     */
    ~FaceRecognition112V1S8();
};
@ -0,0 +1,162 @@
#pragma once

#include "dl_variable.hpp"
#include "dl_define.hpp"
#include "dl_tool.hpp"
#include "dl_math.hpp"
#include "dl_math_matrix.hpp"
#include <vector>
#include <list>
#include <algorithm>
#include <math.h>
#include <string>

/**
 * @brief struct of face similarity
 *
 */
typedef struct
{
    int id;
    std::string name;
    float similarity;
} face_info_t;

/**
 * @brief Face ID
 *
 * @tparam feature_t
 */
template <typename feature_t>
class FaceID
{
public:
    int id;                       /*<! id index >*/
    dl::Tensor<feature_t> id_emb; /*<! id embedding >*/
    std::string name;             /*<! id name >*/

    /**
     * @brief Construct a new Face ID object
     *
     * @param id     id index
     * @param id_emb id embedding
     * @param name   id name
     */
    FaceID(int id, dl::Tensor<feature_t> &id_emb, std::string name = "");

    /**
     * @brief Destroy the Face ID object
     *
     */
    ~FaceID() {}

    /**
     * @brief print the face id information
     *
     */
    void print();
};

namespace face_recognition_tool
{
    /**
     * @brief l2 normalize the feature
     *
     * @param feature
     */
    void l2_norm(dl::Tensor<float> &feature);

    /**
     * @brief calculate the cosine distance of the input ids
     *
     * @param id_1 id 1
     * @param id_2 id 2
     * @param normalized_ids true: the input ids have been normalized.
     *                       false: the input ids have not been normalized.
     * @param type 0: cos dist: [-1, 1]
     *             1: normalized cos dist: [0, 1]
     * @return float the cosine distance
     */
    float cos_distance(dl::Tensor<float> &id_1, dl::Tensor<float> &id_2, bool normalized_ids = true, int8_t type = 0);

    /**
     * @brief transform the image to the input of a mfn model
     *
     * @tparam T
     * @param image      the input image.
     * @param free_input true: free the input image.
     *                   false: do not free the input image.
     * @return dl::Tensor<T>*
     */
    template <typename T>
    dl::Tensor<T> *transform_mfn_input(dl::Tensor<uint8_t> &image, bool free_input = false);

    /**
     * @brief transform the image to the input of a mfn model
     *
     * @tparam T
     * @param image      the input image.
     * @param output     the preprocessed image.
     * @param free_input true: free the input image.
     *                   false: do not free the input image.
     */
    template <typename T>
    void transform_mfn_input(dl::Tensor<uint8_t> &image, dl::Tensor<T> &output, bool free_input = false);

    /**
     * @brief transform the mfn output embedding to a floating-point embedding
     *
     * @tparam T
     * @param input      the input embedding.
     * @param norm       true: normalize the output embedding.
     *                   false: do not normalize the output embedding.
     * @param free_input true: free the input embedding.
     *                   false: do not free the input embedding.
     * @return dl::Tensor<float>*
     */
    template <typename T>
    dl::Tensor<float> *transform_mfn_output(dl::Tensor<T> &input, bool norm = true, bool free_input = false);

    /**
     * @brief transform the mfn output embedding to a floating-point embedding
     *
     * @tparam T
     * @param input      the input embedding.
     * @param output     the output embedding.
     * @param norm       true: normalize the output embedding.
     *                   false: do not normalize the output embedding.
     * @param free_input true: free the input embedding.
     *                   false: do not free the input embedding.
     */
    template <typename T>
    void transform_mfn_output(dl::Tensor<T> &input, dl::Tensor<float> &output, bool norm = true, bool free_input = false);

    /**
     * @brief get the aligned face.
     *
     * @tparam T
     * @param input     input tensor
     * @param output    the output aligned face.
     * @param landmarks the landmarks of the face.
     */
    template <typename T>
    void align_face(dl::Tensor<T> *input, dl::Tensor<T> *output, std::vector<int> &landmarks);

    /**
     * @brief get the aligned face.
     *
     * @tparam T
     * @param input     input image with rgb565 format.
     * @param shape     the shape of the input image.
     * @param output    the output aligned face.
     * @param landmarks the landmarks of the face.
     */
    template <typename T>
    void align_face(uint16_t *input, std::vector<int> shape, dl::Tensor<T> *output, std::vector<int> &landmarks);

} // namespace face_recognition_tool
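A usage sketch for cos_distance; the two embeddings and the 0.55 cutoff (the recognizer's documented default threshold) are the only assumptions:

// Hedged sketch: compare two dl::Tensor<float> embeddings that were produced
// by transform_mfn_output(..., norm = true), i.e. already L2-normalized.
float sim = face_recognition_tool::cos_distance(emb1, emb2, true, 0); // in [-1, 1]
if (sim > 0.55f)
    printf("same person, similarity %f\n", sim);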
@ -0,0 +1,220 @@
#pragma once

#include "dl_variable.hpp"
#include "face_recognition_tool.hpp"
#include <vector>

using namespace dl;

/**
 * @brief Face recognizer.
 *
 * @tparam feature_t
 */
template <typename feature_t>
class FaceRecognizer
{
public:
    /**
     * @brief Construct a new Face Recognizer object
     *
     */
    FaceRecognizer();

    /**
     * @brief Destroy the Face Recognizer object
     *
     */
    virtual ~FaceRecognizer();

    void *model;

    /**
     * @brief Set the face recognition threshold [-1, 1], default thresh: 0.55
     *        Note: If the similarity of two faces is greater than the threshold, they will be judged as the same person
     *
     * @param thresh
     */
    void set_thresh(float thresh);

    /**
     * @brief Get the current threshold of recognizer.
     *
     * @return float current threshold.
     */
    float get_thresh();

    /**
     * @brief Get the input shape of the recognizer.
     *
     * @return std::vector<int> the input shape of the recognizer.
     */
    std::vector<int> get_input_shape();

    /**
     * @brief do forward
     *
     * @param model_input the input data of the face recognition model.
     *                    Note: the input data should have been preprocessed.
     * @return Tensor<feature_t>& the output of the face recognition model.
     */
    Tensor<feature_t> &forward(Tensor<feature_t> &model_input);

    /**
     * @brief recognize face
     *
     * @param image_input the pointer of the input image with format bgr565.
     * @param shape       the shape of the input image
     * @param landmarks   face landmarks coordinates
     * @return face_info_t the recognition result.
     */
    face_info_t recognize(uint16_t *image_input, std::vector<int> shape, std::vector<int> &landmarks);

    /**
     * @brief recognize face
     *
     * @param image_input  the pointer of the input image with format bgr565.
     * @param shape        the shape of the input image
     * @param aligned_face the Tensor to store the intermediate aligned face.
     * @param landmarks    face landmarks coordinates
     * @return face_info_t the recognition result.
     */
    face_info_t recognize(uint16_t *image_input, std::vector<int> shape, Tensor<uint8_t> &aligned_face, std::vector<int> &landmarks);

    /**
     * @brief recognize face
     *
     * @param image_input the Tensor of input image with format bgr888.
     * @param landmarks   face landmarks coordinates
     * @return face_info_t the recognition result.
     */
    face_info_t recognize(Tensor<uint8_t> &image_input, std::vector<int> &landmarks);

    /**
     * @brief recognize face
     *
     * @param image_input  the Tensor of input image with format bgr888.
     * @param aligned_face the Tensor to store the intermediate aligned face.
     * @param landmarks    face landmarks coordinates
     * @return face_info_t the recognition result.
     */
    face_info_t recognize(Tensor<uint8_t> &image_input, Tensor<uint8_t> &aligned_face, std::vector<int> &landmarks);

    /**
     * @brief recognize face
     *
     * @param aligned_face the Tensor of the input aligned face with format bgr888.
     * @return face_info_t the recognition result.
     */
    face_info_t recognize(Tensor<uint8_t> &aligned_face);

    /**
     * @brief recognize the face embedding.
     *
     * @param emb the normalized face embedding.
     * @return face_info_t the recognition result.
     */
    face_info_t recognize(Tensor<float> &emb);

    /**
     * @brief Get the enrolled ids.
     *
     * @return std::vector<face_info_t> a vector of the enrolled face ids
     */
    std::vector<face_info_t> get_enrolled_ids();

    /**
     * @brief Get the face embedding
     *
     * @param id the face id index
     * @return Tensor<float> the face embedding of the face id index.
     *         if there is no matched id, return the embedding of the last input image.
     */
    Tensor<float> &get_face_emb(int id = -1);

    /**
     * @brief Get the number of enrolled ids
     *
     * @return int the number of enrolled ids
     */
    int get_enrolled_id_num();

    /**
     * @brief enroll face id
     *
     * @param image_input the pointer of the input image with format bgr565.
     * @param shape       the shape of the input image
     * @param landmarks   face landmarks coordinates
     * @param name        name of the face id.
     * @return int the face id index of the enrolled embedding.
     */
    int enroll_id(uint16_t *image_input, std::vector<int> shape, std::vector<int> &landmarks, std::string name = "");

    /**
     * @brief enroll face id
     *
     * @param image_input  the pointer of the input image with format bgr565.
     * @param shape        the shape of the input image
     * @param aligned_face the Tensor to store the intermediate aligned face.
     * @param landmarks    face landmarks coordinates
     * @param name         name of the face id.
     * @return int the face id index of the enrolled embedding.
     */
    int enroll_id(uint16_t *image_input, std::vector<int> shape, Tensor<uint8_t> &aligned_face, std::vector<int> &landmarks, std::string name = "");

    /**
     * @brief enroll face id
     *
     * @param image_input the Tensor of input image with format bgr888.
     * @param landmarks   face landmarks coordinates
     * @param name        name of the face id.
     * @return int the face id index of the enrolled embedding.
     */
    int enroll_id(Tensor<uint8_t> &image_input, std::vector<int> &landmarks, std::string name = "");

    /**
     * @brief enroll face id
     *
     * @param image_input  the Tensor of input image with format bgr888.
     * @param aligned_face the Tensor to store the intermediate aligned face.
     * @param landmarks    face landmarks coordinates
     * @param name         name of the face id.
     * @return int the face id index of the enrolled embedding.
     */
    int enroll_id(Tensor<uint8_t> &image_input, Tensor<uint8_t> &aligned_face, std::vector<int> &landmarks, std::string name = "");

    /**
     * @brief enroll face id
     *
     * @param aligned_face the Tensor of the input aligned face with format bgr888.
     * @param name         name of the face id.
     * @return int the face id index of the enrolled embedding.
     */
    int enroll_id(Tensor<uint8_t> &aligned_face, std::string name = "");

    /**
     * @brief enroll the normalized face embedding.
     *
     * @param emb  the normalized face embedding.
     * @param name name of the face id.
     * @return int the face id index of the enrolled embedding.
     */
    int enroll_id(Tensor<float> &emb, std::string name = "");

    /**
     * @brief delete the last enrolled face id.
     *
     * @return int the number of remaining face ids.
     *         if the face ids list is empty, return -1
     */
    int delete_id();

    /**
     * @brief delete the face id with the given id index.
     *
     * @param id face id index.
     * @return int the number of remaining face ids.
     *         if there is no matched id, return -1
     */
    int delete_id(int id);
};
|
||||
#pragma once
|
||||
|
||||
#include <vector>
|
||||
#include <list>
|
||||
#include "dl_detect_define.hpp"
|
||||
|
||||
class HumanFaceDetectMNP01
|
||||
{
|
||||
private:
|
||||
void *model;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Construct a new Human Face Detect MNP01 object.
|
||||
*
|
||||
* @param score_threshold predicted boxes with score lower than the threshold will be filtered out
|
||||
* @param nms_threshold predicted boxes with IoU higher than the threshold will be filtered out
|
||||
* @param top_k first k highest score boxes will be remained
|
||||
*/
|
||||
HumanFaceDetectMNP01(const float score_threshold, const float nms_threshold, const int top_k);
|
||||
|
||||
/**
|
||||
* @brief Destroy the Human Face Detect MNP01 object.
|
||||
*
|
||||
*/
|
||||
~HumanFaceDetectMNP01();
|
||||
|
||||
/**
|
||||
* @brief Inference.
|
||||
*
|
||||
* @tparam T supports uint16_t and uint8_t,
|
||||
* - uint16_t: input image is RGB565
|
||||
* - uint8_t: input image is RGB888
|
||||
* @param input_element pointer of input image
|
||||
* @param input_shape shape of input image
|
||||
* @param candidates candidate boxes on input image
|
||||
* @return detection result
|
||||
*/
|
||||
template <typename T>
|
||||
std::list<dl::detect::result_t> &infer(T *input_element, std::vector<int> input_shape, std::list<dl::detect::result_t> &candidates);
|
||||
};
|
@ -0,0 +1,40 @@
#pragma once

#include <list>
#include <vector>
#include "dl_detect_define.hpp"

class HumanFaceDetectMSR01
{
private:
    void *model;

public:
    /**
     * @brief Construct a new Human Face Detect MSR01 object
     *
     * @param score_threshold predicted boxes with score lower than the threshold will be filtered out
     * @param nms_threshold   predicted boxes with IoU higher than the threshold will be filtered out
     * @param top_k           only the k highest-score boxes will be kept
     * @param resize_scale    resize scale to implement on input image
     */
    HumanFaceDetectMSR01(const float score_threshold, const float nms_threshold, const int top_k, float resize_scale);

    /**
     * @brief Destroy the Human Face Detect MSR01 object
     */
    ~HumanFaceDetectMSR01();

    /**
     * @brief Inference.
     *
     * @tparam T supports uint8_t and uint16_t
     *         - uint8_t: input image is RGB888
     *         - uint16_t: input image is RGB565
     * @param input_element pointer of input image
     * @param input_shape   shape of input image
     * @return detection result
     */
    template <typename T>
    std::list<dl::detect::result_t> &infer(T *input_element, std::vector<int> input_shape);
};
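The `candidates` parameter of MNP01's infer() suggests a two-stage pipeline: MSR01 proposes boxes, MNP01 refines them. A sketch with illustrative thresholds and an assumed 320x240 RGB565 frame:

// Hedged sketch: stage 1 proposes candidate faces, stage 2 filters/refines them.
HumanFaceDetectMSR01 stage1(0.1F, 0.5F, 10, 0.2F);
HumanFaceDetectMNP01 stage2(0.5F, 0.3F, 5);
std::list<dl::detect::result_t> &candidates = stage1.infer(frame, {240, 320, 3}); // frame: uint16_t*
std::list<dl::detect::result_t> &faces = stage2.infer(frame, {240, 320, 3}, candidates);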
61
tools/sdk/esp32c3/include/esp-face/include/nn/dl_nn.hpp
Normal file
@ -0,0 +1,61 @@
#pragma once
#include <vector>
#include "dl_define.hpp"
#include "dl_tool.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief Get the output shape.
         *
         * @param input_shape  input shape
         * @param filter_shape filter shape with dilation
         * @param stride_y     stride in height
         * @param stride_x     stride in width
         * @param pad_type     one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN
         * @param is_conv2d    one of true or false,
         *                     - true: serves Conv2D
         *                     - false: serves other operations
         * @return std::vector<int> output shape
         */
        std::vector<int> get_output_shape(const std::vector<int> &input_shape, const std::vector<int> &filter_shape, const int stride_y, const int stride_x, const padding_type_t pad_type, const bool is_conv2d = false, std::vector<int> padding = {});

        /**
         * @brief Get the pad size.
         *
         * @param output_shape output shape
         * @param input_shape  input shape
         * @param filter_shape filter shape with dilation
         * @param stride_y     stride in height
         * @param stride_x     stride in width
         * @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN
         * @return padding size
         */
        std::vector<int> get_pad_size(const std::vector<int> &output_shape, const std::vector<int> &input_shape, const std::vector<int> &filter_shape, const int stride_y, const int stride_x, const padding_type_t padding_type);
    } // namespace nn
} // namespace dl

#if DL_LOG_NN_LATENCY
/**
 * @brief Initialize the latency timer.
 */
#define DL_LOG_NN_LATENCY_INIT() dl::tool::Latency latency

/**
 * @brief Start timing.
 */
#define DL_LOG_NN_LATENCY_START() latency.start()

/**
 * @brief Stop timing and print the result.
 */
#define DL_LOG_NN_LATENCY_END(key) \
    latency.end();                 \
    latency.print("nn", key)
#else
#define DL_LOG_NN_LATENCY_INIT()
#define DL_LOG_NN_LATENCY_START()
#define DL_LOG_NN_LATENCY_END(key)
#endif
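As a worked example of what these helpers compute (a sketch of the usual ceil-mode SAME-padding arithmetic; verify the exact convention against the implementation): a 56-wide input with a 3-wide filter and stride 1 keeps width 56 under PADDING_SAME_*, which needs 2 pixels of total padding, split 1/1:

#include <algorithm>
#include <cstdio>

int main()
{
    const int in = 56, filter = 3, stride = 1;
    const int out = (in + stride - 1) / stride;                    // ceil(in / stride)
    const int pad = std::max((out - 1) * stride + filter - in, 0); // total padding
    // PADDING_SAME_END places the odd extra pixel at the end (TensorFlow style),
    // PADDING_SAME_BEGIN at the beginning (MXNet style).
    printf("output: %d, pad: [%d, %d]\n", out, pad / 2, pad - pad / 2);
    return 0;
}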
@ -0,0 +1,91 @@
#pragma once

#include <limits.h>
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief activation(add2d(input0, input1)).
         *
         * @param output          as an output
         * @param input0          as one input
         * @param input1          as another input
         * @param activation      activation of add2d; if you don't specify anything, no activation is applied
         * @param assign_core     not effective yet
         * @param output_exponent exponent of output; must be specified if, and only if, the operation is in-place
         */
        void add2d(Tensor<int16_t> &output,
                   Tensor<int16_t> &input0,
                   Tensor<int16_t> &input1,
                   const Activation<int16_t> *const activation = NULL,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
                   const int output_exponent = INT_MIN);

        /**
         * @brief activation(add2d(input0, input1)).
         *
         * @param output          as an output
         * @param input0          as one input
         * @param input1          as another input
         * @param activation      activation of add2d; if you don't specify anything, no activation is applied
         * @param assign_core     not effective yet
         * @param output_exponent exponent of output; must be specified if, and only if, the operation is in-place
         */
        void add2d(Tensor<int8_t> &output,
                   Tensor<int8_t> &input0,
                   Tensor<int8_t> &input1,
                   const Activation<int8_t> *const activation = NULL,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
                   const int output_exponent = INT_MIN);

        /**
         * @brief activation(add2d(input0, input1)).
         *
         * @tparam inplace   whether to store the output directly in input0
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @param output_exponent exponent of output
         * @param input0          as one input
         * @param input1          as another input
         * @param activation      activation of add2d; if you don't specify anything, no activation is applied
         * @param assign_core     not effective yet
         * @return add2d result, or no return (result stored in input0)
         */
        template <bool inplace = false, typename feature_t>
        auto add2d(const int output_exponent,
                   Tensor<feature_t> &input0,
                   Tensor<feature_t> &input1,
                   const Activation<feature_t> *activation,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
        {
            assert(input0.is_same_shape(input1));

            DL_LOG_NN_LATENCY_INIT();

            Tensor<feature_t> output;
            if constexpr (!inplace)
            {
                DL_LOG_NN_LATENCY_START();
                output.set_exponent(output_exponent).set_shape(input0.shape).malloc_element();
                DL_LOG_NN_LATENCY_END("apply");

                DL_LOG_NN_LATENCY_START();
                add2d(output, input0, input1, activation, assign_core);
                DL_LOG_NN_LATENCY_END("add2d");

                return output;
            }
            else
            {
                DL_LOG_NN_LATENCY_START();
                add2d(input0, input0, input1, activation, assign_core, output_exponent);
                input0.set_exponent(output_exponent);
                DL_LOG_NN_LATENCY_END("add2d");
            }
        }
    } // namespace nn
} // namespace dl
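A hedged usage sketch of the out-of-place template, assuming the usual esp-dl convention that a tensor's real value is its raw element times 2^exponent (so -8 below means 1/256 resolution; the exponent choice is this example's assumption):

#include "dl_nn_add2d.hpp"

dl::Tensor<int16_t> add_example(dl::Tensor<int16_t> &a, dl::Tensor<int16_t> &b)
{
    // Allocates and returns a fresh output tensor quantized with exponent -8;
    // nullptr skips the activation.
    return dl::nn::add2d(-8, a, b, nullptr);
}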
@ -0,0 +1,102 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
#include <stdint.h>

namespace dl
{
    namespace nn
    {
        /**
         * @brief avg_pool2d(input).
         *
         * @param output       as an output
         * @param input        as an input
         * @param padding      padding size needed in [top, bottom, left, right] of this operation
         * @param filter_shape filter shape in [filter_height, filter_width]
         * @param stride_y     stride in height
         * @param stride_x     stride in width
         * @param assign_core  not effective yet
         */
        void avg_pool2d(Tensor<int16_t> &output,
                        Tensor<int16_t> &input,
                        std::vector<int> &padding,
                        std::vector<int> &filter_shape,
                        const int stride_y,
                        const int stride_x,
                        const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief avg_pool2d(input).
         *
         * @param output       as an output
         * @param input        as an input
         * @param padding      padding size needed in [top, bottom, left, right] of this operation
         * @param filter_shape filter shape in [filter_height, filter_width]
         * @param stride_y     stride in height
         * @param stride_x     stride in width
         * @param assign_core  not effective yet
         */
        void avg_pool2d(Tensor<int8_t> &output,
                        Tensor<int8_t> &input,
                        std::vector<int> &padding,
                        std::vector<int> &filter_shape,
                        const int stride_y,
                        const int stride_x,
                        const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief avg_pool2d(input).
         *
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @param output_exponent exponent of output
         * @param input           as an input
         * @param filter_shape    filter shape in [filter_height, filter_width]
         * @param stride_y        stride in height
         * @param stride_x        stride in width
         * @param padding_type    one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN,
         *                        - PADDING_VALID: no padding
         *                        - PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
         *                          such that the output has the same height/width dimensions as the input;
         *                          PADDING_SAME_END pads in TensorFlow style, PADDING_SAME_BEGIN in MXNet style
         * @param assign_core     not effective yet
         * @return avg_pool2d result
         */
        template <typename feature_t>
        Tensor<feature_t> avg_pool2d(const int output_exponent,
                                     Tensor<feature_t> &input,
                                     std::vector<int> filter_shape,
                                     const int stride_y,
                                     const int stride_x,
                                     const padding_type_t padding_type,
                                     const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
        {
            DL_LOG_NN_LATENCY_INIT();

            DL_LOG_NN_LATENCY_START();
            std::vector<int> output_shape = get_output_shape(input.shape, filter_shape, stride_y, stride_x, padding_type);
            Tensor<feature_t> output;
            output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
            DL_LOG_NN_LATENCY_END("apply");

            std::vector<int> padding(4, 0);

            DL_LOG_NN_LATENCY_START();
            if (padding_type == PADDING_SAME_END || padding_type == PADDING_SAME_BEGIN)
            {
                padding = get_pad_size(output_shape, input.shape, filter_shape, stride_y, stride_x, padding_type);
            }
            DL_LOG_NN_LATENCY_END("padding");

            DL_LOG_NN_LATENCY_START();
            avg_pool2d(output, input, padding, filter_shape, stride_y, stride_x, assign_core);
            DL_LOG_NN_LATENCY_END("avg_pool2d");

            return output;
        }
    } // namespace nn
} // namespace dl
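A hedged sketch of the template variant: 2x2 average pooling with stride 2 and no padding on a [height, width, channel] tensor (the using-directive is only so PADDING_VALID resolves whichever namespace it lives in):

#include "dl_nn_avg_pool2d.hpp"
using namespace dl;

Tensor<int16_t> pool_example(Tensor<int16_t> &x)
{
    // Keep the input's exponent for the output; halves height and width.
    return nn::avg_pool2d(x.exponent, x, /*filter_shape=*/{2, 2},
                          /*stride_y=*/2, /*stride_x=*/2, PADDING_VALID);
}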
@ -0,0 +1,63 @@
#pragma once

#include <vector>
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief Concatenate the inputs into output along the given axis.
         *
         * @param output      as an output
         * @param inputs      tensors to be concatenated
         * @param axis        axis to concatenate along; negative values count from the end
         * @param free_inputs whether to free the input tensors afterwards
         */
        template <typename feature_t>
        void concat(Tensor<feature_t> &output, std::vector<Tensor<feature_t> *> &inputs, int axis, bool free_inputs = false);

        template <typename feature_t>
        Tensor<feature_t> concat(std::vector<Tensor<feature_t> *> &inputs, int axis, bool free_inputs = false)
        {
            DL_LOG_NN_LATENCY_INIT();

            DL_LOG_NN_LATENCY_START();
            assert(inputs.size() > 1);
            int shape_size = inputs[0]->shape.size();

            if (axis < 0)
            {
                axis = shape_size + axis;
            }

            assert((axis < shape_size) && (axis > -1));

            int output_shape_axis = inputs[0]->shape[axis];

            for (int i = 1; i < inputs.size(); i++)
            {
                assert(shape_size == inputs[i]->shape.size());
                assert(inputs[i]->exponent == inputs[i - 1]->exponent);
                output_shape_axis += inputs[i]->shape[axis];

                for (int j = 0; j < shape_size; j++)
                {
                    if (j != axis)
                    {
                        assert(inputs[i]->shape[j] == inputs[i - 1]->shape[j]);
                    }
                }
            }
            DL_LOG_NN_LATENCY_END("assert");

            DL_LOG_NN_LATENCY_START();
            Tensor<feature_t> output;
            std::vector<int> output_shape = inputs[0]->shape;
            output_shape[axis] = output_shape_axis;
            output.set_shape(output_shape);
            output.set_exponent(inputs[0]->exponent);
            output.malloc_element();
            DL_LOG_NN_LATENCY_END("malloc");

            DL_LOG_NN_LATENCY_START();
            concat(output, inputs, axis, free_inputs);
            DL_LOG_NN_LATENCY_END("concat");
            return output;
        }
    } // namespace nn
} // namespace dl
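A hedged sketch: concatenating two feature maps along the channel axis (-1). The inputs must agree on exponent and on every dimension except the concatenation axis, per the asserts in the template above:

#include <vector>
#include "dl_nn_concat.hpp"

dl::Tensor<int16_t> concat_example(dl::Tensor<int16_t> &a, dl::Tensor<int16_t> &b)
{
    std::vector<dl::Tensor<int16_t> *> inputs = {&a, &b};
    return dl::nn::concat(inputs, /*axis=*/-1);
}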
@ -0,0 +1,22 @@
#pragma once

#include <vector>
#include "dl_variable.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief concat2d(input_1, input_2, ...)
         *
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @param output as an output
         * @param inputs a bundle of inputs to be concatenated
         */
        template <typename feature_t>
        void concat2d(Tensor<feature_t> &output, std::vector<Tensor<feature_t>> inputs);
    } // namespace nn
} // namespace dl
136
tools/sdk/esp32c3/include/esp-face/include/nn/dl_nn_conv2d.hpp
Normal file
@ -0,0 +1,136 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief activation(conv2d(input, filter) + bias).
         *
         * @param output      as an output
         * @param input       as an input
         * @param padding     padding size needed in [top, bottom, left, right] of this operation
         * @param filter      filter of conv2d
         * @param stride_y    stride in height
         * @param stride_x    stride in width
         * @param bias        bias of conv2d; if you don't specify anything, no bias is added
         * @param activation  activation of conv2d; if you don't specify anything, no activation is applied
         * @param assign_core not effective yet
         */
        void conv2d(Tensor<int16_t> &output,
                    Tensor<int16_t> &input,
                    std::vector<int> &padding,
                    const Filter<int16_t> &filter,
                    const int stride_y,
                    const int stride_x,
                    const Bias<int16_t> *const bias = NULL,
                    const Activation<int16_t> *const activation = NULL,
                    const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief activation(conv2d(input, filter) + bias).
         *
         * @param output      as an output
         * @param input       as an input
         * @param padding     padding size needed in [top, bottom, left, right] of this operation
         * @param filter      filter of conv2d
         * @param stride_y    stride in height
         * @param stride_x    stride in width
         * @param bias        bias of conv2d; if you don't specify anything, no bias is added
         * @param activation  activation of conv2d; if you don't specify anything, no activation is applied
         * @param assign_core not effective yet
         */
        void conv2d(Tensor<int8_t> &output,
                    Tensor<int8_t> &input,
                    std::vector<int> &padding,
                    const Filter<int8_t> &filter,
                    const int stride_y,
                    const int stride_x,
                    const Bias<int8_t> *const bias = NULL,
                    const Activation<int8_t> *const activation = NULL,
                    const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief activation(conv2d(input, filter) + bias).
         *
         * @param output      as an output
         * @param input       as an input
         * @param padding     padding size needed in [top, bottom, left, right] of this operation
         * @param filter      filter of conv2d
         * @param stride_y    stride in height
         * @param stride_x    stride in width
         * @param bias        bias of conv2d; if you don't specify anything, no bias is added
         * @param activation  activation of conv2d; if you don't specify anything, no activation is applied
         * @param assign_core not effective yet
         */
        void conv2d(Tensor<int8_t> &output,
                    Tensor<int8_t> &input,
                    std::vector<int> &padding,
                    const Filter<int8_t> &filter,
                    const int stride_y,
                    const int stride_x,
                    const Bias<int16_t> *const bias = NULL,
                    const Activation<int8_t> *const activation = NULL,
                    const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief activation(conv2d(input, filter) + bias).
         *
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @tparam bias_t    data type of the bias
         * @param output_exponent exponent of output
         * @param input           as an input
         * @param filter          filter of conv2d
         * @param stride_y        stride in height
         * @param stride_x        stride in width
         * @param padding_type    one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN,
         *                        - PADDING_VALID: no padding
         *                        - PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
         *                          such that the output has the same height/width dimensions as the input;
         *                          PADDING_SAME_END pads in TensorFlow style, PADDING_SAME_BEGIN in MXNet style
         * @param bias            bias of conv2d; if you don't specify anything, no bias is added
         * @param activation      activation of conv2d; if you don't specify anything, no activation is applied
         * @param assign_core     not effective yet
         * @return conv2d result
         */
        template <typename feature_t, typename bias_t>
        Tensor<feature_t> conv2d(const int output_exponent,
                                 Tensor<feature_t> &input,
                                 const Filter<feature_t> &filter,
                                 const int stride_y,
                                 const int stride_x,
                                 const padding_type_t padding_type,
                                 const Bias<bias_t> *bias,
                                 const Activation<feature_t> *activation,
                                 const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
        {
            DL_LOG_NN_LATENCY_INIT();

            DL_LOG_NN_LATENCY_START();
            std::vector<int> output_shape = get_output_shape(input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type, true);
            Tensor<feature_t> output;
            output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
            DL_LOG_NN_LATENCY_END("apply");

            std::vector<int> padding(4, 0);
            DL_LOG_NN_LATENCY_START();
            if (padding_type == PADDING_SAME_END || padding_type == PADDING_SAME_BEGIN)
            {
                padding = get_pad_size(output_shape, input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type);
            }
            DL_LOG_NN_LATENCY_END("padding");

            DL_LOG_NN_LATENCY_START();
            conv2d(output, input, padding, filter, stride_y, stride_x, bias, activation, assign_core);
            DL_LOG_NN_LATENCY_END("conv2d");

            return output;
        }
    } // namespace nn
} // namespace dl
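A hedged sketch of the template variant: a stride-1 convolution with TensorFlow-style SAME padding, assuming Filter/Bias/Activation objects produced elsewhere (e.g. by the esp-dl quantization toolchain); the output exponent -10 is this example's assumption:

#include "dl_nn_conv2d.hpp"
using namespace dl;

Tensor<int16_t> conv_example(Tensor<int16_t> &x,
                             const Filter<int16_t> &filter,
                             const Bias<int16_t> &bias,
                             const Activation<int16_t> &relu)
{
    return nn::conv2d(/*output_exponent=*/-10, x, filter,
                      /*stride_y=*/1, /*stride_x=*/1,
                      PADDING_SAME_END, &bias, &relu);
}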
@ -0,0 +1,137 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief activation(depthwise_conv2d(input, filter) + bias).
         *
         * @param output      as an output
         * @param input       as an input
         * @param padding     padding size needed in [top, bottom, left, right] of this operation
         * @param filter      filter of depthwise_conv2d
         * @param stride_y    stride in height
         * @param stride_x    stride in width
         * @param bias        bias of depthwise_conv2d; if you don't specify anything, no bias is added
         * @param activation  activation of depthwise_conv2d; if you don't specify anything, no activation is applied
         * @param assign_core not effective yet
         */
        void depthwise_conv2d(Tensor<int16_t> &output,
                              Tensor<int16_t> &input,
                              std::vector<int> &padding,
                              const Filter<int16_t> &filter,
                              const int stride_y,
                              const int stride_x,
                              const Bias<int16_t> *bias = NULL,
                              const Activation<int16_t> *activation = NULL,
                              const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief activation(depthwise_conv2d(input, filter) + bias).
         *
         * @param output      as an output
         * @param input       as an input
         * @param padding     padding size needed in [top, bottom, left, right] of this operation
         * @param filter      filter of depthwise_conv2d
         * @param stride_y    stride in height
         * @param stride_x    stride in width
         * @param bias        bias of depthwise_conv2d; if you don't specify anything, no bias is added
         * @param activation  activation of depthwise_conv2d; if you don't specify anything, no activation is applied
         * @param assign_core not effective yet
         */
        void depthwise_conv2d(Tensor<int8_t> &output,
                              Tensor<int8_t> &input,
                              std::vector<int> &padding,
                              const Filter<int8_t> &filter,
                              const int stride_y,
                              const int stride_x,
                              const Bias<int8_t> *bias = NULL,
                              const Activation<int8_t> *activation = NULL,
                              const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief activation(depthwise_conv2d(input, filter) + bias).
         *
         * @param output      as an output
         * @param input       as an input
         * @param padding     padding size needed in [top, bottom, left, right] of this operation
         * @param filter      filter of depthwise_conv2d
         * @param stride_y    stride in height
         * @param stride_x    stride in width
         * @param bias        bias of depthwise_conv2d; if you don't specify anything, no bias is added
         * @param activation  activation of depthwise_conv2d; if you don't specify anything, no activation is applied
         * @param assign_core not effective yet
         */
        void depthwise_conv2d(Tensor<int8_t> &output,
                              Tensor<int8_t> &input,
                              std::vector<int> &padding,
                              const Filter<int8_t> &filter,
                              const int stride_y,
                              const int stride_x,
                              const Bias<int16_t> *bias = NULL,
                              const Activation<int8_t> *activation = NULL,
                              const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief activation(depthwise_conv2d(input, filter) + bias).
         *
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @tparam bias_t    data type of the bias
         * @param output_exponent exponent of output
         * @param input           as an input
         * @param filter          filter of depthwise_conv2d
         * @param stride_y        stride in height
         * @param stride_x        stride in width
         * @param padding_type    one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN,
         *                        - PADDING_VALID: no padding
         *                        - PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
         *                          such that the output has the same height/width dimensions as the input;
         *                          PADDING_SAME_END pads in TensorFlow style, PADDING_SAME_BEGIN in MXNet style
         * @param bias            bias of depthwise_conv2d; if you don't specify anything, no bias is added
         * @param activation      activation of depthwise_conv2d; if you don't specify anything, no activation is applied
         * @param assign_core     not effective yet
         * @return depthwise_conv2d result
         */
        template <typename feature_t, typename bias_t>
        Tensor<feature_t> depthwise_conv2d(const int output_exponent,
                                           Tensor<feature_t> &input,
                                           const Filter<feature_t> &filter,
                                           const int stride_y,
                                           const int stride_x,
                                           const padding_type_t padding_type,
                                           const Bias<bias_t> *bias,
                                           const Activation<feature_t> *activation,
                                           const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
        {
            DL_LOG_NN_LATENCY_INIT();

            DL_LOG_NN_LATENCY_START();
            std::vector<int> output_shape = get_output_shape(input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type);
            Tensor<feature_t> output;
            output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
            DL_LOG_NN_LATENCY_END("apply");

            std::vector<int> padding(4, 0);

            DL_LOG_NN_LATENCY_START();
            if (padding_type == PADDING_SAME_END || padding_type == PADDING_SAME_BEGIN)
            {
                padding = get_pad_size(output_shape, input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type);
            }
            DL_LOG_NN_LATENCY_END("padding");

            DL_LOG_NN_LATENCY_START();
            depthwise_conv2d(output, input, padding, filter, stride_y, stride_x, bias, activation, assign_core);
            DL_LOG_NN_LATENCY_END("depthwise_conv2d");

            return output;
        }
    } // namespace nn
} // namespace dl
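A hedged sketch mirroring the conv2d example but without bias or activation; since bias_t cannot be deduced from a bare nullptr, a typed null pointer is passed:

#include "dl_nn_depthwise_conv2d.hpp"
using namespace dl;

Tensor<int8_t> dwconv_example(Tensor<int8_t> &x, const Filter<int8_t> &filter)
{
    const Bias<int8_t> *no_bias = nullptr;      // typed nulls so the template
    const Activation<int8_t> *no_act = nullptr; // parameters can be deduced
    return nn::depthwise_conv2d(/*output_exponent=*/-6, x, filter,
                                /*stride_y=*/2, /*stride_x=*/2,
                                PADDING_SAME_END, no_bias, no_act);
}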
@ -0,0 +1,126 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief activation(FullyConnected(input, filter) + bias).
         *
         * @param output      as an output
         * @param input       as an input
         * @param filter      filter of FullyConnected
         * @param bias        bias of FullyConnected; if you don't specify anything, no bias is added
         * @param activation  activation of FullyConnected; if you don't specify anything, no activation is applied
         * @param flatten     true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
         *                    false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ..., xn, output_dim]
         * @param assign_core not effective yet
         */
        void fully_connected(Tensor<int16_t> &output,
                             Tensor<int16_t> &input,
                             const Filter<int16_t> &filter,
                             const Bias<int16_t> *const bias = NULL,
                             const Activation<int16_t> *const activation = NULL,
                             const bool flatten = true,
                             const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief activation(FullyConnected(input, filter) + bias).
         *
         * @param output      as an output
         * @param input       as an input
         * @param filter      filter of FullyConnected
         * @param bias        bias of FullyConnected; if you don't specify anything, no bias is added
         * @param activation  activation of FullyConnected; if you don't specify anything, no activation is applied
         * @param flatten     true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
         *                    false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ..., xn, output_dim]
         * @param assign_core not effective yet
         */
        void fully_connected(Tensor<int8_t> &output,
                             Tensor<int8_t> &input,
                             const Filter<int8_t> &filter,
                             const Bias<int8_t> *const bias = NULL,
                             const Activation<int8_t> *const activation = NULL,
                             const bool flatten = true,
                             const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief activation(FullyConnected(input, filter) + bias).
         *
         * @param output      as an output
         * @param input       as an input
         * @param filter      filter of FullyConnected
         * @param bias        bias of FullyConnected; if you don't specify anything, no bias is added
         * @param activation  activation of FullyConnected; if you don't specify anything, no activation is applied
         * @param flatten     true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
         *                    false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ..., xn, output_dim]
         * @param assign_core not effective yet
         */
        void fully_connected(Tensor<int8_t> &output,
                             Tensor<int8_t> &input,
                             const Filter<int8_t> &filter,
                             const Bias<int16_t> *const bias = NULL,
                             const Activation<int8_t> *const activation = NULL,
                             const bool flatten = true,
                             const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief activation(FullyConnected(input, filter) + bias).
         *
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @param output_exponent exponent of output
         * @param input           as an input
         * @param filter          filter of FullyConnected
         * @param bias            bias of FullyConnected; if you don't specify anything, no bias is added
         * @param activation      activation of FullyConnected; if you don't specify anything, no activation is applied
         * @param flatten         true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
         *                        false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ..., xn, output_dim]
         * @param assign_core     not effective yet
         * @return FullyConnected result
         */
        template <typename feature_t>
        Tensor<feature_t> fully_connected(const int output_exponent,
                                          Tensor<feature_t> &input,
                                          const Filter<feature_t> &filter,
                                          const Bias<feature_t> *bias,
                                          const Activation<feature_t> *activation,
                                          const bool flatten,
                                          const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
        {
            DL_LOG_NN_LATENCY_INIT();

            DL_LOG_NN_LATENCY_START();
            assert(filter.shape.size() == 4);
            assert(filter.shape[0] == 1);
            assert(filter.shape[1] == 1);

            std::vector<int> output_shape;
            if (flatten)
            {
                assert(input.get_size() == filter.shape[2]);
                output_shape = {filter.shape.back()};
            }
            else
            {
                assert(input.shape.back() == filter.shape[2]);
                output_shape = input.shape;
                output_shape[output_shape.size() - 1] = filter.shape.back();
            }
            Tensor<feature_t> output;
            output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
            DL_LOG_NN_LATENCY_END("apply");

            DL_LOG_NN_LATENCY_START();
            fully_connected(output, input, filter, bias, activation, flatten, assign_core);
            DL_LOG_NN_LATENCY_END("fully_connected");

            return output;
        }
    } // namespace nn
} // namespace dl
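A hedged sketch of a flatten + fully-connected classifier head; with flatten=true the input may have any shape and the filter is expected as [1, 1, input_size, output_dim] (the output exponent is this example's assumption):

#include "dl_nn_fully_connected.hpp"
using namespace dl;

Tensor<int16_t> fc_example(Tensor<int16_t> &features,
                           const Filter<int16_t> &weights,
                           const Bias<int16_t> &bias)
{
    return nn::fully_connected(/*output_exponent=*/-12, features, weights,
                               &bias, /*activation=*/nullptr, /*flatten=*/true);
}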
@ -0,0 +1,66 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
#include <stdint.h>

namespace dl
{
    namespace nn
    {
        /**
         * @brief global_avg_pool2d(input).
         *
         * @param output      as an output
         * @param input       as an input
         * @param assign_core not effective yet
         */
        void global_avg_pool2d(Tensor<int16_t> &output,
                               Tensor<int16_t> &input,
                               const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief global_avg_pool2d(input).
         *
         * @param output      as an output
         * @param input       as an input
         * @param assign_core not effective yet
         */
        void global_avg_pool2d(Tensor<int8_t> &output,
                               Tensor<int8_t> &input,
                               const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief global_avg_pool2d(input).
         *
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @param output_exponent exponent of output
         * @param input           as an input
         * @param assign_core     not effective yet
         * @return global_avg_pool2d result
         */
        template <typename feature_t>
        Tensor<feature_t> global_avg_pool2d(const int output_exponent,
                                            Tensor<feature_t> &input,
                                            const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
        {
            DL_LOG_NN_LATENCY_INIT();

            DL_LOG_NN_LATENCY_START();
            std::vector<int> output_shape(input.shape.size(), 1);
            output_shape[2] = input.shape[2];
            Tensor<feature_t> output;
            output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
            DL_LOG_NN_LATENCY_END("apply");

            DL_LOG_NN_LATENCY_START();
            global_avg_pool2d(output, input, assign_core);
            DL_LOG_NN_LATENCY_END("global_avg_pool2d");

            return output;
        }
    } // namespace nn
} // namespace dl
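A hedged sketch: collapsing a [H, W, C] feature map to [1, 1, C] by averaging, a common head before a fully-connected classifier; keeping the input's exponent is this example's choice:

#include "dl_nn_global_avg_pool2d.hpp"

dl::Tensor<int16_t> gap_example(dl::Tensor<int16_t> &x)
{
    return dl::nn::global_avg_pool2d(/*output_exponent=*/x.exponent, x);
}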
@ -0,0 +1,64 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
#include <stdint.h>

namespace dl
{
    namespace nn
    {
        /**
         * @brief global_max_pool2d(input).
         *
         * @param output      as an output
         * @param input       as an input
         * @param assign_core not effective yet
         */
        void global_max_pool2d(Tensor<int16_t> &output,
                               Tensor<int16_t> &input,
                               const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief global_max_pool2d(input).
         *
         * @param output      as an output
         * @param input       as an input
         * @param assign_core not effective yet
         */
        void global_max_pool2d(Tensor<int8_t> &output,
                               Tensor<int8_t> &input,
                               const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief global_max_pool2d(input).
         *
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @param input       as an input
         * @param assign_core not effective yet
         * @return global_max_pool2d result
         */
        template <typename feature_t>
        Tensor<feature_t> global_max_pool2d(Tensor<feature_t> &input,
                                            const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
        {
            DL_LOG_NN_LATENCY_INIT();

            DL_LOG_NN_LATENCY_START();
            std::vector<int> output_shape(input.shape.size(), 1);
            output_shape[2] = input.shape[2];
            Tensor<feature_t> output;
            output.set_exponent(input.exponent).set_shape(output_shape).malloc_element();
            DL_LOG_NN_LATENCY_END("apply");

            DL_LOG_NN_LATENCY_START();
            global_max_pool2d(output, input, assign_core);
            DL_LOG_NN_LATENCY_END("global_max_pool2d");

            return output;
        }
    } // namespace nn
} // namespace dl
@ -0,0 +1,82 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief leakyrelu(input).
         *
         * @param output              as an output
         * @param input               as an input
         * @param activation_alpha    quantized alpha
         * @param activation_exponent exponent of quantized alpha
         * @param assign_core         not effective yet
         */
        void leakyrelu(Tensor<int16_t> &output,
                       Tensor<int16_t> &input,
                       const int16_t activation_alpha,
                       const int activation_exponent,
                       const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief leakyrelu(input).
         *
         * @param output              as an output
         * @param input               as an input
         * @param activation_alpha    quantized alpha
         * @param activation_exponent exponent of quantized alpha
         * @param assign_core         not effective yet
         */
        void leakyrelu(Tensor<int8_t> &output,
                       Tensor<int8_t> &input,
                       const int8_t activation_alpha,
                       const int activation_exponent,
                       const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief leakyrelu(input).
         *
         * @tparam inplace   whether to store the output directly in input
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @param input               as an input
         * @param activation_alpha    quantized alpha
         * @param activation_exponent exponent of quantized alpha
         * @param assign_core         not effective yet
         * @return leakyrelu result, or no return (result stored in input)
         */
        template <bool inplace = false, typename feature_t>
        auto leakyrelu(Tensor<feature_t> &input,
                       const int activation_alpha,
                       const int activation_exponent,
                       const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
        {
            DL_LOG_NN_LATENCY_INIT();
            Tensor<feature_t> output;
            if constexpr (!inplace)
            {
                DL_LOG_NN_LATENCY_START();
                output.set_exponent(input.exponent).set_shape(input.shape).malloc_element();
                DL_LOG_NN_LATENCY_END("apply");

                DL_LOG_NN_LATENCY_START();
                leakyrelu(output, input, activation_alpha, activation_exponent, assign_core);
                DL_LOG_NN_LATENCY_END("leakyrelu");

                return output;
            }
            else
            {
                DL_LOG_NN_LATENCY_START();
                leakyrelu(input, input, activation_alpha, activation_exponent, assign_core);
                DL_LOG_NN_LATENCY_END("leakyrelu");
            }
        }
    } // namespace nn
} // namespace dl
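The alpha is passed pre-quantized: the real slope is activation_alpha * 2^activation_exponent. A hedged sketch for alpha of roughly 0.1, quantized as 13 * 2^-7 which is about 0.1016:

#include "dl_nn_leakyrelu.hpp"

void leakyrelu_example(dl::Tensor<int16_t> &x)
{
    // In-place variant: the result overwrites x, nothing is returned.
    dl::nn::leakyrelu<true>(x, /*activation_alpha=*/13, /*activation_exponent=*/-7);
}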
@ -0,0 +1,81 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief max2d(input0, input1).
         *
         * @param output      as an output
         * @param input0      as one input
         * @param input1      as another input
         * @param assign_core not effective yet
         */
        void max2d(Tensor<int16_t> &output,
                   Tensor<int16_t> &input0,
                   Tensor<int16_t> &input1,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief max2d(input0, input1).
         *
         * @param output      as an output
         * @param input0      as one input
         * @param input1      as another input
         * @param assign_core not effective yet
         */
        void max2d(Tensor<int8_t> &output,
                   Tensor<int8_t> &input0,
                   Tensor<int8_t> &input1,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief max2d(input0, input1).
         *
         * @tparam inplace   whether to store the output directly in input0
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @param input0      as one input
         * @param input1      as another input
         * @param assign_core not effective yet
         * @return max2d result, or no return (result stored in input0)
         */
        template <bool inplace = false, typename feature_t>
        auto max2d(Tensor<feature_t> &input0,
                   Tensor<feature_t> &input1,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
        {
            assert(input0.is_same_shape(input1));
            assert(input0.exponent == input1.exponent);

            DL_LOG_NN_LATENCY_INIT();
            Tensor<feature_t> output;

            if constexpr (!inplace)
            {
                DL_LOG_NN_LATENCY_START();
                output.set_exponent(input0.exponent).set_shape(input0.shape).malloc_element();
                DL_LOG_NN_LATENCY_END("apply");

                DL_LOG_NN_LATENCY_START();
                max2d(output, input0, input1, assign_core);
                DL_LOG_NN_LATENCY_END("max2d");

                return output;
            }
            else
            {
                DL_LOG_NN_LATENCY_START();
                max2d(input0, input0, input1, assign_core);
                DL_LOG_NN_LATENCY_END("max2d");
            }
        }
    } // namespace nn
} // namespace dl
@ -0,0 +1,101 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
#include <stdint.h>

namespace dl
{
    namespace nn
    {
        /**
         * @brief max_pool2d(input).
         *
         * @param output       as an output
         * @param input        as an input
         * @param padding      padding size needed in [top, bottom, left, right] of this operation
         * @param filter_shape filter shape in [filter_height, filter_width]
         * @param stride_y     stride in height
         * @param stride_x     stride in width
         * @param assign_core  not effective yet
         */
        void max_pool2d(Tensor<int16_t> &output,
                        Tensor<int16_t> &input,
                        std::vector<int> &padding,
                        std::vector<int> &filter_shape,
                        const int stride_y,
                        const int stride_x,
                        const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief max_pool2d(input).
         *
         * @param output       as an output
         * @param input        as an input
         * @param padding      padding size needed in [top, bottom, left, right] of this operation
         * @param filter_shape filter shape in [filter_height, filter_width]
         * @param stride_y     stride in height
         * @param stride_x     stride in width
         * @param assign_core  not effective yet
         */
        void max_pool2d(Tensor<int8_t> &output,
                        Tensor<int8_t> &input,
                        std::vector<int> &padding,
                        std::vector<int> &filter_shape,
                        const int stride_y,
                        const int stride_x,
                        const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief max_pool2d(input).
         *
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @param input        as an input
         * @param filter_shape filter shape in [filter_height, filter_width]
         * @param stride_y     stride in height
         * @param stride_x     stride in width
         * @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN,
         *                     - PADDING_VALID: no padding
         *                     - PADDING_SAME_END and PADDING_SAME_BEGIN result in padding with zeros evenly to the left/right or up/down of the input
         *                       such that the output has the same height/width dimensions as the input;
         *                       PADDING_SAME_END pads in TensorFlow style, PADDING_SAME_BEGIN in MXNet style
         * @param assign_core  not effective yet
         * @return max_pool2d result
         */
        template <typename feature_t>
        Tensor<feature_t> max_pool2d(Tensor<feature_t> &input,
                                     std::vector<int> filter_shape,
                                     const int stride_y,
                                     const int stride_x,
                                     const padding_type_t padding_type,
                                     const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
        {
            DL_LOG_NN_LATENCY_INIT();

            DL_LOG_NN_LATENCY_START();
            std::vector<int> output_shape = get_output_shape(input.shape, filter_shape, stride_y, stride_x, padding_type);
            Tensor<feature_t> output;
            output.set_exponent(input.exponent).set_shape(output_shape).malloc_element();
            DL_LOG_NN_LATENCY_END("apply");

            std::vector<int> padding(4, 0);

            DL_LOG_NN_LATENCY_START();
            if (padding_type == PADDING_SAME_END || padding_type == PADDING_SAME_BEGIN)
            {
                padding = get_pad_size(output_shape, input.shape, filter_shape, stride_y, stride_x, padding_type);
            }
            DL_LOG_NN_LATENCY_END("padding");

            DL_LOG_NN_LATENCY_START();
            max_pool2d(output, input, padding, filter_shape, stride_y, stride_x, assign_core);
            DL_LOG_NN_LATENCY_END("max_pool2d");

            return output;
        }
    } // namespace nn
} // namespace dl
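A hedged sketch: 2x2 max pooling with stride 2 and VALID padding. Unlike avg_pool2d, no output exponent is passed; the template keeps the input's exponent:

#include "dl_nn_max_pool2d.hpp"
using namespace dl;

Tensor<int8_t> maxpool_example(Tensor<int8_t> &x)
{
    return nn::max_pool2d(x, /*filter_shape=*/{2, 2},
                          /*stride_y=*/2, /*stride_x=*/2, PADDING_VALID);
}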
@ -0,0 +1,80 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief min2d(input0, input1).
         *
         * @param output      as an output
         * @param input0      as one input
         * @param input1      as another input
         * @param assign_core not effective yet
         */
        void min2d(Tensor<int16_t> &output,
                   Tensor<int16_t> &input0,
                   Tensor<int16_t> &input1,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief min2d(input0, input1).
         *
         * @param output      as an output
         * @param input0      as one input
         * @param input1      as another input
         * @param assign_core not effective yet
         */
        void min2d(Tensor<int8_t> &output,
                   Tensor<int8_t> &input0,
                   Tensor<int8_t> &input1,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief min2d(input0, input1).
         *
         * @tparam inplace   whether to store the output directly in input0
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @param input0      as one input
         * @param input1      as another input
         * @param assign_core not effective yet
         * @return min2d result, or no return (result stored in input0)
         */
        template <bool inplace = false, typename feature_t>
        auto min2d(Tensor<feature_t> &input0,
                   Tensor<feature_t> &input1,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
        {
            assert(input0.is_same_shape(input1));
            assert(input0.exponent == input1.exponent);

            DL_LOG_NN_LATENCY_INIT();
            Tensor<feature_t> output;

            if constexpr (!inplace)
            {
                DL_LOG_NN_LATENCY_START();
                output.set_exponent(input0.exponent).set_shape(input0.shape).malloc_element();
                DL_LOG_NN_LATENCY_END("apply");

                DL_LOG_NN_LATENCY_START();
                min2d(output, input0, input1, assign_core);
                DL_LOG_NN_LATENCY_END("min2d");

                return output;
            }
            else
            {
                DL_LOG_NN_LATENCY_START();
                min2d(input0, input0, input1, assign_core);
                DL_LOG_NN_LATENCY_END("min2d");
            }
        }
    } // namespace nn
} // namespace dl
@ -0,0 +1,91 @@
#pragma once

#include <limits.h>
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief activation(mul2d(input0, input1)).
         *
         * @param output          as an output
         * @param input0          as one input
         * @param input1          as another input
         * @param activation      activation of mul2d; if you don't specify anything, no activation is applied
         * @param assign_core     not effective yet
         * @param output_exponent exponent of output; must be specified if, and only if, the operation is in-place
         */
        void mul2d(Tensor<int16_t> &output,
                   Tensor<int16_t> &input0,
                   Tensor<int16_t> &input1,
                   const Activation<int16_t> *const activation = NULL,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
                   const int output_exponent = INT_MIN);

        /**
         * @brief activation(mul2d(input0, input1)).
         *
         * @param output          as an output
         * @param input0          as one input
         * @param input1          as another input
         * @param activation      activation of mul2d; if you don't specify anything, no activation is applied
         * @param assign_core     not effective yet
         * @param output_exponent exponent of output; must be specified if, and only if, the operation is in-place
         */
        void mul2d(Tensor<int8_t> &output,
                   Tensor<int8_t> &input0,
                   Tensor<int8_t> &input1,
                   const Activation<int8_t> *const activation = NULL,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
                   const int output_exponent = INT_MIN);

        /**
         * @brief activation(mul2d(input0, input1)).
         *
         * @tparam inplace   whether to store the output directly in input0
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @param output_exponent exponent of output
         * @param input0          as one input
         * @param input1          as another input
         * @param activation      activation of mul2d; if you don't specify anything, no activation is applied
         * @param assign_core     not effective yet
         * @return mul2d result, or no return (result stored in input0)
         */
        template <bool inplace = false, typename feature_t>
        auto mul2d(const int output_exponent,
                   Tensor<feature_t> &input0,
                   Tensor<feature_t> &input1,
                   const Activation<feature_t> *activation,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
        {
            assert(input0.is_same_shape(input1));

            DL_LOG_NN_LATENCY_INIT();
            Tensor<feature_t> output;

            if constexpr (!inplace)
            {
                DL_LOG_NN_LATENCY_START();
                output.set_exponent(output_exponent).set_shape(input0.shape).malloc_element();
                DL_LOG_NN_LATENCY_END("apply");

                DL_LOG_NN_LATENCY_START();
                mul2d(output, input0, input1, activation, assign_core);
                DL_LOG_NN_LATENCY_END("mul2d");

                return output;
            }
            else
            {
                DL_LOG_NN_LATENCY_START();
                mul2d(input0, input0, input1, activation, assign_core, output_exponent);
                DL_LOG_NN_LATENCY_END("mul2d");
            }
        }
    } // namespace nn
} // namespace dl
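A hedged sketch contrasting the two template modes: the out-of-place call returns a new tensor, while inplace<true> overwrites input0 and returns void (the -8 exponent is this example's assumption):

#include "dl_nn_mul2d.hpp"

void mul_example(dl::Tensor<int16_t> &a, dl::Tensor<int16_t> &b)
{
    dl::Tensor<int16_t> c = dl::nn::mul2d(-8, a, b, nullptr); // out-of-place
    dl::nn::mul2d<true>(-8, a, b, nullptr);                   // result stored in a
}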
@ -0,0 +1,82 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief prelu(input).
         *
         * @param output              as an output
         * @param input               as an input
         * @param activation_element  quantized alpha elements along the channel axis
         * @param activation_exponent exponent of quantized alpha elements
         * @param assign_core         not effective yet
         */
        void prelu(Tensor<int16_t> &output,
                   Tensor<int16_t> &input,
                   const int16_t *activation_element,
                   const int activation_exponent,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief prelu(input).
         *
         * @param output              as an output
         * @param input               as an input
         * @param activation_element  quantized alpha elements along the channel axis
         * @param activation_exponent exponent of quantized alpha elements
         * @param assign_core         not effective yet
         */
        void prelu(Tensor<int8_t> &output,
                   Tensor<int8_t> &input,
                   const int8_t *activation_element,
                   const int activation_exponent,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief prelu(input).
         *
         * @tparam inplace   whether to store the output directly in input
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @param input               as an input
         * @param activation_element  quantized alpha elements along the channel axis
         * @param activation_exponent exponent of quantized alpha elements
         * @param assign_core         not effective yet
         * @return prelu result, or no return (result stored in input)
         */
        template <bool inplace = false, typename feature_t>
        auto prelu(Tensor<feature_t> &input,
                   const feature_t *activation_element,
                   const int activation_exponent,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
        {
            DL_LOG_NN_LATENCY_INIT();
            Tensor<feature_t> output;
            if constexpr (!inplace)
            {
                DL_LOG_NN_LATENCY_START();
                output.set_exponent(input.exponent).set_shape(input.shape).malloc_element();
                DL_LOG_NN_LATENCY_END("apply");

                DL_LOG_NN_LATENCY_START();
                prelu(output, input, activation_element, activation_exponent, assign_core);
                DL_LOG_NN_LATENCY_END("prelu");

                return output;
            }
            else
            {
                DL_LOG_NN_LATENCY_START();
                prelu(input, input, activation_element, activation_exponent, assign_core);
                DL_LOG_NN_LATENCY_END("prelu");
            }
        }
    } // namespace nn
} // namespace dl
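A hedged sketch: PReLU with one quantized alpha per channel; the array is assumed to be as long as the input's channel dimension, and the -6 exponent is this example's assumption:

#include "dl_nn_prelu.hpp"

void prelu_example(dl::Tensor<int8_t> &x, const int8_t *alpha_per_channel)
{
    // In-place variant: each channel uses its own slope for negative values.
    dl::nn::prelu<true>(x, alpha_per_channel, /*activation_exponent=*/-6);
}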
70
tools/sdk/esp32c3/include/esp-face/include/nn/dl_nn_relu.hpp
Normal file
@ -0,0 +1,70 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief relu(input).
         *
         * @param output      as an output
         * @param input       as an input
         * @param assign_core not effective yet
         */
        void relu(Tensor<int16_t> &output,
                  Tensor<int16_t> &input,
                  const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief relu(input).
         *
         * @param output      as an output
         * @param input       as an input
         * @param assign_core not effective yet
         */
        void relu(Tensor<int8_t> &output,
                  Tensor<int8_t> &input,
                  const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

        /**
         * @brief relu(input).
         *
         * @tparam inplace   whether to store the output directly in input
         * @tparam feature_t supports int16_t and int8_t,
         *                   - int16_t: operation in int16_t quantization
         *                   - int8_t: operation in int8_t quantization
         * @param input       as an input
         * @param assign_core not effective yet
         * @return relu result, or no return (result stored in input)
         */
        template <bool inplace = false, typename feature_t>
        auto relu(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
        {
            DL_LOG_NN_LATENCY_INIT();
            Tensor<feature_t> output;

            if constexpr (!inplace)
            {
                DL_LOG_NN_LATENCY_START();
                output.set_exponent(input.exponent).set_shape(input.shape).malloc_element();
                DL_LOG_NN_LATENCY_END("apply");

                DL_LOG_NN_LATENCY_START();
                relu(output, input, assign_core);
                DL_LOG_NN_LATENCY_END("relu");

                return output;
            }
            else
            {
                DL_LOG_NN_LATENCY_START();
                relu(input, input, assign_core);
                DL_LOG_NN_LATENCY_END("relu");
            }
        }
    } // namespace nn
} // namespace dl
@ -0,0 +1,90 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
    namespace nn
    {
        /**
         * @brief activation(sub2d(input0, input1)).
         *
         * @param output          as an output
         * @param input0          as one input
         * @param input1          as another input
         * @param activation      activation of sub2d; if nothing is specified, no activation is applied
         * @param assign_core     not effective yet
         * @param output_exponent exponent of output; must be specified if, and only if, the operation is in-place
         */
        void sub2d(Tensor<int16_t> &output,
                   Tensor<int16_t> &input0,
                   Tensor<int16_t> &input1,
                   const Activation<int16_t> *const activation = NULL,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
                   const int output_exponent = INT_MIN);

        /**
         * @brief activation(sub2d(input0, input1)).
         *
         * @param output          as an output
         * @param input0          as one input
         * @param input1          as another input
         * @param activation      activation of sub2d; if nothing is specified, no activation is applied
         * @param assign_core     not effective yet
         * @param output_exponent exponent of output; must be specified if, and only if, the operation is in-place
         */
        void sub2d(Tensor<int8_t> &output,
                   Tensor<int8_t> &input0,
                   Tensor<int8_t> &input1,
                   const Activation<int8_t> *const activation = NULL,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
                   const int output_exponent = INT_MIN);

        /**
         * @brief activation(sub2d(input0, input1)).
         *
         * @tparam inplace   whether to store the output directly into input0
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantization
         *         - int8_t: stands for operation in int8_t quantization
         * @param output_exponent exponent of output
         * @param input0          as one input
         * @param input1          as another input
         * @param activation      activation of sub2d; if nothing is specified, no activation is applied
         * @param assign_core     not effective yet
         * @return sub2d result, or no return (the result is stored into input0)
         */
        template <bool inplace = false, typename feature_t>
        auto sub2d(const int output_exponent,
                   Tensor<feature_t> &input0,
                   Tensor<feature_t> &input1,
                   const Activation<feature_t> *activation,
                   const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
        {
            assert(input0.is_same_shape(input1));

            DL_LOG_NN_LATENCY_INIT();
            Tensor<feature_t> output;
            if constexpr (!inplace)
            {
                DL_LOG_NN_LATENCY_START();
                output.set_exponent(output_exponent).set_shape(input0.shape).malloc_element();
                DL_LOG_NN_LATENCY_END("apply");

                DL_LOG_NN_LATENCY_START();
                sub2d(output, input0, input1, activation, assign_core);
                DL_LOG_NN_LATENCY_END("sub2d");

                return output;
            }
            else
            {
                DL_LOG_NN_LATENCY_START();
                sub2d(input0, input0, input1, activation, assign_core, output_exponent);
                DL_LOG_NN_LATENCY_END("sub2d");
            }
        }
    } // namespace nn
} // namespace dl
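A matching sketch for sub2d (shapes and exponents are illustrative; both inputs must share a shape, as the assert enforces):

dl::Tensor<int16_t> a, b;
a.set_exponent(-7).set_shape({1, 16, 16, 8}).calloc_element();
b.set_exponent(-7).set_shape({1, 16, 16, 8}).calloc_element();

// Out-of-place subtraction with no activation; -7 is the requested output exponent.
auto diff = dl::nn::sub2d(-7, a, b, NULL);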
tools/sdk/esp32c3/include/esp-face/include/tool/dl_tool.hpp
@ -0,0 +1,427 @@
#pragma once

#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "esp_system.h"
#include "esp_timer.h"
#include "freertos/FreeRTOS.h"

#include "dl_define.hpp"

extern "C"
{
#if CONFIG_TIE728_BOOST
    void dl_tie728_memset_8b(void *ptr, const int value, const int n);
    void dl_tie728_memset_16b(void *ptr, const int value, const int n);
    void dl_tie728_memset_32b(void *ptr, const int value, const int n);
#endif
}

namespace dl
{
    namespace tool
    {
        /**
         * @brief Set memory to zero.
         *
         * @param ptr pointer to the memory
         * @param n   number of bytes
         */
        void set_zero(void *ptr, const int n);

        /**
         * @brief Set array value.
         *
         * @tparam T supports all data types; types whose sizeof(T) is 1, 2 or 4 are accelerated by instruction
         * @param ptr   pointer to the array
         * @param value value to set
         * @param len   length of the array
         */
        template <typename T>
        void set_value(T *ptr, const T value, const int len)
        {
#if CONFIG_TIE728_BOOST
            int *temp = (int *)&value;
            if (sizeof(T) == 1)
                dl_tie728_memset_8b(ptr, *temp, len);
            else if (sizeof(T) == 2)
                dl_tie728_memset_16b(ptr, *temp, len);
            else if (sizeof(T) == 4)
                dl_tie728_memset_32b(ptr, *temp, len);
            else
#endif
                for (size_t i = 0; i < len; i++)
                    ptr[i] = value;
        }

        /**
         * @brief Copy memory.
         *
         * @param dst pointer to the destination
         * @param src pointer to the source
         * @param n   number of bytes
         */
        void copy_memory(void *dst, void *src, const int n);

        /**
         * @brief Allocate memory without initialization. Use free_aligned() to free the memory.
         *
         * @param number number of elements
         * @param size   size of one element
         * @param align  alignment in bytes, e.g., 16 means 16-byte aligned
         * @return pointer to the allocated memory, NULL on failure
         */
        inline void *malloc_aligned(int number, int size, int align = 4)
        {
            assert((align > 0) && ((align & (align - 1)) == 0));
            int total_size = number * size;

            void *res = heap_caps_aligned_alloc(align, total_size, MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
#if DL_SPIRAM_SUPPORT
            if (NULL == res)
                res = heap_caps_aligned_alloc(align, total_size, MALLOC_CAP_SPIRAM);
#endif
            if (NULL == res)
            {
                printf("Fail to malloc %d bytes from DRAM(%d bytes) and PSRAM(%d bytes), PSRAM is %s.\n",
                       total_size,
                       heap_caps_get_free_size(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL),
                       heap_caps_get_free_size(MALLOC_CAP_SPIRAM),
                       DL_SPIRAM_SUPPORT ? "on" : "off");
                return NULL;
            }

            return (void *)res;
        }

        /**
         * @brief Allocate zero-initialized memory. Use free_aligned() to free the memory.
         *
         * @param number number of elements
         * @param size   size of one element
         * @param align  alignment in bytes, e.g., 16 means 16-byte aligned
         * @return pointer to the allocated memory, NULL on failure
         */
        inline void *calloc_aligned(int number, int size, int align = 4)
        {
            void *aligned = malloc_aligned(number, size, align);
            set_zero(aligned, number * size);

            return (void *)aligned;
        }

        /**
         * @brief Free memory allocated by calloc_aligned() or malloc_aligned().
         *
         * @param address pointer to the memory to free
         */
        inline void free_aligned(void *address)
        {
            if (NULL == address)
                return;

            heap_caps_free(address);
        }
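For illustration, a minimal sketch of how these helpers pair up (buffer length, element type, and the 16-byte alignment are arbitrary choices here):

// Hypothetical usage: allocate a 16-byte-aligned int16_t buffer,
// fill it via the boosted setter when available, then release it.
int16_t *buf = (int16_t *)dl::tool::malloc_aligned(128, sizeof(int16_t), 16);
if (buf != NULL)
{
    dl::tool::set_value<int16_t>(buf, 0x7F, 128);
    dl::tool::free_aligned(buf); // always release with the matching free
}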
        /**
         * @brief Allocate memory without initialization, in preference order: internal aligned, internal, external aligned.
         *
         * @param number number of elements
         * @param size   size of one element
         * @param align  alignment in bytes, e.g., 16 means 16-byte aligned
         * @return pointer to the allocated memory, NULL on failure
         */
        inline void *malloc_aligned_prefer(int number, int size, int align = 4)
        {
            assert((align > 0) && ((align & (align - 1)) == 0));
            int total_size = number * size;
            void *res = heap_caps_aligned_alloc(align, total_size, MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
            if (NULL == res)
            {
                res = heap_caps_malloc(total_size, MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
            }
#if DL_SPIRAM_SUPPORT
            if (NULL == res)
            {
                res = heap_caps_aligned_alloc(align, total_size, MALLOC_CAP_SPIRAM);
            }
#endif
            if (NULL == res)
            {
                printf("Fail to malloc %d bytes from DRAM(%d bytes) and PSRAM(%d bytes), PSRAM is %s.\n",
                       total_size,
                       heap_caps_get_free_size(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL),
                       heap_caps_get_free_size(MALLOC_CAP_SPIRAM),
                       DL_SPIRAM_SUPPORT ? "on" : "off");
                return NULL;
            }

            return res;
        }

        /**
         * @brief Allocate zero-initialized memory, in preference order: internal aligned, internal, external aligned.
         *
         * @param number number of elements
         * @param size   size of one element
         * @param align  alignment in bytes, e.g., 16 means 16-byte aligned
         * @return pointer to the allocated memory, NULL on failure
         */
        inline void *calloc_aligned_prefer(int number, int size, int align = 4)
        {
            void *res = malloc_aligned_prefer(number, size, align);
            set_zero(res, number * size);

            return (void *)res;
        }

        /**
         * @brief Free memory allocated by calloc_aligned_prefer() or malloc_aligned_prefer().
         *
         * @param address pointer to the memory to free
         */
        inline void free_aligned_prefer(void *address)
        {
            if (NULL == address)
                return;

            heap_caps_free(address);
        }

        /**
         * @brief Truncate the input into the int8_t range.
         *
         * @tparam T supports all integer types
         * @param output as an output
         * @param input  as an input
         */
        template <typename T>
        void truncate(int8_t &output, T input)
        {
            if (input >= DL_Q8_MAX)
                output = DL_Q8_MAX;
            else if (input <= DL_Q8_MIN)
                output = DL_Q8_MIN;
            else
                output = input;
        }

        /**
         * @brief Truncate the input into the int16_t range.
         *
         * @tparam T supports all integer types
         * @param output as an output
         * @param input  as an input
         */
        template <typename T>
        void truncate(int16_t &output, T input)
        {
            if (input >= DL_Q16_MAX)
                output = DL_Q16_MAX;
            else if (input <= DL_Q16_MIN)
                output = DL_Q16_MIN;
            else
                output = input;
        }
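As a quick illustration of the saturating behaviour (input values are arbitrary; this assumes DL_Q8_MAX/DL_Q8_MIN are the usual 127/-128 from dl_define.hpp):

int8_t q;
dl::tool::truncate(q, 300);  // 300 > DL_Q8_MAX, so q saturates to DL_Q8_MAX
dl::tool::truncate(q, -42);  // within range, so q == -42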
        /**
         * @brief Calculate the exponent for quantizing 1/n into the max_value range.
         *
         * @param n 1/n: the value to be quantized
         * @param max_value the maximum value of the quantized range
         * @return the exponent
         */
        inline int calculate_exponent(int n, int max_value)
        {
            int exp = 0;
            int tmp = 1 / n;
            while (tmp < max_value)
            {
                exp += 1;
                tmp = (1 << exp) / n;
            }
            exp -= 1;

            return exp;
        }
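A worked example may make the search loop clearer; the arguments below are chosen purely for illustration:

// Quantize 1/3 with max_value = 128: (1 << exp) / 3 grows
// 0, 0, 1, 2, 5, 10, 21, 42, 85, 170; the loop stops at exp = 9
// (170 >= 128), then steps back, so the function returns 8.
int exp = dl::tool::calculate_exponent(3, 128); // returns 8
// (1 << 8) / 3 == 85, i.e. 1/3 is approximated by 85 * 2^-8 ~= 0.332.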
        /**
         * @brief Print a vector in format "[x1, x2, ...]\n".
         *
         * @param array   vector to print
         * @param message optional message printed before the vector
         */
        inline void print_vector(std::vector<int> &array, const char *message = NULL)
        {
            if (message)
                printf("%s: ", message);

            printf("[");
            for (int i = 0; i < array.size(); i++)
            {
                printf(", %d" + (i ? 0 : 2), array[i]); // skip the leading ", " for the first element
            }
            printf("]\n");
        }

        /**
         * @brief Get the current CPU cycle count.
         *
         * @return cycle count
         */
        inline uint32_t get_cycle()
        {
            uint32_t ccount;
            __asm__ __volatile__("rsr %0, ccount" // read the Xtensa CCOUNT register
                                 : "=a"(ccount)
                                 :
                                 : "memory");
            return ccount;
        }

        class Latency
        {
        private:
            const uint32_t size; /*!< size of the queue */
            uint32_t *queue;     /*!< queue storing the history of periods */
            uint32_t period;     /*!< current period */
            uint32_t sum;        /*!< sum of the recorded periods */
            uint32_t count;      /*!< number of recorded periods */
            uint32_t next;       /*!< points to the next element in the queue */
            uint32_t timestamp;  /*!< records the start time */

        public:
            /**
             * @brief Construct a new Latency object.
             *
             * @param size number of history periods kept for averaging
             */
            Latency(const uint32_t size = 1) : size(size),
                                               period(0),
                                               sum(0),
                                               count(0),
                                               next(0)
            {
                this->queue = (this->size > 1) ? (uint32_t *)calloc(this->size, sizeof(uint32_t)) : NULL;
            }

            /**
             * @brief Destroy the Latency object.
             */
            ~Latency()
            {
                if (this->queue)
                    free(this->queue);
            }

            /**
             * @brief Record the start timestamp.
             */
            void start()
            {
#if DL_LOG_LATENCY_UNIT
                this->timestamp = get_cycle();
#else
                this->timestamp = esp_timer_get_time();
#endif
            }

            /**
             * @brief Record the period.
             */
            void end()
            {
#if DL_LOG_LATENCY_UNIT
                this->period = get_cycle() - this->timestamp;
#else
                this->period = esp_timer_get_time() - this->timestamp;
#endif
                if (this->queue)
                {
                    this->sum -= this->queue[this->next];
                    this->queue[this->next] = this->period;
                    this->sum += this->queue[this->next];
                    this->next++;
                    this->next = this->next % this->size;
                    if (this->count < this->size)
                    {
                        this->count++;
                    }
                }
            }

            /**
             * @brief Return the last recorded period.
             *
             * @return the last period
             */
            uint32_t get_period()
            {
                return this->period;
            }

            /**
             * @brief Get the average period.
             *
             * @return average latency
             */
            uint32_t get_average_period()
            {
                return this->queue ? (this->sum / this->count) : this->period;
            }

            /**
             * @brief Clear the period.
             */
            void clear_period()
            {
                this->period = 0;
            }

            /**
             * @brief Print in format "latency: {average period} {unit}\n".
             */
            void print()
            {
#if DL_LOG_LATENCY_UNIT
                printf("latency: %15u cycle\n", this->get_average_period());
#else
                printf("latency: %15u us\n", this->get_average_period());
#endif
            }

            /**
             * @brief Print in format "{message}: {average period} {unit}\n".
             *
             * @param message message to print
             */
            void print(const char *message)
            {
#if DL_LOG_LATENCY_UNIT
                printf("%s: %15u cycle\n", message, this->get_average_period());
#else
                printf("%s: %15u us\n", message, this->get_average_period());
#endif
            }

            /**
             * @brief Print in format "{prefix}::{key}: {average period} {unit}\n".
             *
             * @param prefix prefix of the print
             * @param key    key of the print
             */
            void print(const char *prefix, const char *key)
            {
#if DL_LOG_LATENCY_UNIT
                printf("%s::%s: %u cycle\n", prefix, key, this->get_average_period());
#else
                printf("%s::%s: %u us\n", prefix, key, this->get_average_period());
#endif
            }
        };
    } // namespace tool
} // namespace dl
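A minimal usage sketch of the Latency helper (the window size and the profiled call are illustrative; some_workload is a hypothetical function):

dl::tool::Latency latency(10); // average over the last 10 measurements
for (int i = 0; i < 10; i++)
{
    latency.start();
    some_workload();           // hypothetical function being profiled
    latency.end();
}
latency.print("profile", "some_workload");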
@ -0,0 +1,74 @@
#pragma once

#include <stdint.h>

#if CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/rom/cache.h"
#include "soc/extmem_reg.h"
#endif

namespace dl
{
    namespace tool
    {
        namespace cache
        {
            /**
             * @brief Initialize preload.
             *
             * @param preload one of 1 or 0,
             *        - 1: turn on the preload
             *        - 0: turn off the preload
             * @return
             *        - 1: initialized successfully
             *        - 0: initialized successfully, autoload has been turned off
             *        - -1: initialization failed, the chip does not support preload
             */
            int8_t preload_init(uint8_t preload = 1);

            /**
             * @brief Preload memory.
             *
             * @param addr the start address of the data to be preloaded
             * @param size the size of the data in bytes to be preloaded
             */
            void preload_func(uint32_t addr, uint32_t size);

            /**
             * @brief Initialize autoload.
             *
             * @param autoload One of 1 or 0,
             *        - 1: turn on the autoload
             *        - 0: turn off the autoload
             * @param trigger One of 0 or 1 or 2,
             *        - 0: miss, TODO:@yuanjiong
             *        - 1: hit, TODO:@yuanjiong
             *        - 2: both, TODO:@yuanjiong
             * @param line_size the number of cache lines to be autoloaded
             * @return status,
             *        - 1: initialized successfully
             *        - 0: initialized successfully, preload has been turned off
             *        - -1: initialization failed, the chip does not support autoload
             */
            int8_t autoload_init(uint8_t autoload = 1, uint8_t trigger = 2, uint8_t line_size = 0);

            /**
             * @brief Autoload memory.
             *
             * @param addr1 the start address of data1 to be autoloaded
             * @param size1 the size of data1 in bytes to be preloaded
             * @param addr2 the start address of data2 to be autoloaded
             * @param size2 the size of data2 in bytes to be preloaded
             */
            void autoload_func(uint32_t addr1, uint32_t size1, uint32_t addr2, uint32_t size2);

            /**
             * @brief Autoload memory.
             *
             * @param addr1 the start address of data1 to be autoloaded
             * @param size1 the size of data1 in bytes to be preloaded
             */
            void autoload_func(uint32_t addr1, uint32_t size1);
        } // namespace cache
    } // namespace tool
} // namespace dl
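For orientation, a sketch of how these calls chain (the buffer name and byte count are placeholders; on chips without preload support, preload_init() returns -1 and the preload call should be skipped):

if (dl::tool::cache::preload_init(1) == 1)
{
    // feature_map / feature_map_bytes are hypothetical application data.
    dl::tool::cache::preload_func((uint32_t)feature_map, feature_map_bytes);
}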
@ -0,0 +1,129 @@
#pragma once

#include "dl_define.hpp"
#include <vector>

namespace dl
{
    /**
     * @brief Base class of Filter, Bias, Activation.
     *
     * @tparam T supports int16_t and int8_t,
     *         - int16_t: stands for operation in int16_t quantization,
     *         - int8_t: stands for operation in int8_t quantization.
     */
    template <typename T>
    class Constant
    {
    public:
        const T *element;             /*!< points to the element */
        const int exponent;           /*!< exponent of the element */
        const std::vector<int> shape; /*!< shape of the element */

        /**
         * @brief Construct a new Constant object.
         *
         * @param element  points to the element.
         * @param exponent exponent of the element.
         * @param shape    shape of the Constant.
         */
        Constant(const T *element, const int exponent, const std::vector<int> shape);
    };

    /**
     * @brief Filter.
     * NOTE: The shape format of the filter is fixed, but the element sequence depends on the optimization method.
     *       - 1D: reserved
     *       - 2D: shape format is [filter_height, filter_width, input_channel, output_channel]. dilation format is [height, width]
     *
     * @tparam T supports int16_t and int8_t,
     *         - int16_t: stands for operation in int16_t quantization,
     *         - int8_t: stands for operation in int8_t quantization.
     */
    template <typename T>
    class Filter : public Constant<T>
    {
    public:
        const std::vector<int> dilation;      /*!< - 1D: reserved */
                                              /*!< - 2D: [dilation_in_height, dilation_in_width] */
        std::vector<int> shape_with_dilation; /*!< - 1D: reserved */
                                              /*!< - 2D: [filter_height_with_dilation, filter_width_with_dilation, input_channel, output_channel] */
        std::vector<int> channel_exponent;    /*!< per-channel exponent */

        /**
         * @brief Construct a new Filter object.
         *
         * @param element  points to the element
         * @param exponent exponent of the element
         * @param shape    shape of the Filter,
         *                 - 1D: reserved
         *                 - 2D: for convolution is [filter_height, filter_width, input_channel, output_channel],
         *                       for depthwise convolution is [filter_height, filter_width, input_channel, 1]
         * @param dilation dilation of the Filter
         *                 - 1D: reserved
         *                 - 2D: [dilation_in_height, dilation_in_width]
         */
        Filter(const T *element, const int exponent, const std::vector<int> shape, const std::vector<int> dilation = {1, 1});

        /**
         * @brief Construct a new Filter object.
         *
         * @param element          points to the element
         * @param channel_exponent per-channel exponent
         * @param shape            shape of the element
         * @param dilation         dilation of the Filter
         *                         - 1D: reserved
         *                         - 2D: [dilation_in_height, dilation_in_width]
         */
        Filter(const T *element, const std::vector<int> channel_exponent, const std::vector<int> shape, const std::vector<int> dilation = {1, 1});

        /**
         * @brief Print the n-th filter.
         *
         * @param n       index of output_channel
         * @param message message to print
         */
        void print2d_n(const int n, const char *message) const;
    };

    /**
     * @brief Bias.
     *
     * @tparam T supports int16_t and int8_t
     *         - int16_t: stands for operation in int16_t quantization
     *         - int8_t: stands for operation in int8_t quantization
     */
    template <typename T>
    class Bias : public Constant<T>
    {
    public:
        using Constant<T>::Constant;
        std::vector<int> channel_exponent; /*!< per-channel exponent */

        Bias(const T *element, const std::vector<int> channel_exponent, const std::vector<int> shape);
    };

    /**
     * @brief Activation.
     *
     * @tparam T supports int16_t and int8_t
     *         - int16_t: stands for operation in int16_t quantization
     *         - int8_t: stands for operation in int8_t quantization
     */
    template <typename T>
    class Activation : public Constant<T>
    {
    public:
        const activation_type_t type; /*!< one of Linear or ReLU or LeakyReLU or PReLU */

        /**
         * @brief Construct a new Activation object.
         *
         * @param type     one of Linear or ReLU or LeakyReLU or PReLU
         * @param element  points to the element of the activation
         * @param exponent exponent of the element
         * @param shape    shape of the element
         */
        Activation(const activation_type_t type, const T *element = NULL, const int exponent = 0, const std::vector<int> shape = {0});
    };
} // namespace dl
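To make the shape convention concrete, here is a hedged sketch of constructing a 3x3 convolution filter (the weights array, exponent, and channel counts are made up; the ReLU enumerator name is assumed from the doc above, not confirmed by this diff):

// 3x3 kernel, 8 input channels, 16 output channels, int16_t quantization
// with a shared exponent of -8; weights is a placeholder array.
const dl::Filter<int16_t> filter(weights, -8, {3, 3, 8, 16}, {1, 1});
const dl::Activation<int16_t> act(dl::ReLU); // assumed activation_type_t enumerator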
@ -0,0 +1,495 @@
#pragma once

#include <stdio.h>
#include <vector>
#include <assert.h>
#include <iostream>

#include "dl_tool.hpp"

namespace dl
{
    /**
     * @brief Tensor.
     *
     * @tparam T supports uint8_t, int8_t, int16_t and float.
     */
    template <typename T>
    class Tensor
    {
    private:
        int size;                     /*!< size of element including padding */
        bool auto_free;               /*!< free element when the object is destroyed */
        std::vector<int> axis_offset; /*!< element offset of each axis */

    public:
        T *element;             /*!< points to element */
        int exponent;           /*!< exponent of element */
        std::vector<int> shape; /*!< shape of Tensor */

        /**
         * @brief Construct a new Tensor object.
         */
        Tensor() : auto_free(true), element(NULL), exponent(0) { this->set_shape({0}); }

        /**
         * @brief Construct a new Tensor object by copying from input.
         *
         * @param input an input Tensor
         * @param deep  one of true or false
         *              - true: allocate new memory and copy the values from input.element into it
         *              - false: take over input.element as this->element
         */
        Tensor(Tensor<T> &input, bool deep) : size(input.size),
                                              auto_free(input.auto_free),
                                              exponent(input.exponent)
        {
            this->set_shape(input.shape);
            if (deep && (input.element != NULL))
            {
                int size_real = input.get_size();
                T *new_element = (T *)tool::calloc_aligned_prefer(size_real, sizeof(T), 16);
                tool::copy_memory(new_element, input.element, size_real * sizeof(T));
                this->element = new_element;
            }
            else
            {
                this->element = input.element;
                this->auto_free = false;
            }
        }

        /**
         * @brief Destroy the Tensor object.
         */
        ~Tensor()
        {
            if (this->auto_free)
                this->free_element();
        }

        /**
         * @brief Copy element from input.
         *
         * @param input an input Tensor
         * @param deep  one of true or false
         *              - true: allocate new memory and copy the values from input.element into it
         *              - false: take over input.element as this->element
         * @return Tensor<T>& self
         */
        Tensor<T> &copy_element(Tensor<T> &input, bool deep)
        {
            assert(this->get_size() == input.get_size());
            assert(input.element != NULL);

            this->malloc_element();
            if (deep)
            {
                tool::copy_memory(this->element, input.element, this->get_size() * sizeof(T));
            }
            else
            {
                this->element = input.element;
                this->auto_free = false;
            }
            return *this;
        }

        /**
         * @brief Set the auto_free flag.
         *
         * @param auto_free one of true or false
         *                  - true: free element when the object is destroyed
         *                  - false: do not
         * @return self
         */
        Tensor<T> &set_auto_free(const bool auto_free)
        {
            this->auto_free = auto_free;
            return *this;
        }

        /**
         * @brief Set the element.
         *
         * @param element points to the element memory
         * @return self
         */
        Tensor<T> &set_element(T *element, const bool auto_free = false)
        {
            assert(this->element == NULL);
            this->element = element;
            this->auto_free = auto_free;

            return *this;
        }

        /**
         * @brief Set the exponent.
         *
         * @param exponent exponent of element
         * @return self
         */
        Tensor<T> &set_exponent(const int exponent)
        {
            this->exponent = exponent;

            return *this;
        }

        /**
         * @brief Set the shape of the Tensor.
         *
         * @param shape the target shape
         *
         * @return self
         */
        Tensor<T> &set_shape(const std::vector<int> shape);

        /**
         * @brief Print the shape of the Tensor.
         */
        void print_shape()
        {
            if (this->shape.size())
            {
                printf("shape = (");
                for (int i = 0; i < this->shape.size() - 1; i++)
                {
                    printf("%d, ", this->shape[i]);
                }
                printf("%d)\n", this->shape.back());
            }
            else
            {
                printf("shape = ()\n");
            }
        }

        /**
         * @brief Flatten the Tensor.
         *
         * @return Tensor<T>& self
         */
        Tensor<T> &flatten();

        /**
         * @brief Assign a new shape to the Tensor without changing its data.
         *
         * @param shape the target shape
         * @return Tensor<T>& self
         */
        Tensor<T> &reshape(std::vector<int> shape);

        /**
         * @brief Remove dims with length == 1 from the Tensor.
         *
         * @param axis the dim to be removed; make sure the length of that dim equals 1.
         *             If axis == INT32_MAX, all dims with length == 1 will be removed.
         * @return Tensor<T>& self
         */
        Tensor<T> &squeeze(int axis = INT32_MAX);

        /**
         * @brief Insert a new dim that will appear at the axis position in the expanded Tensor shape.
         *
         * @param axis the dim to be inserted
         * @return Tensor<T>& self
         */
        Tensor<T> &expand_dims(int axis);

        /**
         * @brief Insert new dims that will appear at the axis positions in the expanded Tensor shape.
         *
         * @param axis the dims to be inserted
         * @return Tensor<T>& self
         */
        Tensor<T> &expand_dims(std::vector<int> axis);

        /**
         * @brief Reverse or permute the axes of the Tensor.
         *
         * @param perm the new arrangement of the dims; if perm == {}, the dims arrangement will be reversed.
         * @return Tensor<T>& self
         */
        Tensor<T> &transpose(std::vector<int> perm = {});

        /**
         * @brief Reverse or permute the axes of the input Tensor.
         *
         * @param input the input Tensor
         * @param perm  the new arrangement of the dims; if perm == {}, the dims arrangement will be reversed.
         * @return Tensor<T>& self
         */
        Tensor<T> &transpose(Tensor<T> &input, std::vector<int> perm = {});

        /**
         * @brief Get the element pointer.
         *
         * @return pointer to memory
         */
        T *get_element_ptr()
        {
            return this->element;
        }

        /**
         * @brief Get the element value.
         *
         * @param index the index of each dim.
         * @return T element value
         */
        T get_element_value(const std::vector<int> index)
        {
            return this->element[this->get_element_index(index)];
        }

        /**
         * @brief Get the element value.
         *
         * @param index the index of the element.
         * @return T element value
         */
        T get_element_value(int index)
        {
            return this->element[index];
        }

        /**
         * @brief Get the size of the Tensor.
         *
         * @return the size of the Tensor.
         */
        int get_size()
        {
            return this->size;
        }

        /**
         * @brief Get the axis offset.
         *
         * @return std::vector<int> the axis offset
         */
        std::vector<int> get_axis_offset()
        {
            return this->axis_offset;
        }

        /**
         * @brief Allocate zero-initialized memory, only if this->element is NULL.
         *
         * @param auto_free one of true or false
         *                  - true: free element when the object is destroyed
         *                  - false: do not
         * @return
         *         - true: on success
         *         - false: if allocation was skipped or failed
         */
        bool calloc_element(const bool auto_free = true)
        {
            if (this->element != NULL)
                return false;

            this->element = (T *)dl::tool::calloc_aligned_prefer(this->get_size(), sizeof(T), 16);
            this->auto_free = auto_free;

            return true;
        }

        /**
         * @brief Allocate memory without initialization, only if this->element is NULL.
         *
         * @param auto_free one of true or false
         *                  - true: free element when the object is destroyed
         *                  - false: do not
         * @return
         *         - true: on success
         *         - false: if allocation was skipped or failed
         */
        bool malloc_element(const bool auto_free = true)
        {
            if (this->element != NULL)
                return false;

            this->element = (T *)tool::malloc_aligned_prefer(this->get_size(), sizeof(T), 16);
            this->auto_free = auto_free;

            return true;
        }

        /**
         * @brief Free element if this->element is not NULL, and set this->element to NULL after freeing.
         */
        void free_element()
        {
            if (this->auto_free && this->element)
            {
                tool::free_aligned_prefer(this->element);
                this->element = NULL;
            }
        }

        /**
         * @brief Print the elements of the Tensor.
         *
         * @param axis_index_range the element range of each dim to print; if axis_index_range == {}, all the elements will be printed.
         * @param message          message to print
         */
        void print(std::vector<int> axis_index_range = {}, const char *message = "");

        /**
         * @brief Print all the elements of the Tensor.
         *
         * @param message message to print
         */
        void print_all(const char *message = "")
        {
            std::cout << "\n"
                      << message << " | ";
            this->print_shape();

            for (int i = 0; i < this->get_size(); i++)
            {
                std::cout << this->element[i] << " ";
            }
            std::cout << "\n";
            return;
        }

        /**
         * @brief Get the index of each dim.
         *
         * @param element_index the index of the element
         * @return std::vector<int> the index of each dim
         */
        std::vector<int> get_axis_index(int element_index);

        /**
         * @brief Get the index of an element.
         *
         * @param axis_index the index of each dim
         * @return int the index of the element
         */
        int get_element_index(const std::vector<int> axis_index);

        /**
         * @brief Check the element values against an input ground truth.
         *
         * @param gt_element    ground-truth values of the elements
         * @param bias          permissible error
         * @param info          one of true or false
         *                      - true: print the shape and the result
         *                      - false: do not
         * @param failed_number maximum number of mismatched elements to print
         *
         * @return
         *         - true: all elements within the permissible error
         *         - false: not
         */
        bool check_element(T *gt_element, int bias = 2, bool info = true, int failed_number = 0)
        {
            int count = 0;
            if (info)
                this->print_shape();
            int size = this->get_size();
            for (int i = 0; i < size; i++)
            {
                if (DL_ABS(this->element[i] - gt_element[i]) > bias)
                {
                    std::vector<int> index = get_axis_index(i);
                    std::cout << "element[";
                    for (int j = 0; j < index.size() - 1; j++)
                    {
                        std::cout << index[j] << ", ";
                    }
                    std::cout << index.back() << "]: ";
                    std::cout << +this->element[i] << " v.s. " << +gt_element[i] << "\n";
                    count++;
                    if (count > failed_number)
                        return false;
                }
            }
            if (count)
                return false;

            if (info)
                printf("PASS\n");

            return true;
        }

        /**
         * @brief Check whether the shape is the same as the shape of input.
         *
         * @param input an input Tensor
         * @return
         *         - true: same shape
         *         - false: not
         */
        bool is_same_shape(Tensor<T> &input)
        {
            if (input.shape.size() != this->shape.size())
            {
                return false;
            }
            for (int i = 0; i < this->shape.size(); i++)
            {
                if (input.shape[i] != this->shape[i])
                {
                    return false;
                }
            }
            return true;
        }

        Tensor<T> &operator=(const Tensor<T> &input)
        {
            this->auto_free = input.auto_free;
            this->exponent = input.exponent;
            int size_real_tmp = this->size;
            int size_input_real = input.size;
            this->set_shape(input.shape);
            if (input.element)
            {
                if (this->element)
                {
                    if (size_real_tmp != size_input_real)
                    {
                        tool::free_aligned_prefer(this->element);
                        T *new_element = (T *)tool::malloc_aligned_prefer(size_input_real, sizeof(T), 16);
                        tool::copy_memory(new_element, input.element, size_input_real * sizeof(T));
                        this->element = new_element;
                    }
                    else
                    {
                        tool::copy_memory(this->element, input.element, size_input_real * sizeof(T));
                    }
                }
                else
                {
                    T *new_element = (T *)tool::malloc_aligned_prefer(size_input_real, sizeof(T), 16);
                    tool::copy_memory(new_element, input.element, size_input_real * sizeof(T));
                    this->element = new_element;
                }
                return *this;
            }
            else
            {
                if (this->element)
                {
                    tool::free_aligned_prefer(this->element);
                    this->element = NULL;
                }
                return *this;
            }
        }
    };
} // namespace dl
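A short usage sketch tying the pieces together (shape, exponent, and the index being read are illustrative):

dl::Tensor<int16_t> t;
t.set_exponent(-7).set_shape({1, 28, 28, 1}).calloc_element(); // zero-filled map
t.print_shape();                                // prints: shape = (1, 28, 28, 1)
int16_t v = t.get_element_value({0, 3, 4, 0});  // reads a single element (0 here)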
@ -10,6 +10,7 @@
#include <stdbool.h>
#include "esp_err.h"
#include "esp_http_server.h"
#include "esp_tls.h"

#ifdef __cplusplus
extern "C" {
@ -20,6 +21,22 @@ typedef enum {
    HTTPD_SSL_TRANSPORT_INSECURE    // SSL disabled
} httpd_ssl_transport_mode_t;

/**
 * @brief Callback data struct, contains the ESP-TLS connection handle
 */
typedef struct esp_https_server_user_cb_arg {
    const esp_tls_t *tls;
} esp_https_server_user_cb_arg_t;

/**
 * @brief Callback function prototype
 * Can be used to get connection or client information (SSL context)
 * E.g. Client certificate, Socket FD, Connection state, etc.
 *
 * @param user_cb Callback data struct
 */
typedef void esp_https_server_user_cb(esp_https_server_user_cb_arg_t *user_cb);

/**
 * HTTPS server config struct
 *
@ -66,6 +83,9 @@ struct httpd_ssl_config {

    /** Enable tls session tickets */
    bool session_tickets;

    /** User callback for esp_https_server */
    esp_https_server_user_cb *user_cb;
};

typedef struct httpd_ssl_config httpd_ssl_config_t;
@ -113,6 +133,7 @@ typedef struct httpd_ssl_config httpd_ssl_config_t;
        .port_secure = 443,       \
        .port_insecure = 80,      \
        .session_tickets = false, \
        .user_cb = NULL,          \
}

/**
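A hedged sketch of wiring up the new user callback (the callback body is illustrative; what to do with the TLS handle is application-specific):

static void on_user_cb(esp_https_server_user_cb_arg_t *user_cb)
{
    // The ESP-TLS handle exposes the underlying connection (e.g. the
    // client certificate or socket FD, per the doc above).
    const esp_tls_t *tls = user_cb->tls;
    (void)tls;
}

httpd_ssl_config_t cfg = HTTPD_SSL_CONFIG_DEFAULT();
cfg.user_cb = &on_user_cb;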
@ -44,6 +44,7 @@ typedef enum {
#if SOC_PM_SUPPORT_CPU_PD
    ESP_PD_DOMAIN_CPU,      //!< CPU core
#endif
    ESP_PD_DOMAIN_RTC8M,    //!< Internal 8M oscillator
    ESP_PD_DOMAIN_VDDSDIO,  //!< VDD_SDIO
    ESP_PD_DOMAIN_MAX       //!< Number of domains
} esp_sleep_pd_domain_t;
@ -65,6 +65,22 @@ esp_err_t esp_lcd_panel_io_tx_color(esp_lcd_panel_io_handle_t io, int lcd_cmd, c
 */
esp_err_t esp_lcd_panel_io_del(esp_lcd_panel_io_handle_t io);

/**
 * @brief Type of LCD panel IO event data
 */
typedef struct {
} esp_lcd_panel_io_event_data_t;

/**
 * @brief Declare the prototype of the function that will be invoked when panel IO finishes transferring color data
 *
 * @param[in] panel_io LCD panel IO handle, which is created by factory API like `esp_lcd_new_panel_io_spi()`
 * @param[in] edata Panel IO event data, fed by driver
 * @param[in] user_ctx User data, passed from `esp_lcd_panel_io_xxx_config_t`
 * @return Whether a high priority task has been woken up by this function
 */
typedef bool (*esp_lcd_panel_io_color_trans_done_cb_t)(esp_lcd_panel_io_handle_t panel_io, esp_lcd_panel_io_event_data_t *edata, void *user_ctx);

/**
 * @brief Panel IO configuration structure, for SPI interface
 */
@ -74,8 +90,8 @@ typedef struct {
    int spi_mode;             /*!< Traditional SPI mode (0~3) */
    unsigned int pclk_hz;     /*!< Frequency of pixel clock */
    size_t trans_queue_depth; /*!< Size of internal transaction queue */
    bool (*on_color_trans_done)(esp_lcd_panel_io_handle_t panel_io, void *user_data, void *event_data); /*!< Callback invoked when color data transfer has finished */
    void *user_data;          /*!< User private data, passed directly to on_trans_frame_done's user_data */
    esp_lcd_panel_io_color_trans_done_cb_t on_color_trans_done; /*!< Callback invoked when color data transfer has finished */
    void *user_ctx;           /*!< User private data, passed directly to on_color_trans_done's user_ctx */
    int lcd_cmd_bits;         /*!< Bit-width of LCD command */
    int lcd_param_bits;       /*!< Bit-width of LCD parameter */
    struct {
@ -100,8 +116,8 @@ esp_err_t esp_lcd_new_panel_io_spi(esp_lcd_spi_bus_handle_t bus, const esp_lcd_p

typedef struct {
    uint32_t dev_addr;        /*!< I2C device address */
    bool (*on_color_trans_done)(esp_lcd_panel_io_handle_t panel_io, void *user_data, void *event_data); /*!< Callback invoked when color data transfer has finished */
    void *user_data;          /*!< User private data, passed directly to on_trans_frame_done's user_data */
    esp_lcd_panel_io_color_trans_done_cb_t on_color_trans_done; /*!< Callback invoked when color data transfer has finished */
    void *user_ctx;           /*!< User private data, passed directly to on_color_trans_done's user_ctx */
    size_t control_phase_bytes; /*!< I2C LCD panel will encode control information (e.g. D/C selection) into the control phase, in several bytes */
    unsigned int dc_bit_offset; /*!< Offset of the D/C selection bit in the control phase */
    int lcd_cmd_bits;         /*!< Bit-width of LCD command */
@ -168,8 +184,8 @@ typedef struct {
    int cs_gpio_num;          /*!< GPIO used for CS line, set to -1 to relinquish exclusive use of the I80 bus */
    unsigned int pclk_hz;     /*!< Frequency of pixel clock */
    size_t trans_queue_depth; /*!< Transaction queue size; a larger queue means higher throughput */
    bool (*on_color_trans_done)(esp_lcd_panel_io_handle_t panel_io, void *user_data, void *event_data); /*!< Callback invoked when color data has been transferred */
    void *user_data;          /*!< User private data, passed directly to on_trans_done's user_data */
    esp_lcd_panel_io_color_trans_done_cb_t on_color_trans_done; /*!< Callback invoked when color data has been transferred */
    void *user_ctx;           /*!< User private data, passed directly to on_color_trans_done's user_ctx */
    int lcd_cmd_bits;         /*!< Bit-width of LCD command */
    int lcd_param_bits;       /*!< Bit-width of LCD parameter */
    struct {
@ -18,6 +18,37 @@ extern "C" {
#if SOC_LCD_RGB_SUPPORTED
/**
 * @brief LCD RGB timing structure
 *
 *                                                 Total Width
 *                             <--------------------------------------------------->
 *                       Hsync width  HBP           Active Width                HFP
 *                             <---><--><--------------------------------------><--->
 *                         ____    ____|_______________________________________|____|
 *                             |___|   |                                       |    |
 *                                     |                                       |    |
 *                         __|         |                                       |    |
 *            /|\    /|\  |            |                                       |    |
 *             | VSYNC|   |            |                                       |    |
 *             |Width\|/  |__          |                                       |    |
 *             |     /|\     |         |                                       |    |
 *             |  VBP |      |         |                                       |    |
 *             |     \|/_____|_________|_______________________________________|    |
 *             |     /|\     |         | / / / / / / / / / / / / / / / / / / / |    |
 *             |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *    Total    |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *    Height   |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *             |Active|      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *             |Height|      |         |/ / / / / / Active Display Area / / / /|    |
 *             |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *             |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *             |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *             |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *             |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *             |     \|/_____|_________|_______________________________________|    |
 *             |     /|\     |                                                      |
 *             |  VFP |      |                                                      |
 *            \|/    \|/_____|______________________________________________________|
 *
 */
typedef struct {
    unsigned int pclk_hz; /*!< Frequency of pixel clock */
@ -38,6 +69,22 @@ typedef struct {
    } flags;
} esp_lcd_rgb_timing_t;

/**
 * @brief Type of RGB LCD panel event data
 */
typedef struct {
} esp_lcd_rgb_panel_event_data_t;

/**
 * @brief Declare the prototype of the function that will be invoked when panel IO finishes transferring color data
 *
 * @param[in] panel LCD panel handle, returned from `esp_lcd_new_rgb_panel`
 * @param[in] edata Panel event data, fed by driver
 * @param[in] user_ctx User data, passed from `esp_lcd_rgb_panel_config_t`
 * @return Whether a high priority task has been woken up by this function
 */
typedef bool (*esp_lcd_rgb_panel_frame_trans_done_cb_t)(esp_lcd_panel_handle_t panel, esp_lcd_rgb_panel_event_data_t *edata, void *user_ctx);

/**
 * @brief LCD RGB panel configuration structure
 */
@ -51,8 +98,8 @@ typedef struct {
    int pclk_gpio_num;  /*!< GPIO used for PCLK signal */
    int data_gpio_nums[SOC_LCD_RGB_DATA_WIDTH]; /*!< GPIOs used for data lines */
    int disp_gpio_num;  /*!< GPIO used for display control signal, set to -1 if it's not used */
    bool (*on_frame_trans_done)(esp_lcd_panel_handle_t panel, void *user_data); /*!< Callback invoked when one frame buffer has been transferred */
    void *user_data;    /*!< User data which would be passed to on_frame_trans_done's user_data */
    esp_lcd_rgb_panel_frame_trans_done_cb_t on_frame_trans_done; /*!< Callback invoked when one frame buffer has been transferred */
    void *user_ctx;     /*!< User data which would be passed to on_frame_trans_done's user_ctx */
    struct {
        unsigned int disp_active_low: 1; /*!< If this flag is enabled, a low level of the display control signal turns the screen on; vice versa */
        unsigned int relax_on_idle: 1;   /*!< If this flag is enabled, the host won't refresh the LCD if nothing changed in the host's frame buffer (useful for LCDs with built-in GRAM) */
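The RGB panel mirrors the same migration; a minimal sketch (GPIO assignments and the other required config fields are omitted/illustrative):

static bool on_frame_done(esp_lcd_panel_handle_t panel,
                          esp_lcd_rgb_panel_event_data_t *edata, void *user_ctx)
{
    return false; // no high priority task woken in this sketch
}

esp_lcd_rgb_panel_config_t panel_cfg = {
    .disp_gpio_num = -1,                  // display-control pin not used here
    .on_frame_trans_done = on_frame_done, // new typedef-based callback
    .user_ctx = NULL,                     // renamed from user_data
};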
@ -14,7 +14,7 @@
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/poll.h>
#include <dirent.h>
#include <sys/dirent.h>
#include <string.h>
#include "sdkconfig.h"

@ -1,16 +1,8 @@
// Copyright 2015-2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef __ESP_COEXIST_H__
#define __ESP_COEXIST_H__
@ -32,6 +24,13 @@ typedef enum {
    ESP_COEX_PREFER_NUM,    /*!< Prefer value numbers */
} esp_coex_prefer_t;

typedef enum {
    EXTERN_COEX_WIRE_1 = 0,
    EXTERN_COEX_WIRE_2,
    EXTERN_COEX_WIRE_3,
    EXTERN_COEX_WIRE_NUM,
} external_coex_wire_t;

/**
 * @brief coex status type
 */
@ -41,6 +40,36 @@ typedef enum {
    ESP_COEX_ST_TYPE_BT,
} esp_coex_status_type_t;

/**
 * @brief external coex gpio pti
 */
typedef struct {
    int32_t in_pin0;
    int32_t in_pin1;
    int32_t out_pin0;
} esp_external_coex_gpio_set_t;

/**
 * @brief external coex pti level
 */
typedef enum {
    EXTERN_COEX_PTI_MID = 0,
    EXTERN_COEX_PTI_HIGH,
    EXTERN_COEX_PTI_NUM,
} esp_coex_pti_level_t;

/**
 * @brief external coex pti
 */
typedef struct {
    uint32_t in_pti1;
    uint32_t in_pti2;
    uint32_t in_pti3;
    uint32_t out_pti1;
    uint32_t out_pti2;
    uint32_t out_pti3;
} esp_external_coex_pti_set_t;

#define ESP_COEX_BLE_ST_MESH_CONFIG     0x08
#define ESP_COEX_BLE_ST_MESH_TRAFFIC    0x10
#define ESP_COEX_BLE_ST_MESH_STANDBY    0x20
@ -84,6 +113,18 @@ esp_err_t esp_coex_status_bit_set(esp_coex_status_type_t type, uint32_t status);
 */
esp_err_t esp_coex_status_bit_clear(esp_coex_status_type_t type, uint32_t status);

#if CONFIG_EXTERNAL_COEX_ENABLE
/**
 * @brief Set up the GPIO pins and the corresponding PTI levels, then start external coex.
 * @param wire_type selects how many external coex wires (GPIOs) are used.
 * @param gpio_pin the GPIO pin numbers to use.
 * @return ESP_OK on success, other values on failure
 */
esp_err_t esp_enable_extern_coex_gpio_pin(external_coex_wire_t wire_type,
                esp_external_coex_gpio_set_t gpio_pin);

esp_err_t esp_disable_extern_coex_gpio_pin();
#endif

#ifdef __cplusplus
}
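A hedged sketch of enabling 3-wire external coexistence (the GPIO numbers are placeholders for whatever the board actually wires up):

#if CONFIG_EXTERNAL_COEX_ENABLE
esp_external_coex_gpio_set_t pins = {
    .in_pin0 = 0,  // placeholder GPIO numbers
    .in_pin1 = 1,
    .out_pin0 = 2,
};
ESP_ERROR_CHECK(esp_enable_extern_coex_gpio_pin(EXTERN_COEX_WIRE_3, pins));
#endif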
@ -1,21 +1,14 @@
// Copyright 2018-2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2018-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef __ESP_COEXIST_INTERNAL_H__
#define __ESP_COEXIST_INTERNAL_H__

#include <stdbool.h>
#include "esp_coexist.h"
#include "esp_coexist_adapter.h"

#ifdef __cplusplus
@ -210,6 +203,29 @@ int coex_schm_curr_phase_idx_get(void);
 */
esp_err_t esp_coex_adapter_register(coex_adapter_funcs_t *funcs);

#if CONFIG_EXTERNAL_COEX_ENABLE
/**
 * @brief Set the external coexistence PTI levels and enable it.
 *
 * @param level1 external coex low pti
 * @param level2 external coex mid pti
 * @param level3 external coex high pti
 *
 * @return
 *    - ESP_OK: succeed
 */
esp_err_t esp_coex_external_set(esp_coex_pti_level_t level1,
               esp_coex_pti_level_t level2, esp_coex_pti_level_t level3);

/**
 * @brief Disable external coexistence.
 */
void esp_coex_external_stop(void);
#endif /* External Coex */

/**
 * @brief Check the MD5 values of the coexistence adapter header files in IDF and WiFi library
 *
@ -1,16 +1,8 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */


#ifndef __ESP_WIFI_TYPES_H__
@ -80,6 +72,7 @@ typedef enum {
    WIFI_REASON_ASSOC_NOT_AUTHED = 9,
    WIFI_REASON_DISASSOC_PWRCAP_BAD = 10,
    WIFI_REASON_DISASSOC_SUPCHAN_BAD = 11,
    WIFI_REASON_BSS_TRANSITION_DISASSOC = 12,
    WIFI_REASON_IE_INVALID = 13,
    WIFI_REASON_MIC_FAILURE = 14,
    WIFI_REASON_4WAY_HANDSHAKE_TIMEOUT = 15,
@ -250,7 +243,8 @@ typedef struct {
    wifi_pmf_config_t pmf_cfg; /**< Configuration for Protected Management Frame. Will be advertised in RSN Capabilities in RSN IE. */
    uint32_t rm_enabled:1;     /**< Whether Radio Measurements are enabled for the connection */
    uint32_t btm_enabled:1;    /**< Whether BSS Transition Management is enabled for the connection */
    uint32_t reserved:30;      /**< Reserved for future feature set */
    uint32_t mbo_enabled:1;    /**< Whether MBO is enabled for the connection */
    uint32_t reserved:29;      /**< Reserved for future feature set */
} wifi_sta_config_t;

/** @brief Configuration data for ESP32 AP or STA.
@ -90,7 +90,6 @@
#define portNUM_PROCESSORS 1
#endif

#define configASSERT_2 0
#define portUSING_MPU_WRAPPERS 0
#define configUSE_MUTEX 1

@ -206,7 +205,6 @@
#define configGENERATE_RUN_TIME_STATS 1 /* Used by vTaskGetRunTimeStats() */
#endif

#define configUSE_TRACE_FACILITY_2 0
#define configBENCHMARK 0
#define configUSE_16_BIT_TICKS 0
#define configIDLE_SHOULD_YIELD 0
@ -306,4 +304,9 @@ extern void vPortCleanUpTCB ( void *pxTCB );

#define configTASK_NOTIFICATION_ARRAY_ENTRIES 1

// backward compatibility for 4.4
#define xTaskRemoveFromUnorderedEventList vTaskRemoveFromUnorderedEventList

#define configNUM_CORES portNUM_PROCESSORS

#endif /* FREERTOS_CONFIG_H */
@ -64,7 +64,7 @@
|
||||
* used to create a synchronisation point between multiple tasks (a
|
||||
* 'rendezvous').
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup EventGroup EventGroup
|
||||
* @endcond
|
||||
*/
|
||||
@ -78,7 +78,7 @@
|
||||
* xEventGroupCreate() returns an EventGroupHandle_t variable that can then
|
||||
* be used as a parameter to other event group functions.
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup EventGroupHandle_t EventGroupHandle_t
|
||||
* @endcond
|
||||
* \ingroup EventGroup
|
||||
@ -94,7 +94,7 @@ typedef struct EventGroupDef_t * EventGroupHandle_t;
|
||||
* number of bits it holds is set by configUSE_16_BIT_TICKS (16 bits if set to 1,
|
||||
* 32 bits if set to 0.
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup EventBits_t EventBits_t
|
||||
* @endcond
|
||||
* \ingroup EventGroup
|
||||
@ -102,7 +102,7 @@ typedef struct EventGroupDef_t * EventGroupHandle_t;
|
||||
typedef TickType_t EventBits_t;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* event_groups.h
|
||||
* @code{c}
|
||||
* EventGroupHandle_t xEventGroupCreate( void );
|
||||
@ -152,7 +152,7 @@ typedef TickType_t EventBits_t;
|
||||
* // The event group was created.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xEventGroupCreate xEventGroupCreate
|
||||
* @endcond
|
||||
* \ingroup EventGroup
|
||||
@ -162,7 +162,7 @@ typedef TickType_t EventBits_t;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* event_groups.h
|
||||
* @code{c}
|
||||
* EventGroupHandle_t xEventGroupCreateStatic( EventGroupHandle_t * pxEventGroupBuffer );
|
||||
@ -217,7 +217,7 @@ typedef TickType_t EventBits_t;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* event_groups.h
|
||||
* @code{c}
|
||||
* EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
|
||||
@ -307,7 +307,7 @@ typedef TickType_t EventBits_t;
|
||||
* }
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xEventGroupWaitBits xEventGroupWaitBits
|
||||
* @endcond
|
||||
* \ingroup EventGroup
|
||||
@@ -319,7 +319,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* event_groups.h
* @code{c}
* EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear );
@@ -372,7 +372,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xEventGroupClearBits xEventGroupClearBits
* @endcond
* \ingroup EventGroup
@@ -381,7 +381,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* event_groups.h
* @code{c}
* BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet );
@@ -432,7 +432,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xEventGroupClearBitsFromISR xEventGroupClearBitsFromISR
* @endcond
* \ingroup EventGroup
@@ -446,7 +446,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
#endif

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* event_groups.h
* @code{c}
* EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet );
@@ -516,7 +516,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xEventGroupSetBits xEventGroupSetBits
* @endcond
* \ingroup EventGroup
@@ -525,7 +525,7 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
const EventBits_t uxBitsToSet ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* event_groups.h
* @code{c}
* BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken );
@@ -595,7 +595,7 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xEventGroupSetBitsFromISR xEventGroupSetBitsFromISR
* @endcond
* \ingroup EventGroup
@@ -610,7 +610,7 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
#endif

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* event_groups.h
* @code{c}
* EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
@@ -732,7 +732,7 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
* }
*
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xEventGroupSync xEventGroupSync
* @endcond
* \ingroup EventGroup
@@ -744,7 +744,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,


/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* event_groups.h
* @code{c}
* EventBits_t xEventGroupGetBits( EventGroupHandle_t xEventGroup );
@@ -758,7 +758,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
*
* @return The event group bits at the time xEventGroupGetBits() was called.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xEventGroupGetBits xEventGroupGetBits
* @endcond
* \ingroup EventGroup
@@ -766,7 +766,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
#define xEventGroupGetBits( xEventGroup ) xEventGroupClearBits( xEventGroup, 0 )
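
A quick usage sketch may help here: because uxBitsToClear is zero, the macro reads the current bits without modifying anything. The bit names below are illustrative, not from this header.

#include "freertos/FreeRTOS.h"
#include "freertos/event_groups.h"

#define BIT_RX_DONE    ( 1 << 0 )   /* illustrative event bit */
#define BIT_TX_DONE    ( 1 << 1 )   /* illustrative event bit */

void vPollEvents( EventGroupHandle_t xEvents )
{
    /* Expands to xEventGroupClearBits( xEvents, 0 ): nothing is cleared. */
    EventBits_t uxBits = xEventGroupGetBits( xEvents );

    if( ( uxBits & BIT_RX_DONE ) != 0 )
    {
        /* RX finished; BIT_TX_DONE may still be pending. */
    }
}
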
/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* event_groups.h
* @code{c}
* EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup );
@@ -779,7 +779,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
*
* @return The event group bits at the time xEventGroupGetBitsFromISR() was called.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xEventGroupGetBitsFromISR xEventGroupGetBitsFromISR
* @endcond
* \ingroup EventGroup
@@ -787,7 +787,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* event_groups.h
* @code{c}
* void xEventGroupDelete( EventGroupHandle_t xEventGroup );
@@ -802,7 +802,7 @@ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEG
*/
void vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION;

/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */

/* For internal use only. */
void vEventGroupSetBitsCallback( void * pvEventGroup,

@@ -85,7 +85,7 @@ typedef void * MessageBufferHandle_t;
/*-----------------------------------------------------------*/

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
*
* @code{c}
@@ -139,7 +139,7 @@ typedef void * MessageBufferHandle_t;
* }
*
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xMessageBufferCreate xMessageBufferCreate
* @endcond
* \ingroup MessageBufferManagement
@@ -148,7 +148,7 @@ typedef void * MessageBufferHandle_t;
( MessageBufferHandle_t ) xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
*
* @code{c}
@@ -210,7 +210,7 @@ typedef void * MessageBufferHandle_t;
* }
*
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xMessageBufferCreateStatic xMessageBufferCreateStatic
* @endcond
* \ingroup MessageBufferManagement
@@ -219,7 +219,7 @@ typedef void * MessageBufferHandle_t;
( MessageBufferHandle_t ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
*
* @code{c}
@@ -314,7 +314,7 @@ typedef void * MessageBufferHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xMessageBufferSend xMessageBufferSend
* @endcond
* \ingroup MessageBufferManagement
@@ -323,7 +323,7 @@ typedef void * MessageBufferHandle_t;
xStreamBufferSend( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait )
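
A rough sketch of the create/send/receive flow these macros give you (sizes and payload are arbitrary; xMessageBufferReceive() is documented further below):

#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/message_buffer.h"

void vMessageBufferDemo( void )
{
    MessageBufferHandle_t xBuffer = xMessageBufferCreate( 128 );
    char cRx[ 32 ];

    if( xBuffer != NULL )
    {
        /* Each message keeps its own length, unlike a raw stream buffer. */
        xMessageBufferSend( xBuffer, "hello", 5, pdMS_TO_TICKS( 100 ) );

        size_t xLen = xMessageBufferReceive( xBuffer, cRx, sizeof( cRx ),
                                             pdMS_TO_TICKS( 100 ) );
        ( void ) xLen;  /* 5: the stored message length, not sizeof( cRx ) */
    }
}
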
/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
*
* @code{c}
@@ -423,7 +423,7 @@ typedef void * MessageBufferHandle_t;
* portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xMessageBufferSendFromISR xMessageBufferSendFromISR
* @endcond
* \ingroup MessageBufferManagement
@@ -432,7 +432,7 @@ typedef void * MessageBufferHandle_t;
xStreamBufferSendFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
*
* @code{c}
@@ -516,7 +516,7 @@ typedef void * MessageBufferHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xMessageBufferReceive xMessageBufferReceive
* @endcond
* \ingroup MessageBufferManagement
@@ -526,7 +526,7 @@ typedef void * MessageBufferHandle_t;


/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
*
* @code{c}
@@ -622,7 +622,7 @@ typedef void * MessageBufferHandle_t;
* portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xMessageBufferReceiveFromISR xMessageBufferReceiveFromISR
* @endcond
* \ingroup MessageBufferManagement
@@ -631,7 +631,7 @@ typedef void * MessageBufferHandle_t;
xStreamBufferReceiveFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
*
* @code{c}
@@ -654,7 +654,7 @@ typedef void * MessageBufferHandle_t;
vStreamBufferDelete( ( StreamBufferHandle_t ) xMessageBuffer )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
* @code{c}
* BaseType_t xMessageBufferIsFull( MessageBufferHandle_t xMessageBuffer );
@@ -674,7 +674,7 @@ typedef void * MessageBufferHandle_t;
xStreamBufferIsFull( ( StreamBufferHandle_t ) xMessageBuffer )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
* @code{c}
* BaseType_t xMessageBufferIsEmpty( MessageBufferHandle_t xMessageBuffer );
@@ -693,7 +693,7 @@ typedef void * MessageBufferHandle_t;
xStreamBufferIsEmpty( ( StreamBufferHandle_t ) xMessageBuffer )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
* @code{c}
* BaseType_t xMessageBufferReset( MessageBufferHandle_t xMessageBuffer );
@@ -712,7 +712,7 @@ typedef void * MessageBufferHandle_t;
* the message queue to wait for space to become available, or to wait for
* a message to be available, then pdFAIL is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xMessageBufferReset xMessageBufferReset
* @endcond
* \ingroup MessageBufferManagement
@@ -722,7 +722,7 @@ typedef void * MessageBufferHandle_t;


/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
* @code{c}
* size_t xMessageBufferSpaceAvailable( MessageBufferHandle_t xMessageBuffer );
@@ -740,7 +740,7 @@ typedef void * MessageBufferHandle_t;
* architecture, so if xMessageBufferSpaceAvailable() returns 10, then the size
* of the largest message that can be written to the message buffer is 6 bytes.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xMessageBufferSpaceAvailable xMessageBufferSpaceAvailable
* @endcond
* \ingroup MessageBufferManagement
@@ -751,7 +751,7 @@ typedef void * MessageBufferHandle_t;
xStreamBufferSpacesAvailable( ( StreamBufferHandle_t ) xMessageBuffer ) /* Corrects typo in original macro name. */
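
The 4-byte length word described above can be checked with a small sketch, assuming a typical 32-bit configuration where the length field is 4 bytes:

#include "freertos/FreeRTOS.h"
#include "freertos/message_buffer.h"

void vSpaceAccounting( void )
{
    MessageBufferHandle_t xBuffer = xMessageBufferCreate( 100 );
    size_t xBefore = xMessageBufferSpaceAvailable( xBuffer );

    /* A 6-byte message consumes 6 + 4 bytes: the payload plus the
     * length word stored in front of it. */
    xMessageBufferSend( xBuffer, "abcdef", 6, 0 );

    size_t xAfter = xMessageBufferSpaceAvailable( xBuffer );
    configASSERT( ( xBefore - xAfter ) == 10 );
}
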
/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
* @code{c}
* size_t xMessageBufferNextLengthBytes( MessageBufferHandle_t xMessageBuffer );
@@ -767,7 +767,7 @@ typedef void * MessageBufferHandle_t;
* @return The length (in bytes) of the next message in the message buffer, or 0
* if the message buffer is empty.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xMessageBufferNextLengthBytes xMessageBufferNextLengthBytes
* @endcond
* \ingroup MessageBufferManagement
@@ -776,7 +776,7 @@ typedef void * MessageBufferHandle_t;
xStreamBufferNextMessageLengthBytes( ( StreamBufferHandle_t ) xMessageBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
*
* @code{c}
@@ -811,7 +811,7 @@ typedef void * MessageBufferHandle_t;
* @return If a task was removed from the Blocked state then pdTRUE is returned.
* Otherwise pdFALSE is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xMessageBufferSendCompletedFromISR xMessageBufferSendCompletedFromISR
* @endcond
* \ingroup StreamBufferManagement
@@ -820,7 +820,7 @@ typedef void * MessageBufferHandle_t;
xStreamBufferSendCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
*
* @code{c}
@@ -856,7 +856,7 @@ typedef void * MessageBufferHandle_t;
* @return If a task was removed from the Blocked state then pdTRUE is returned.
* Otherwise pdFALSE is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xMessageBufferReceiveCompletedFromISR xMessageBufferReceiveCompletedFromISR
* @endcond
* \ingroup StreamBufferManagement
@@ -62,7 +62,7 @@ typedef struct QueueDefinition * QueueSetHandle_t;
*/
typedef struct QueueDefinition * QueueSetMemberHandle_t;

/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */

/* For internal use only. */
#define queueSEND_TO_BACK ( ( BaseType_t ) 0 )
@@ -80,7 +80,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
/** @endcond */

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* QueueHandle_t xQueueCreate(
@@ -146,7 +146,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
* // ... Rest of task code.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueueCreate xQueueCreate
* @endcond
* \ingroup QueueManagement
@@ -156,7 +156,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
#endif

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* QueueHandle_t xQueueCreateStatic(
@@ -235,7 +235,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
* // ... Rest of task code.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueueCreateStatic xQueueCreateStatic
* @endcond
* \ingroup QueueManagement
@@ -245,7 +245,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
#endif /* configSUPPORT_STATIC_ALLOCATION */

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueueSendToFront(
@@ -321,7 +321,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
* // ... Rest of task code.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueueSend xQueueSend
* @endcond
* \ingroup QueueManagement
@@ -330,7 +330,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_FRONT )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueueSendToBack(
@@ -408,7 +408,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
* // ... Rest of task code.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueueSend xQueueSend
* @endcond
* \ingroup QueueManagement
@@ -417,7 +417,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueueSend(
@@ -497,7 +497,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
* // ... Rest of task code.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueueSend xQueueSend
* @endcond
* \ingroup QueueManagement
@@ -506,7 +506,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueueOverwrite(
@@ -585,7 +585,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
* // ...
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueueOverwrite xQueueOverwrite
* @endcond
* \ingroup QueueManagement
@@ -595,7 +595,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;

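
The mailbox shape that xQueueOverwrite() is intended for, condensed into a sketch (the reading value is illustrative):

#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"

static QueueHandle_t xMailbox;

void vMailboxInit( void )
{
    /* xQueueOverwrite() is only valid on queues of length one. */
    xMailbox = xQueueCreate( 1, sizeof( uint32_t ) );
}

void vPublishReading( uint32_t ulNewValue )
{
    /* Never blocks: if the mailbox already holds a value it is simply
     * replaced, so consumers always see the newest reading. */
    xQueueOverwrite( xMailbox, &ulNewValue );
}
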
/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueueGenericSend(
@@ -678,7 +678,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
* // ... Rest of task code.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueueSend xQueueSend
* @endcond
* \ingroup QueueManagement
@@ -689,7 +689,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueuePeek(
@@ -780,7 +780,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
* // ... Rest of task code.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueuePeek xQueuePeek
* @endcond
* \ingroup QueueManagement
@@ -790,7 +790,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueuePeekFromISR(
@@ -820,7 +820,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
* @return pdTRUE if an item was successfully received from the queue,
* otherwise pdFALSE.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueuePeekFromISR xQueuePeekFromISR
* @endcond
* \ingroup QueueManagement
@@ -829,7 +829,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
void * const pvBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueueReceive(
@@ -917,7 +917,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
* // ... Rest of task code.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueueReceive xQueueReceive
* @endcond
* \ingroup QueueManagement
@@ -927,7 +927,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue );
@@ -940,7 +940,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
*
* @return The number of messages available in the queue.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup uxQueueMessagesWaiting uxQueueMessagesWaiting
* @endcond
* \ingroup QueueManagement
@@ -948,7 +948,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue );
@@ -963,7 +963,7 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) PRIVILEGED_FUNC
*
* @return The number of spaces available in the queue.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup uxQueueMessagesWaiting uxQueueMessagesWaiting
* @endcond
* \ingroup QueueManagement
@@ -971,7 +971,7 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) PRIVILEGED_FUNC
UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* void vQueueDelete( QueueHandle_t xQueue );
@@ -983,7 +983,7 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) PRIVILEGED_FUNC
*
* @param xQueue A handle to the queue to be deleted.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vQueueDelete vQueueDelete
* @endcond
* \ingroup QueueManagement
@@ -991,7 +991,7 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) PRIVILEGED_FUNC
void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueueSendToFrontFromISR(
@@ -1057,7 +1057,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
* }
* @endcode
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueueSendFromISR xQueueSendFromISR
* @endcond
* \ingroup QueueManagement
@@ -1067,7 +1067,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;


/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueueSendToBackFromISR(
@@ -1133,7 +1133,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
* }
* @endcode
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueueSendFromISR xQueueSendFromISR
* @endcond
* \ingroup QueueManagement
@@ -1142,7 +1142,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueueOverwriteFromISR(
@@ -1225,7 +1225,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueueOverwriteFromISR xQueueOverwriteFromISR
* @endcond
* \ingroup QueueManagement
@@ -1234,7 +1234,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueOVERWRITE )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueueSendFromISR(
@@ -1304,7 +1304,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
* }
* @endcode
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueueSendFromISR xQueueSendFromISR
* @endcond
* \ingroup QueueManagement
@@ -1312,10 +1312,10 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
#define xQueueSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) \
xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK )
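
A condensed version of the ISR pattern the comments above walk through; the hardware helpers are placeholders, and note that some ports pass xHigherPriorityTaskWoken as the argument to portYIELD_FROM_ISR():

#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"

extern QueueHandle_t xRxQueue;       /* created elsewhere */
extern char cGetNextByte( void );    /* placeholder hardware read */
extern BaseType_t xBytesRemaining( void );

void vBufferISR( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    while( xBytesRemaining() != pdFALSE )
    {
        char cIn = cGetNextByte();
        xQueueSendFromISR( xRxQueue, &cIn, &xHigherPriorityTaskWoken );
    }

    /* Switch context on exit if a send unblocked a higher-priority task. */
    if( xHigherPriorityTaskWoken != pdFALSE )
    {
        portYIELD_FROM_ISR();
    }
}
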
/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
/**@{*/
/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueueGenericSendFromISR(
@@ -1402,7 +1402,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
/** @endcond */

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* queue. h
* @code{c}
* BaseType_t xQueueReceiveFromISR(
@@ -1487,7 +1487,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xQueueReceiveFromISR xQueueReceiveFromISR
* @endcond
* \ingroup QueueManagement
@@ -1504,7 +1504,7 @@ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FU
BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;

/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
/*
* The functions defined above are for passing data to and from tasks. The
* functions below are the equivalents for passing data to and from
@@ -1778,7 +1778,7 @@ QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
*/
QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;

/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */

/* Not public API functions. */
void vQueueWaitForMessageRestricted( QueueHandle_t xQueue,
@@ -39,7 +39,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#define semSEMAPHORE_QUEUE_ITEM_LENGTH ( ( uint8_t ) 0U )
#define semGIVE_BLOCK_TIME ( ( TickType_t ) 0U )

/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
/**
* semphr. h
* @code{c}
@@ -88,7 +88,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vSemaphoreCreateBinary vSemaphoreCreateBinary
* @endcond
* \ingroup Semaphores
@@ -106,7 +106,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
/** @endcond */

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr. h
* @code{c}
* SemaphoreHandle_t xSemaphoreCreateBinary( void );
@@ -163,7 +163,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xSemaphoreCreateBinary xSemaphoreCreateBinary
* @endcond
* \ingroup Semaphores
@@ -173,7 +173,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#endif

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr. h
* @code{c}
* SemaphoreHandle_t xSemaphoreCreateBinaryStatic( StaticSemaphore_t *pxSemaphoreBuffer );
@@ -229,7 +229,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
* // Rest of task code goes here.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xSemaphoreCreateBinaryStatic xSemaphoreCreateBinaryStatic
* @endcond
* \ingroup Semaphores
@@ -239,7 +239,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#endif /* configSUPPORT_STATIC_ALLOCATION */

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr. h
* @code{c}
* xSemaphoreTake(
@@ -304,7 +304,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xSemaphoreTake xSemaphoreTake
* @endcond
* \ingroup Semaphores
@@ -312,7 +312,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#define xSemaphoreTake( xSemaphore, xBlockTime ) xQueueSemaphoreTake( ( xSemaphore ), ( xBlockTime ) )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr. h
* @code{c}
* xSemaphoreTakeRecursive(
@@ -403,7 +403,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xSemaphoreTakeRecursive xSemaphoreTakeRecursive
* @endcond
* \ingroup Semaphores
@@ -465,7 +465,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xSemaphoreGive xSemaphoreGive
* @endcond
* \ingroup Semaphores
@@ -473,7 +473,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#define xSemaphoreGive( xSemaphore ) xQueueGenericSend( ( QueueHandle_t ) ( xSemaphore ), NULL, semGIVE_BLOCK_TIME, queueSEND_TO_BACK )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr. h
* @code{c}
* xSemaphoreGiveRecursive( SemaphoreHandle_t xMutex );
@@ -555,7 +555,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xSemaphoreGiveRecursive xSemaphoreGiveRecursive
* @endcond
* \ingroup Semaphores
@@ -641,7 +641,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xSemaphoreGiveFromISR xSemaphoreGiveFromISR
* @endcond
* \ingroup Semaphores
@@ -649,7 +649,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#define xSemaphoreGiveFromISR( xSemaphore, pxHigherPriorityTaskWoken ) xQueueGiveFromISR( ( QueueHandle_t ) ( xSemaphore ), ( pxHigherPriorityTaskWoken ) )
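
These two macros usually appear together in the deferred-interrupt pattern; a sketch, with the binary semaphore created elsewhere:

#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"

extern SemaphoreHandle_t xIrqSemaphore;  /* from xSemaphoreCreateBinary() */

void vDeviceISR( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Signal the handler task; an ISR must never block. */
    xSemaphoreGiveFromISR( xIrqSemaphore, &xHigherPriorityTaskWoken );

    if( xHigherPriorityTaskWoken != pdFALSE )
    {
        portYIELD_FROM_ISR();
    }
}

void vHandlerTask( void * pvParameters )
{
    for( ;; )
    {
        if( xSemaphoreTake( xIrqSemaphore, portMAX_DELAY ) == pdTRUE )
        {
            /* Process the interrupt at task level. */
        }
    }
}
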
/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr. h
* @code{c}
* xSemaphoreTakeFromISR(
@@ -686,7 +686,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#define xSemaphoreTakeFromISR( xSemaphore, pxHigherPriorityTaskWoken ) xQueueReceiveFromISR( ( QueueHandle_t ) ( xSemaphore ), NULL, ( pxHigherPriorityTaskWoken ) )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr. h
* @code{c}
* SemaphoreHandle_t xSemaphoreCreateMutex( void );
@@ -741,7 +741,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xSemaphoreCreateMutex xSemaphoreCreateMutex
* @endcond
* \ingroup Semaphores
@@ -751,7 +751,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#endif
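
For reference, the create/take/give shape from the xSemaphoreCreateMutex() comments above, condensed (the guarded resource is hypothetical):

#include "freertos/FreeRTOS.h"
#include "freertos/semphr.h"

static SemaphoreHandle_t xResourceMutex;

void vGuardedAccess( void )
{
    if( xResourceMutex == NULL )
    {
        /* Mutexes are created in the "available" state; no initial give. */
        xResourceMutex = xSemaphoreCreateMutex();
    }

    if( xSemaphoreTake( xResourceMutex, pdMS_TO_TICKS( 10 ) ) == pdTRUE )
    {
        /* ... access the shared resource ... */
        xSemaphoreGive( xResourceMutex );
    }
}
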
/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr. h
* @code{c}
* SemaphoreHandle_t xSemaphoreCreateMutexStatic( StaticSemaphore_t *pxMutexBuffer );
@@ -808,7 +808,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
* // so there is no need to check it.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xSemaphoreCreateMutexStatic xSemaphoreCreateMutexStatic
* @endcond
* \ingroup Semaphores
@@ -951,7 +951,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#endif /* configSUPPORT_STATIC_ALLOCATION */

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr. h
* @code{c}
* SemaphoreHandle_t xSemaphoreCreateCounting( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount );
@@ -1027,7 +1027,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xSemaphoreCreateCounting xSemaphoreCreateCounting
* @endcond
* \ingroup Semaphores
@@ -1037,7 +1037,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#endif

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr. h
* @code{c}
* SemaphoreHandle_t xSemaphoreCreateCountingStatic( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount, StaticSemaphore_t *pxSemaphoreBuffer );
@@ -1118,7 +1118,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
* // is no need to check its value.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xSemaphoreCreateCountingStatic xSemaphoreCreateCountingStatic
* @endcond
* \ingroup Semaphores
@@ -1128,7 +1128,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#endif /* configSUPPORT_STATIC_ALLOCATION */

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr. h
* @code{c}
* void vSemaphoreDelete( SemaphoreHandle_t xSemaphore );
@@ -1140,7 +1140,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
*
* @param xSemaphore A handle to the semaphore to be deleted.
*
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* \defgroup vSemaphoreDelete vSemaphoreDelete
* @endcond
* \ingroup Semaphores
@@ -1148,7 +1148,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#define vSemaphoreDelete( xSemaphore ) vQueueDelete( ( QueueHandle_t ) ( xSemaphore ) )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr.h
* @code{c}
* TaskHandle_t xSemaphoreGetMutexHolder( SemaphoreHandle_t xMutex );
@@ -1167,7 +1167,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#define xSemaphoreGetMutexHolder( xSemaphore ) xQueueGetMutexHolder( ( xSemaphore ) )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr.h
* @code{c}
* TaskHandle_t xSemaphoreGetMutexHolderFromISR( SemaphoreHandle_t xMutex );
@@ -1182,7 +1182,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#define xSemaphoreGetMutexHolderFromISR( xSemaphore ) xQueueGetMutexHolderFromISR( ( xSemaphore ) )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr.h
* @code{c}
* UBaseType_t uxSemaphoreGetCount( SemaphoreHandle_t xSemaphore );
@@ -71,7 +71,7 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t;


/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -134,7 +134,7 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferCreate xStreamBufferCreate
* @endcond
* \ingroup StreamBufferManagement
@@ -142,7 +142,7 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t;
#define xStreamBufferCreate( xBufferSizeBytes, xTriggerLevelBytes ) xStreamBufferGenericCreate( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -220,7 +220,7 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t;
* }
*
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferCreateStatic xStreamBufferCreateStatic
* @endcond
* \ingroup StreamBufferManagement
@@ -229,7 +229,7 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t;
xStreamBufferGenericCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pucStreamBufferStorageArea, pxStaticStreamBuffer )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -319,7 +319,7 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferSend xStreamBufferSend
* @endcond
* \ingroup StreamBufferManagement
@@ -330,7 +330,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
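
A compact sketch of the byte-stream flow behind xStreamBufferSend() and xStreamBufferReceive(); buffer size and trigger level are arbitrary:

#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "freertos/stream_buffer.h"

void vStreamDemo( void )
{
    /* 100-byte buffer; a blocked reader wakes once 10 bytes are present. */
    StreamBufferHandle_t xStream = xStreamBufferCreate( 100, 10 );
    uint8_t ucRx[ 20 ];

    if( xStream != NULL )
    {
        /* Bytes carry no per-message framing, unlike a message buffer. */
        xStreamBufferSend( xStream, "0123456789", 10, pdMS_TO_TICKS( 100 ) );

        size_t xReceived = xStreamBufferReceive( xStream, ucRx, sizeof( ucRx ),
                                                 pdMS_TO_TICKS( 100 ) );
        ( void ) xReceived;  /* 10: all available bytes, capped at sizeof( ucRx ) */
    }
}
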
/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -424,7 +424,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
* taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferSendFromISR xStreamBufferSendFromISR
* @endcond
* \ingroup StreamBufferManagement
@@ -435,7 +435,7 @@ size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -517,7 +517,7 @@ size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferReceive xStreamBufferReceive
* @endcond
* \ingroup StreamBufferManagement
@@ -528,7 +528,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -607,7 +607,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
* taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferReceiveFromISR xStreamBufferReceiveFromISR
* @endcond
* \ingroup StreamBufferManagement
@@ -618,7 +618,7 @@ size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -636,7 +636,7 @@ size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
*
* @param xStreamBuffer The handle of the stream buffer to be deleted.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vStreamBufferDelete vStreamBufferDelete
* @endcond
* \ingroup StreamBufferManagement
@@ -644,7 +644,7 @@ size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -660,7 +660,7 @@ void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTI
* @return If the stream buffer is full then pdTRUE is returned. Otherwise
* pdFALSE is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferIsFull xStreamBufferIsFull
* @endcond
* \ingroup StreamBufferManagement
@@ -668,7 +668,7 @@ void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTI
BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -684,7 +684,7 @@ BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_
* @return If the stream buffer is empty then pdTRUE is returned. Otherwise
* pdFALSE is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferIsEmpty xStreamBufferIsEmpty
* @endcond
* \ingroup StreamBufferManagement
@@ -692,7 +692,7 @@ BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_
BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -711,7 +711,7 @@ BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED
* a task blocked waiting to send to or read from the stream buffer then the
* stream buffer is not reset and pdFAIL is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferReset xStreamBufferReset
* @endcond
* \ingroup StreamBufferManagement
@@ -719,7 +719,7 @@ BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED
BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -736,7 +736,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_F
* @return The number of bytes that can be written to the stream buffer before
* the stream buffer would be full.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferSpacesAvailable xStreamBufferSpacesAvailable
* @endcond
* \ingroup StreamBufferManagement
@@ -744,7 +744,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_F
size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -761,7 +761,7 @@ size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVIL
* @return The number of bytes that can be read from the stream buffer before
* the stream buffer would be empty.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferBytesAvailable xStreamBufferBytesAvailable
* @endcond
* \ingroup StreamBufferManagement
@@ -769,7 +769,7 @@ size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVIL
size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -802,7 +802,7 @@ size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILE
* then the trigger level will be updated and pdTRUE is returned. Otherwise
* pdFALSE is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferSetTriggerLevel xStreamBufferSetTriggerLevel
* @endcond
* \ingroup StreamBufferManagement
@@ -811,7 +811,7 @@ BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
size_t xTriggerLevel ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -846,7 +846,7 @@ BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
* @return If a task was removed from the Blocked state then pdTRUE is returned.
* Otherwise pdFALSE is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferSendCompletedFromISR xStreamBufferSendCompletedFromISR
* @endcond
* \ingroup StreamBufferManagement
@@ -855,7 +855,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer
BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@@ -891,7 +891,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer
* @return If a task was removed from the Blocked state then pdTRUE is returned.
* Otherwise pdFALSE is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferReceiveCompletedFromISR xStreamBufferReceiveCompletedFromISR
* @endcond
* \ingroup StreamBufferManagement
@@ -899,7 +899,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer
BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer,
BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;

/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
/* Functions below here are not part of the public API. */
StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes,
size_t xTriggerLevelBytes,
@@ -76,7 +76,7 @@
* returns (via a pointer parameter) a TaskHandle_t variable that can then
* be used as a parameter to vTaskDelete to delete the task.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup TaskHandle_t TaskHandle_t
* @endcond
* \ingroup Tasks
@@ -114,7 +114,7 @@ typedef enum
eSetValueWithoutOverwrite /* Set the task's notification value if the previous value has been read by the task. */
} eNotifyAction;

/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
/**
* Used internally only.
*/
@@ -189,11 +189,13 @@ typedef enum
#define tskIDLE_PRIORITY ( ( UBaseType_t ) 0U )

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @endcond
*
* Macro for forcing a context switch.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup taskYIELD taskYIELD
* @endcond
* \ingroup SchedulerControl
@@ -201,7 +203,9 @@ typedef enum
#define taskYIELD() portYIELD()

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @endcond
*
* Macro to mark the start of a critical code region. Preemptive context
* switches cannot occur when in a critical region.
@@ -209,7 +213,7 @@ typedef enum
* @note This may alter the stack (depending on the portable implementation)
* so must be used with care!
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup taskENTER_CRITICAL taskENTER_CRITICAL
* @endcond
* \ingroup SchedulerControl
@@ -228,7 +232,9 @@ typedef enum
#endif // ESP_PLATFORM

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @endcond
*
* Macro to mark the end of a critical code region. Preemptive context
* switches cannot occur when in a critical region.
@@ -236,7 +242,7 @@ typedef enum
* @note This may alter the stack (depending on the portable implementation)
* so must be used with care!
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL
* @endcond
* \ingroup SchedulerControl
@@ -255,11 +261,13 @@ typedef enum
#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( )
#endif // ESP_PLATFORM
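
On ESP_PLATFORM these critical-section macros take a spinlock argument, because masking interrupts on one core does not stop the other core; a minimal sketch:

#include <stdint.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static portMUX_TYPE xSpinlock = portMUX_INITIALIZER_UNLOCKED;
static volatile uint32_t ulSharedCounter;

void vIncrementShared( void )
{
    /* Masks interrupts on this core and takes the spinlock, so the update
     * is safe against the other core and against local ISRs. */
    taskENTER_CRITICAL( &xSpinlock );
    ulSharedCounter++;
    taskEXIT_CRITICAL( &xSpinlock );
}
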
/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @endcond
*
* Macro to disable all maskable interrupts.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup taskDISABLE_INTERRUPTS taskDISABLE_INTERRUPTS
* @endcond
* \ingroup SchedulerControl
@@ -267,11 +275,13 @@ typedef enum
#define taskDISABLE_INTERRUPTS() portDISABLE_INTERRUPTS()

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @endcond
*
* Macro to enable microcontroller interrupts.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup taskENABLE_INTERRUPTS taskENABLE_INTERRUPTS
* @endcond
* \ingroup SchedulerControl
@@ -422,7 +432,7 @@ typedef enum
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskCreate xTaskCreate
* @endcond
* \ingroup Tasks
@@ -430,14 +440,14 @@ typedef enum
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

static inline IRAM_ATTR BaseType_t xTaskCreate(
TaskFunction_t pvTaskCode,
const char * const pcName,
const uint32_t usStackDepth,
void * const pvParameters,
UBaseType_t uxPriority,
TaskHandle_t * const pvCreatedTask)
TaskFunction_t pvTaskCode,
const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
const uint32_t usStackDepth,
void * const pvParameters,
UBaseType_t uxPriority,
TaskHandle_t * const pxCreatedTask) PRIVILEGED_FUNCTION
{
return xTaskCreatePinnedToCore( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pvCreatedTask, tskNO_AFFINITY );
return xTaskCreatePinnedToCore( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask, tskNO_AFFINITY );
}

#endif
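
So on this port a plain xTaskCreate() is simply an unpinned create; for example (task body, stack size and priority are illustrative):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static void vBlinkTask( void * pvParameters )
{
    for( ;; )
    {
        vTaskDelay( pdMS_TO_TICKS( 500 ) );
    }
}

void vStartBlink( void )
{
    /* Equivalent to xTaskCreatePinnedToCore( ..., tskNO_AFFINITY ):
     * the scheduler may run the task on either core. */
    xTaskCreate( vBlinkTask, "blink", 2048, NULL, tskIDLE_PRIORITY + 1, NULL );
}
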
@@ -599,20 +609,20 @@ typedef enum

#if( configSUPPORT_STATIC_ALLOCATION == 1 )
static inline IRAM_ATTR TaskHandle_t xTaskCreateStatic(
TaskFunction_t pvTaskCode,
const char * const pcName,
const uint32_t ulStackDepth,
void * const pvParameters,
UBaseType_t uxPriority,
StackType_t * const pxStackBuffer,
StaticTask_t * const pxTaskBuffer)
TaskFunction_t pvTaskCode,
const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
const uint32_t ulStackDepth,
void * const pvParameters,
UBaseType_t uxPriority,
StackType_t * const puxStackBuffer,
StaticTask_t * const pxTaskBuffer) PRIVILEGED_FUNCTION
{
return xTaskCreateStaticPinnedToCore( pvTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, pxStackBuffer, pxTaskBuffer, tskNO_AFFINITY );
return xTaskCreateStaticPinnedToCore( pvTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, tskNO_AFFINITY );
}
#endif /* configSUPPORT_STATIC_ALLOCATION */

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskCreateRestricted( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );
@@ -683,18 +693,18 @@ typedef enum
* for( ;; );
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskCreateRestricted xTaskCreateRestricted
* @endcond
* \ingroup Tasks
*/
#if ( portUSING_MPU_WRAPPERS == 1 )
BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
TaskHandle_t * pxCreatedTask );
TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION;
#endif

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskCreateRestrictedStatic( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );
@@ -777,7 +787,7 @@ typedef enum
* for( ;; );
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskCreateRestrictedStatic xTaskCreateRestrictedStatic
* @endcond
* \ingroup Tasks
@@ -788,7 +798,7 @@ typedef enum
#endif

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions );
@@ -833,7 +843,7 @@ typedef enum
* // defined or shared regions have been declared elsewhere).
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskCreateRestricted xTaskCreateRestricted
* @endcond
* \ingroup Tasks
@@ -842,7 +852,7 @@ void vTaskAllocateMPURegions( TaskHandle_t xTask,
const MemoryRegion_t * const pxRegions ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskDelete( TaskHandle_t xTask );
@@ -881,7 +891,7 @@ void vTaskAllocateMPURegions( TaskHandle_t xTask,
* vTaskDelete( xHandle );
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskDelete vTaskDelete
* @endcond
* \ingroup Tasks
@@ -893,10 +903,12 @@ void vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION;
*----------------------------------------------------------*/

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskDelay( const TickType_t xTicksToDelay );
* @endcode
* @endcond
*
* Delay a task for a given number of ticks. The actual time that the
* task remains blocked depends on the tick rate. The constant
@@ -938,7 +950,7 @@ void vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION;
* }
* @endcode
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskDelay vTaskDelay
* @endcond
* \ingroup TaskCtrl
@@ -946,10 +958,12 @@ void vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION;
void vTaskDelay( const TickType_t xTicksToDelay ) PRIVILEGED_FUNCTION;

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskDelayUntil( TickType_t *pxPreviousWakeTime, const TickType_t xTimeIncrement );
* @endcode
* @endcond
*
* INCLUDE_xTaskDelayUntil must be defined as 1 for this function to be available.
* See the configuration section for more information.
@@ -1007,7 +1021,7 @@ void vTaskDelay( const TickType_t xTicksToDelay ) PRIVILEGED_FUNCTION;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskDelayUntil xTaskDelayUntil
* @endcond
* \ingroup TaskCtrl
@@ -1026,7 +1040,7 @@ BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,

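
The canonical fixed-frequency loop from the xTaskDelayUntil() notes above, condensed (the 10 ms period is an example):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

void vPeriodicTask( void * pvParameters )
{
    const TickType_t xPeriod = pdMS_TO_TICKS( 10 );
    TickType_t xLastWakeTime = xTaskGetTickCount();

    for( ;; )
    {
        /* Measured from the previous wake time, not from "now", so the
         * period does not drift with the loop's execution time. */
        xTaskDelayUntil( &xLastWakeTime, xPeriod );
        /* ... periodic work ... */
    }
}
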
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* task. h
|
||||
* @code{c}
|
||||
* BaseType_t xTaskAbortDelay( TaskHandle_t xTask );
|
||||
@ -1054,7 +1068,7 @@ BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
|
||||
* @return If the task referenced by xTask was not in the Blocked state then
|
||||
* pdFAIL is returned. Otherwise pdPASS is returned.
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xTaskAbortDelay xTaskAbortDelay
|
||||
* @endcond
|
||||
* \ingroup TaskCtrl
|
||||
@ -1062,7 +1076,7 @@ BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
|
||||
BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* task. h
|
||||
* @code{c}
|
||||
* UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask );
|
||||
@ -1107,7 +1121,7 @@ BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
|
||||
* }
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup uxTaskPriorityGet uxTaskPriorityGet
|
||||
* @endcond
|
||||
* \ingroup TaskCtrl
|
||||
@ -1115,7 +1129,7 @@ BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
|
||||
UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* task. h
|
||||
* @code{c}
|
||||
* UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask );
|
||||
@ -1127,7 +1141,7 @@ UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
|
||||
UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* task. h
|
||||
* @code{c}
|
||||
* eTaskState eTaskGetState( TaskHandle_t xTask );
|
||||
@ -1149,7 +1163,7 @@ UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) PRIVILEGED_FUNC
|
||||
eTaskState eTaskGetState( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* task. h
|
||||
* @code{c}
|
||||
* void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState );
|
||||
@ -1203,7 +1217,7 @@ eTaskState eTaskGetState( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
|
||||
* eInvalid ); // Include the task state in xTaskDetails.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup vTaskGetInfo vTaskGetInfo
|
||||
* @endcond
|
||||
* \ingroup TaskCtrl
|
||||
@ -1214,7 +1228,7 @@ void vTaskGetInfo( TaskHandle_t xTask,
|
||||
eTaskState eState ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* task. h
|
||||
* @code{c}
|
||||
* void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority );
|
||||
@ -1254,7 +1268,7 @@ void vTaskGetInfo( TaskHandle_t xTask,
|
||||
* vTaskPrioritySet( NULL, tskIDLE_PRIORITY + 1 );
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup vTaskPrioritySet vTaskPrioritySet
|
||||
* @endcond
|
||||
* \ingroup TaskCtrl
|
||||
@ -1263,7 +1277,7 @@ void vTaskPrioritySet( TaskHandle_t xTask,
|
||||
UBaseType_t uxNewPriority ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* task. h
|
||||
* @code{c}
|
||||
* void vTaskSuspend( TaskHandle_t xTaskToSuspend );
|
||||
@ -1312,7 +1326,7 @@ void vTaskPrioritySet( TaskHandle_t xTask,
|
||||
* // with our handle as the parameter.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup vTaskSuspend vTaskSuspend
|
||||
* @endcond
|
||||
* \ingroup TaskCtrl
|
||||
@ -1320,7 +1334,7 @@ void vTaskPrioritySet( TaskHandle_t xTask,
|
||||
void vTaskSuspend( TaskHandle_t xTaskToSuspend ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* task. h
|
||||
* @code{c}
|
||||
* void vTaskResume( TaskHandle_t xTaskToResume );
|
||||
@ -1367,7 +1381,7 @@ void vTaskSuspend( TaskHandle_t xTaskToSuspend ) PRIVILEGED_FUNCTION;
|
||||
* // time in accordance with its priority within the system.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup vTaskResume vTaskResume
|
||||
* @endcond
|
||||
* \ingroup TaskCtrl
|
||||
@ -1375,7 +1389,7 @@ void vTaskSuspend( TaskHandle_t xTaskToSuspend ) PRIVILEGED_FUNCTION;
|
||||
void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * void xTaskResumeFromISR( TaskHandle_t xTaskToResume );
@ -1402,7 +1416,7 @@ void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
 * otherwise pdFALSE. This is used by the ISR to determine if a context switch
 * may be required following the ISR.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup vTaskResumeFromISR vTaskResumeFromISR
 * @endcond
 * \ingroup TaskCtrl
@ -1412,9 +1426,9 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------
 * SCHEDULER CONTROL
 *----------------------------------------------------------*/
/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * void vTaskStartScheduler( void );
@ -1445,7 +1459,7 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
 * }
 * @endcode
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup vTaskStartScheduler vTaskStartScheduler
 * @endcond
 * \ingroup SchedulerControl
@ -1453,7 +1467,7 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
void vTaskStartScheduler( void ) PRIVILEGED_FUNCTION;
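
A sketch of the usual boot sequence on a vanilla FreeRTOS port: create at least one task, then hand control to the scheduler. The task function, stack depth, and priority here are illustrative assumptions. (On ESP-IDF the scheduler is already running by the time app_main() is called, so this applies to bare FreeRTOS ports.)

void vWorkerTask( void *pvParameters );  /* Hypothetical task function. */

void vAppBoot( void )
{
    xTaskCreate( vWorkerTask,           /* Function implementing the task. */
                 "worker",              /* Human-readable name. */
                 2048,                  /* Stack depth, in words. */
                 NULL,                  /* Task parameter. */
                 tskIDLE_PRIORITY + 1,  /* Priority. */
                 NULL );                /* Handle not needed here. */
    vTaskStartScheduler();              /* Only returns if the idle task cannot be created. */
}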

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * void vTaskEndScheduler( void );
@ -1507,7 +1521,7 @@ void vTaskStartScheduler( void ) PRIVILEGED_FUNCTION;
 * }
 * @endcode
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup vTaskEndScheduler vTaskEndScheduler
 * @endcond
 * \ingroup SchedulerControl
@ -1517,7 +1531,7 @@ void vTaskEndScheduler( void ) PRIVILEGED_FUNCTION;
/** @endcond */

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * void vTaskSuspendAll( void );
@ -1566,7 +1580,7 @@ void vTaskEndScheduler( void ) PRIVILEGED_FUNCTION;
 * }
 * }
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup vTaskSuspendAll vTaskSuspendAll
 * @endcond
 * \ingroup SchedulerControl
@ -1574,7 +1588,7 @@ void vTaskEndScheduler( void ) PRIVILEGED_FUNCTION;
void vTaskSuspendAll( void ) PRIVILEGED_FUNCTION;
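
A minimal sketch of the usual pairing with xTaskResumeAll(), documented next: wrap a short, non-blocking section that must not be preempted by other tasks. Note that interrupts remain enabled, so this does not protect data shared with ISRs. The shared counter is an assumption for illustration:

static uint32_t ulSharedCounter;

void vDemoProtectedIncrement( void )
{
    vTaskSuspendAll();          /* No task switches from here on... */
    ulSharedCounter++;          /* ...so this read-modify-write is safe from other tasks. */
    ( void ) xTaskResumeAll();  /* Returns pdTRUE if a context switch occurred. */
}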

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * BaseType_t xTaskResumeAll( void );
@ -1626,7 +1640,7 @@ void vTaskSuspendAll( void ) PRIVILEGED_FUNCTION;
 * }
 * }
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xTaskResumeAll xTaskResumeAll
 * @endcond
 * \ingroup SchedulerControl
@ -1638,7 +1652,7 @@ BaseType_t xTaskResumeAll( void ) PRIVILEGED_FUNCTION;
 *----------------------------------------------------------*/

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * TickType_t xTaskGetTickCount( void );
@ -1647,7 +1661,7 @@ BaseType_t xTaskResumeAll( void ) PRIVILEGED_FUNCTION;
 *
 * @return The count of ticks since vTaskStartScheduler was called.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xTaskGetTickCount xTaskGetTickCount
 * @endcond
 * \ingroup TaskUtils
@ -1655,7 +1669,7 @@ BaseType_t xTaskResumeAll( void ) PRIVILEGED_FUNCTION;
TickType_t xTaskGetTickCount( void ) PRIVILEGED_FUNCTION;
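
A common companion to xTaskGetTickCount() is converting ticks to wall-clock time; portTICK_PERIOD_MS is supplied by the port (see the portmacro.h changes later in this commit):

uint32_t ulUptimeMs( void )
{
    /* Ticks multiplied by the tick period in ms gives elapsed milliseconds. */
    return ( uint32_t ) ( xTaskGetTickCount() * portTICK_PERIOD_MS );
}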

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * TickType_t xTaskGetTickCountFromISR( void );
@ -1669,7 +1683,7 @@ TickType_t xTaskGetTickCount( void ) PRIVILEGED_FUNCTION;
 * microcontroller being used or interrupt nesting is either not supported or
 * not being used.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xTaskGetTickCountFromISR xTaskGetTickCountFromISR
 * @endcond
 * \ingroup TaskUtils
@ -1677,7 +1691,7 @@ TickType_t xTaskGetTickCount( void ) PRIVILEGED_FUNCTION;
TickType_t xTaskGetTickCountFromISR( void ) PRIVILEGED_FUNCTION;

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * uint16_t uxTaskGetNumberOfTasks( void );
@ -1689,7 +1703,7 @@ TickType_t xTaskGetTickCountFromISR( void ) PRIVILEGED_FUNCTION;
 * has been deleted but not yet freed by the idle task will also be
 * included in the count.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup uxTaskGetNumberOfTasks uxTaskGetNumberOfTasks
 * @endcond
 * \ingroup TaskUtils
@ -1697,7 +1711,7 @@ TickType_t xTaskGetTickCountFromISR( void ) PRIVILEGED_FUNCTION;
UBaseType_t uxTaskGetNumberOfTasks( void ) PRIVILEGED_FUNCTION;

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * char *pcTaskGetName( TaskHandle_t xTaskToQuery );
@ -1708,7 +1722,7 @@ UBaseType_t uxTaskGetNumberOfTasks( void ) PRIVILEGED_FUNCTION;
 * xTaskToQuery. A task can query its own name by either passing in its own
 * handle, or by setting xTaskToQuery to NULL.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup pcTaskGetName pcTaskGetName
 * @endcond
 * \ingroup TaskUtils
@ -1716,7 +1730,7 @@ UBaseType_t uxTaskGetNumberOfTasks( void ) PRIVILEGED_FUNCTION;
char * pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * TaskHandle_t xTaskGetHandle( const char *pcNameToQuery );
@ -1730,7 +1744,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; /*lin
 * NULL is returned if no matching name is found. INCLUDE_xTaskGetHandle
 * must be set to 1 in FreeRTOSConfig.h for xTaskGetHandle() to be available.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup pcTaskGetHandle pcTaskGetHandle
 * @endcond
 * \ingroup TaskUtils
@ -1813,7 +1827,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;
#ifdef configUSE_APPLICATION_TASK_TAG
    #if configUSE_APPLICATION_TASK_TAG == 1
/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task.h
 * @code{c}
 * void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction );
@ -1830,7 +1844,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;
                                 TaskHookFunction_t pxHookFunction ) PRIVILEGED_FUNCTION;

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task.h
 * @code{c}
 * void xTaskGetApplicationTaskTag( TaskHandle_t xTask );
@ -1844,7 +1858,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;
TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task.h
 * @code{c}
 * void xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask );
@ -1932,7 +1946,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;
#if ( configCHECK_FOR_STACK_OVERFLOW > 0 )

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task.h
 * @code{c}
 * void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );
@ -1952,7 +1966,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;

#if ( configUSE_TICK_HOOK > 0 )
/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task.h
 * @code{c}
 * void vApplicationTickHook( void );
@ -1967,7 +1981,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task.h
 * @code{c}
 * void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer, StackType_t ** ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize )
@ -1986,7 +2000,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;
#endif

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task.h
 * @code{c}
 * BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter );
@ -2155,7 +2169,7 @@ UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
 * enough to contain the generated report. Approximately 40 bytes per
 * task should be sufficient.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup vTaskList vTaskList
 * @endcond
 * \ingroup TaskUtils
@ -2210,7 +2224,7 @@ void vTaskList( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unq
 * contain the generated report. Approximately 40 bytes per task should
 * be sufficient.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup vTaskGetRunTimeStats vTaskGetRunTimeStats
 * @endcond
 * \ingroup TaskUtils
@ -2218,7 +2232,7 @@ void vTaskList( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unq
void vTaskGetRunTimeStats( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
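
A usage sketch for vTaskGetRunTimeStats(): the caller owns the buffer, sized at roughly 40 bytes per task as the comment above suggests. Assumes configGENERATE_RUN_TIME_STATS and configUSE_STATS_FORMATTING_FUNCTIONS are enabled, and that stdio is available:

void vPrintRunTimeStats( void )
{
    UBaseType_t uxTasks = uxTaskGetNumberOfTasks();
    char *pcBuffer = pvPortMalloc( uxTasks * 40 );  /* ~40 bytes per task. */

    if( pcBuffer != NULL )
    {
        vTaskGetRunTimeStats( pcBuffer );  /* Fills the buffer with a table of task stats. */
        printf( "%s", pcBuffer );
        vPortFree( pcBuffer );
    }
}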

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code
 * uint32_t ulTaskGetIdleRunTimeCounter( void );
@ -2246,7 +2260,7 @@ void vTaskGetRunTimeStats( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lin
 * frequency configured using the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and
 * portGET_RUN_TIME_COUNTER_VALUE() macros.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup ulTaskGetIdleRunTimeCounter ulTaskGetIdleRunTimeCounter
 * @endcond
 * \ingroup TaskUtils
@ -2254,11 +2268,13 @@ void vTaskGetRunTimeStats( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lin
uint32_t ulTaskGetIdleRunTimeCounter( void ) PRIVILEGED_FUNCTION;
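
A schematic sketch of one way this counter gets used: sample it twice, alongside the total run-time counter (as reported by uxTaskGetSystemState()), and derive a rough idle percentage. Both deltas must come from the same run-time clock, so treat this as illustrative only:

uint32_t ulIdlePercent( uint32_t ulIdleDelta, uint32_t ulTotalDelta )
{
    /* Percentage of the sampled interval spent in the idle task. */
    return ( ulTotalDelta > 0UL ) ? ( ulIdleDelta * 100UL ) / ulTotalDelta : 0UL;
}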

/**
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * BaseType_t xTaskNotifyIndexed( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction );
 * BaseType_t xTaskNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction );
 * @endcode
 * @endcond
 *
 * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
 *
@ -2359,7 +2375,9 @@ uint32_t ulTaskGetIdleRunTimeCounter( void ) PRIVILEGED_FUNCTION;
 * @return Dependent on the value of eAction. See the description of the
 * eAction parameter.
 *
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xTaskNotifyIndexed xTaskNotifyIndexed
 * @endcond
 * \ingroup TaskNotifications
 */
BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
@ -2373,11 +2391,13 @@ BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
    xTaskGenericNotify( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), NULL )

/**
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * BaseType_t xTaskNotifyAndQueryIndexed( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotifyValue );
 * BaseType_t xTaskNotifyAndQuery( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotifyValue );
 * @endcode
 * @endcond
 *
 * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
 *
@ -2393,7 +2413,9 @@ BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
 * than when the function returns) in the additional pulPreviousNotifyValue
 * parameter.
 *
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xTaskNotifyAndQueryIndexed xTaskNotifyAndQueryIndexed
 * @endcond
 * \ingroup TaskNotifications
 */
#define xTaskNotifyAndQuery( xTaskToNotify, ulValue, eAction, pulPreviousNotifyValue ) \
@ -2402,11 +2424,13 @@ BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
    xTaskGenericNotify( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotifyValue ) )

/**
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * BaseType_t xTaskNotifyIndexedFromISR( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken );
 * BaseType_t xTaskNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken );
 * @endcode
 * @endcond
 *
 * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
 *
@ -2511,7 +2535,9 @@ BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
 * @return Dependent on the value of eAction. See the description of the
 * eAction parameter.
 *
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xTaskNotifyIndexedFromISR xTaskNotifyIndexedFromISR
 * @endcond
 * \ingroup TaskNotifications
 */
BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
@ -2526,11 +2552,13 @@ BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
    xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), NULL, ( pxHigherPriorityTaskWoken ) )

/**
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * BaseType_t xTaskNotifyAndQueryIndexedFromISR( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken );
 * BaseType_t xTaskNotifyAndQueryFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken );
 * @endcode
 * @endcond
 *
 * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
 *
@ -2546,7 +2574,9 @@ BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
 * function is called rather than at the time the function returns) in the
 * additional pulPreviousNotifyValue parameter.
 *
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xTaskNotifyAndQueryIndexedFromISR xTaskNotifyAndQueryIndexedFromISR
 * @endcond
 * \ingroup TaskNotifications
 */
#define xTaskNotifyAndQueryIndexedFromISR( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken ) \
@ -2555,12 +2585,14 @@ BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
    xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( ulValue ), ( eAction ), ( pulPreviousNotificationValue ), ( pxHigherPriorityTaskWoken ) )

/**
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * BaseType_t xTaskNotifyWaitIndexed( UBaseType_t uxIndexToWaitOn, uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait );
 *
 * BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait );
 * @endcode
 * @endcond
 *
 * Waits for a direct to task notification to be pending at a given index within
 * an array of direct to task notifications.
@ -2655,7 +2687,9 @@ BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
 * already pending when xTaskNotifyWait was called) then pdPASS is
 * returned. Otherwise pdFAIL is returned.
 *
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xTaskNotifyWaitIndexed xTaskNotifyWaitIndexed
 * @endcond
 * \ingroup TaskNotifications
 */
BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
@ -2669,11 +2703,13 @@ BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
    xTaskGenericNotifyWait( ( uxIndexToWaitOn ), ( ulBitsToClearOnEntry ), ( ulBitsToClearOnExit ), ( pulNotificationValue ), ( xTicksToWait ) )
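
An event-flag style sketch built on xTaskNotifyWait(): a notifier sets bits with xTaskNotify( xHandle, BIT, eSetBits ) and the consumer below decodes them. The bit assignments are assumptions for illustration:

#define demoRX_BIT    ( 1UL << 0 )
#define demoTX_BIT    ( 1UL << 1 )

void vEventConsumerTask( void *pvParameters )
{
    uint32_t ulNotifiedValue;

    for( ;; )
    {
        /* Clear all bits on exit so each event is seen exactly once; block indefinitely. */
        if( xTaskNotifyWait( 0, 0xFFFFFFFFUL, &ulNotifiedValue, portMAX_DELAY ) == pdPASS )
        {
            if( ( ulNotifiedValue & demoRX_BIT ) != 0 ) { /* ...handle reception... */ }
            if( ( ulNotifiedValue & demoTX_BIT ) != 0 ) { /* ...handle transmission... */ }
        }
    }
}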

/**
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * BaseType_t xTaskNotifyGiveIndexed( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify );
 * BaseType_t xTaskNotifyGive( TaskHandle_t xTaskToNotify );
 * @endcode
 * @endcond
 *
 * Sends a direct to task notification to a particular index in the target
 * task's notification array in a manner similar to giving a counting semaphore.
@ -2737,7 +2773,9 @@ BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
 * @return xTaskNotifyGive() is a macro that calls xTaskNotify() with the
 * eAction parameter set to eIncrement - so pdPASS is always returned.
 *
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xTaskNotifyGiveIndexed xTaskNotifyGiveIndexed
 * @endcond
 * \ingroup TaskNotifications
 */
#define xTaskNotifyGive( xTaskToNotify ) \
@ -2746,11 +2784,13 @@ BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
    xTaskGenericNotify( ( xTaskToNotify ), ( uxIndexToNotify ), ( 0 ), eIncrement, NULL )

/**
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * void vTaskNotifyGiveIndexedFromISR( TaskHandle_t xTaskHandle, UBaseType_t uxIndexToNotify, BaseType_t *pxHigherPriorityTaskWoken );
 * void vTaskNotifyGiveFromISR( TaskHandle_t xTaskHandle, BaseType_t *pxHigherPriorityTaskWoken );
 * @endcode
 * @endcond
 *
 * A version of xTaskNotifyGiveIndexed() that can be called from an interrupt
 * service routine (ISR).
@ -2821,7 +2861,9 @@ BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
 * requested from an ISR is dependent on the port - see the documentation page
 * for the port in use.
 *
 * @cond !DOC_SINGLE_GROUP
 * \defgroup vTaskNotifyGiveIndexedFromISR vTaskNotifyGiveIndexedFromISR
 * @endcond
 * \ingroup TaskNotifications
 */
void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
@ -2833,12 +2875,14 @@ void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
    vTaskGenericNotifyGiveFromISR( ( xTaskToNotify ), ( uxIndexToNotify ), ( pxHigherPriorityTaskWoken ) );

/**
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * uint32_t ulTaskNotifyTakeIndexed( UBaseType_t uxIndexToWaitOn, BaseType_t xClearCountOnExit, TickType_t xTicksToWait );
 *
 * uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait );
 * @endcode
 * @endcond
 *
 * Waits for a direct to task notification on a particular index in the calling
 * task's notification array in a manner similar to taking a counting semaphore.
@ -2927,7 +2971,9 @@ void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
 * @return The task's notification count before it is either cleared to zero or
 * decremented (see the xClearCountOnExit parameter).
 *
 * @cond !DOC_SINGLE_GROUP
 * \defgroup ulTaskNotifyTakeIndexed ulTaskNotifyTakeIndexed
 * @endcond
 * \ingroup TaskNotifications
 */
uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
@ -2939,12 +2985,14 @@ uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
    ulTaskGenericNotifyTake( ( uxIndexToWaitOn ), ( xClearCountOnExit ), ( xTicksToWait ) )
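
The canonical lightweight-semaphore pairing of the two APIs above: an ISR gives, a deferred-handling task takes. The handle is assumed to have been stored when the task was created; note that this port's portYIELD_FROM_ISR() takes no argument (see the portmacro.h changes later in this commit):

static TaskHandle_t xHandlerTask = NULL;  /* Set at task creation time. */

void vDemoISR( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    vTaskNotifyGiveFromISR( xHandlerTask, &xHigherPriorityTaskWoken );

    if( xHigherPriorityTaskWoken == pdTRUE )
    {
        portYIELD_FROM_ISR();  /* Request the context switch on exit from the ISR. */
    }
}

void vHandlerTask( void *pvParameters )
{
    for( ;; )
    {
        /* Act once per give; clear the count back to zero on exit. */
        if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) > 0 )
        {
            /* ...process the deferred interrupt work... */
        }
    }
}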

/**
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * BaseType_t xTaskNotifyStateClearIndexed( TaskHandle_t xTask, UBaseType_t uxIndexToClear );
 *
 * BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask );
 * @endcode
 * @endcond
 *
 * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
 *
@ -2992,7 +3040,9 @@ uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
 * @return pdTRUE if the task's notification state was set to
 * eNotWaitingNotification, otherwise pdFALSE.
 *
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xTaskNotifyStateClearIndexed xTaskNotifyStateClearIndexed
 * @endcond
 * \ingroup TaskNotifications
 */
BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
@ -3003,12 +3053,14 @@ BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
    xTaskGenericNotifyStateClear( ( xTask ), ( uxIndexToClear ) )

/**
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task. h
 * @code{c}
 * uint32_t ulTaskNotifyValueClearIndexed( TaskHandle_t xTask, UBaseType_t uxIndexToClear, uint32_t ulBitsToClear );
 *
 * uint32_t ulTaskNotifyValueClear( TaskHandle_t xTask, uint32_t ulBitsToClear );
 * @endcode
 * @endcond
 *
 * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
 *
@ -3057,7 +3109,9 @@ BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
 *
 * @return The value of the target task's notification value before the bits
 * specified by ulBitsToClear were cleared.
 * @cond !DOC_SINGLE_GROUP
 * \defgroup ulTaskNotifyValueClear ulTaskNotifyValueClear
 * @endcond
 * \ingroup TaskNotifications
 */
uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
@ -3069,7 +3123,7 @@ uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
    ulTaskGenericNotifyValueClear( ( xTask ), ( uxIndexToClear ), ( ulBitsToClear ) )

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task.h
 * @code{c}
 * void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut );
@ -3082,14 +3136,14 @@ uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
 * is to be captured. The captured time includes the tick count and the number
 * of times the tick count has overflowed since the system first booted.
 * \defgroup vTaskSetTimeOutState vTaskSetTimeOutState
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \ingroup TaskCtrl
 * @endcond
 */
void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION;

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task.h
 * @code
 * BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait );
@ -3170,7 +3224,7 @@ void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION;
 * return uxReceived;
 * }
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xTaskCheckForTimeOut xTaskCheckForTimeOut
 * @endcond
 * \ingroup TaskCtrl
@ -3179,7 +3233,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
                                 TickType_t * const pxTicksToWait ) PRIVILEGED_FUNCTION;
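
A compact sketch of the intended pattern: capture the time-out state once, then keep adjusting the remaining block time across repeated waits until the total time-out expires. xTryReceive() below is a hypothetical stand-in for any blocking step:

BaseType_t xReceiveWithTimeout( TickType_t xTicksToWait )
{
    TimeOut_t xTimeOut;

    vTaskSetTimeOutState( &xTimeOut );  /* Remember the start time. */

    while( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
    {
        /* xTicksToWait has been reduced by the time already spent waiting. */
        if( xTryReceive( xTicksToWait ) == pdPASS )
        {
            return pdPASS;
        }
    }

    return pdFAIL;  /* The total time-out expired. */
}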

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task.h
 * @code{c}
 * BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp );
@ -3204,7 +3258,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
 * blocked state and a context switch being performed. Otherwise pdFALSE.
 *
 * \defgroup xTaskCatchUpTicks xTaskCatchUpTicks
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \ingroup TaskCtrl
 * @endcond
 */
@ -3214,7 +3268,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------
 * SCHEDULER INTERNALS AVAILABLE FOR PORTING PURPOSES
 *----------------------------------------------------------*/
/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
/*
 * Return the handle of the task running on a certain CPU. Because of
 * the nature of SMP processing, there is no guarantee that this
@ -3335,8 +3389,8 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
 * making the call, otherwise pdFALSE.
 */
BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION;
BaseType_t xTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
                                              const TickType_t xItemValue ) PRIVILEGED_FUNCTION;
void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
                                        const TickType_t xItemValue ) PRIVILEGED_FUNCTION;

/*
 * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY
@ -3399,11 +3453,6 @@ void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder,
 */
UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;

/*
 * Get the current core affinity of a task
 */
BaseType_t xTaskGetAffinity( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;

/*
 * Set the uxTaskNumber of the task referenced by the xTask parameter to
 * uxHandle.

@ -450,7 +450,7 @@ void vTimerSetTimerID( TimerHandle_t xTimer,
BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * TaskHandle_t xTimerGetTimerDaemonTaskHandle( void );
 * @endcond
 *
@ -1315,7 +1315,7 @@ TickType_t xTimerGetPeriod( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
 */
TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
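
Since the expiry time is an absolute tick count, a sketch of deriving the time remaining until an active timer fires; the unsigned subtraction handles tick-count wraparound, and the result is only meaningful while the timer is active:

TickType_t xTicksUntilExpiry( TimerHandle_t xTimer )
{
    return xTimerGetExpiryTime( xTimer ) - xTaskGetTickCount();
}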

/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */

/*
 * Functions beyond this part are not part of the public API and are intended
@ -1339,7 +1339,7 @@ BaseType_t xTimerGenericCommand( TimerHandle_t xTimer,
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * task.h
 * @code{c}
 * void vApplicationGetTimerTaskMemory( StaticTask_t ** ppxTimerTaskTCBBuffer, StackType_t ** ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize )

@ -24,89 +24,465 @@
 *
 * 1 tab == 4 spaces!
 */

#ifndef PORTMACRO_H
#define PORTMACRO_H

#ifndef __ASSEMBLER__

#include "sdkconfig.h"
#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
#include "soc/spinlock.h"
#include "soc/interrupt_core0_reg.h"
#include "soc/cpu.h"
#include "esp_attr.h"
#include "esp_rom_sys.h"
#include "esp_timer.h" /* required for FreeRTOS run time stats */
#include "esp_heap_caps.h"
#include "esp_system.h" /* required by esp_get_...() functions in portable.h. [refactor-todo] Update portable.h */
#include "esp_newlib.h"
#include "portbenchmark.h"

/* [refactor-todo] These includes are not directly used in this file. They are kept in to prevent a breaking change. Remove these. */
#include <limits.h>
#ifdef CONFIG_LEGACY_INCLUDE_COMMON_HEADERS
#include "soc/soc_memory_layout.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

#ifndef __ASSEMBLER__

#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
#include <limits.h>
#include "esp_timer.h" /* required for FreeRTOS run time stats */

#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_heap_caps.h"
#ifdef CONFIG_LEGACY_INCLUDE_COMMON_HEADERS
#include "soc/soc_memory_layout.h"
#endif
#include "soc/spinlock.h"
#include "soc/interrupt_core0_reg.h"
#include "esp_rom_sys.h"
#include "soc/cpu.h"
#include "esp_system.h"
#include "esp_newlib.h"
/* --------------------------------------------------- Port Types ------------------------------------------------------
 * - Port specific types.
 * - The settings in this file configure FreeRTOS correctly for the given hardware and compiler.
 * - These settings should not be altered.
 * - The port types must come first as they are used further down in this file
 * ------------------------------------------------------------------------------------------------------------------ */

/*-----------------------------------------------------------
 * Port specific definitions.
 *
 * The settings in this file configure FreeRTOS correctly for the
 * given hardware and compiler.
 *
 * These settings should not be altered.
 *-----------------------------------------------------------
 */
#define portCHAR uint8_t
#define portFLOAT float
#define portDOUBLE double
#define portLONG int32_t
#define portSHORT int16_t
#define portSTACK_TYPE uint8_t
#define portBASE_TYPE int

/* Type definitions. */
#define portCHAR uint8_t
#define portFLOAT float
#define portDOUBLE double
#define portLONG int32_t
#define portSHORT int16_t
#define portSTACK_TYPE uint8_t
#define portBASE_TYPE int
// interrupt module will mask interrupt with priority less than threshold
#define RVHAL_EXCM_LEVEL 4
typedef portSTACK_TYPE StackType_t;
typedef portBASE_TYPE BaseType_t;
typedef unsigned portBASE_TYPE UBaseType_t;

typedef portSTACK_TYPE StackType_t;
typedef portBASE_TYPE BaseType_t;
typedef unsigned portBASE_TYPE UBaseType_t;

#if( configUSE_16_BIT_TICKS == 1 )
typedef uint16_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffff
#if (configUSE_16_BIT_TICKS == 1)
typedef uint16_t TickType_t;
#define portMAX_DELAY (TickType_t) 0xffff
#else
typedef uint32_t TickType_t;
#define portMAX_DELAY ( TickType_t ) 0xffffffffUL
typedef uint32_t TickType_t;
#define portMAX_DELAY (TickType_t) 0xffffffffUL
#endif
/*------------------------------------------------------*/

/* Architecture specifics. */
#define portSTACK_GROWTH ( -1 )
#define portTICK_PERIOD_MS ( ( TickType_t ) (1000 / configTICK_RATE_HZ) )
#define portBYTE_ALIGNMENT 16
/*-----------------------------------------------------------*/
#include "portbenchmark.h"
/* Task function macros as described on the FreeRTOS.org WEB site. */
#define portTASK_FUNCTION_PROTO(vFunction, pvParameters) void vFunction(void *pvParameters)
#define portTASK_FUNCTION(vFunction, pvParameters) void vFunction(void *pvParameters)

static inline BaseType_t IRAM_ATTR xPortGetCoreID(void) {
    return cpu_hal_get_core_id();
// interrupt module will mask interrupt with priority less than threshold
#define RVHAL_EXCM_LEVEL 4


/* ----------------------------------------------- Port Configurations -------------------------------------------------
 * - Configuration values supplied by each port
 * - Required by FreeRTOS
 * ------------------------------------------------------------------------------------------------------------------ */

#define portCRITICAL_NESTING_IN_TCB 0
#define portSTACK_GROWTH (-1)
#define portTICK_PERIOD_MS ((TickType_t) (1000 / configTICK_RATE_HZ))
#define portBYTE_ALIGNMENT 16
#define portNOP() __asm volatile (" nop ")


/* ---------------------------------------------- Forward Declarations -------------------------------------------------
 * - Forward declarations of all the port functions and macros needed to implement the FreeRTOS porting interface
 * - These must come before definition/declaration of the FreeRTOS porting interface
 * ------------------------------------------------------------------------------------------------------------------ */

// --------------------- Interrupts ------------------------

/**
 * @brief Checks if the current core is in an ISR context
 *
 * - ISR context consists of Low/Mid priority ISR, or time tick ISR
 * - High priority ISRs aren't detected here, but they normally cannot call C code, so that should not be an issue anyway.
 *
 * @note [refactor-todo] Check if this should be inlined
 * @return
 *  - pdTRUE if in ISR
 *  - pdFALSE otherwise
 */
BaseType_t xPortInIsrContext(void);

/**
 * @brief Check if in ISR context from High priority ISRs
 *
 * - Called from High priority ISR
 * - Checks if the previous context (before high priority interrupt) was in ISR context (meaning low/med priority)
 *
 * @note [refactor-todo] Check if this should be inlined
 * @return
 *  - pdTRUE if the previous context was an ISR
 *  - pdFALSE otherwise
 */
BaseType_t xPortInterruptedFromISRContext(void);

/**
 * @brief Disable interrupts in a nested manner
 *
 * - Cleaner solution allows nested interrupts disabling and restoring via local registers or stack.
 * - They can be called from interrupts too.
 * - WARNING: Only applies to the current CPU.
 *
 * @note [refactor-todo] Define this as portSET_INTERRUPT_MASK_FROM_ISR() instead
 * @return unsigned Previous interrupt state
 */
static inline unsigned portENTER_CRITICAL_NESTED(void);

/* ---------------------- Spinlocks ------------------------
 - Spinlocks added to match API with SMP FreeRTOS. Single core RISC-V does not need spin locks
 - Because single core does not have a primitive spinlock data type, we have to implement one here
 * @note [refactor-todo] Refactor critical section API so that this is no longer required
 * ------------------------------------------------------ */

/**
 * @brief Spinlock object
 * Owner:
 *  - Set to 0 if uninitialized
 *  - Set to portMUX_FREE_VAL when free
 *  - Set to CORE_ID_REGVAL_PRO or CORE_ID_REGVAL_AP when locked
 *  - Any other value indicates corruption
 * Count:
 *  - 0 if unlocked
 *  - Recursive count if locked
 *
 * @note Not a true spinlock as single core RISC-V does not have atomic compare and set instruction
 * @note Keep portMUX_INITIALIZER_UNLOCKED in sync with this struct
 */
typedef struct {
    uint32_t owner;
    uint32_t count;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    const char *lastLockedFn;
    int lastLockedLine;
#endif
} portMUX_TYPE;
/**< Spinlock initializer */
#ifndef CONFIG_FREERTOS_PORTMUX_DEBUG
#define portMUX_INITIALIZER_UNLOCKED {      \
            .owner = portMUX_FREE_VAL,      \
            .count = 0,                     \
        }
#else
#define portMUX_INITIALIZER_UNLOCKED {      \
            .owner = portMUX_FREE_VAL,      \
            .count = 0,                     \
            .lastLockedFn = "(never locked)", \
            .lastLockedLine = -1            \
        }
#endif /* CONFIG_FREERTOS_PORTMUX_DEBUG */
#define portMUX_FREE_VAL      SPINLOCK_FREE          /**< Spinlock is free. [refactor-todo] check if this is still required */
#define portMUX_NO_TIMEOUT    SPINLOCK_WAIT_FOREVER  /**< When passed for 'timeout_cycles', spin forever if necessary. [refactor-todo] check if this is still required */
#define portMUX_TRY_LOCK      SPINLOCK_NO_WAIT       /**< Try to acquire the spinlock a single time only. [refactor-todo] check if this is still required */
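
For context, how these pieces are normally used together in ESP-IDF code: a statically initialized portMUX_TYPE guards shared state through the critical-section macros mapped further down in this file. The variable names are illustrative:

static portMUX_TYPE s_demo_lock = portMUX_INITIALIZER_UNLOCKED;
static int s_shared_value;

void demo_update_shared(int new_value)
{
    portENTER_CRITICAL(&s_demo_lock);  /* On this single-core port: disables interrupts. */
    s_shared_value = new_value;
    portEXIT_CRITICAL(&s_demo_lock);
}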

/**
 * @brief Initialize a spinlock
 *
 * - Initializes a spinlock that is used by FreeRTOS SMP critical sections
 *
 * @note [refactor-todo] We can make this inline or consider making it a macro
 * @param[in] mux Spinlock
 */
void vPortCPUInitializeMutex(portMUX_TYPE *mux);

/**
 * @brief Acquire a spinlock
 *
 * @note [refactor-todo] check if we still need this
 * @note [refactor-todo] Check if this should be inlined
 * @param[in] mux Spinlock
 */
void vPortCPUAcquireMutex(portMUX_TYPE *mux);

/**
 * @brief Acquire a spinlock but with a specified timeout
 *
 * @note [refactor-todo] Check if we still need this
 * @note [refactor-todo] Check if this should be inlined
 * @note [refactor-todo] Check if this function should be renamed (due to bool return type)
 * @param[in] mux Spinlock
 * @param[in] timeout Timeout in number of CPU cycles
 * @return true Spinlock acquired
 * @return false Timed out
 */
bool vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout_cycles);

/**
 * @brief Release a spinlock
 *
 * @note [refactor-todo] check if we still need this
 * @note [refactor-todo] Check if this should be inlined
 * @param[in] mux Spinlock
 */
void vPortCPUReleaseMutex(portMUX_TYPE *mux);

/**
 * @brief Wrapper for atomic compare-and-set instruction
 *
 * @note Isn't a real atomic CAS.
 * @note [refactor-todo] check if we still need this
 * @note [refactor-todo] Check if this function should be renamed (due to void return type)
 *
 * @param[inout] addr Pointer to target address
 * @param[in] compare Compare value
 * @param[inout] set Pointer to set value
 */
static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set);

/**
 * @brief Wrapper for atomic compare-and-set instruction in external RAM
 *
 * @note Isn't a real atomic CAS.
 * @note [refactor-todo] check if we still need this
 * @note [refactor-todo] Check if this function should be renamed (due to void return type)
 *
 * @param[inout] addr Pointer to target address
 * @param[in] compare Compare value
 * @param[inout] set Pointer to set value
 */
static inline void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set);

// ------------------ Critical Sections --------------------

/**
 * @brief Enter a critical section
 *
 * - Simply disable interrupts
 * - Can be nested
 */
void vPortEnterCritical(void);

/**
 * @brief Exit a critical section
 *
 * - Reenables interrupts
 * - Can be nested
 */
void vPortExitCritical(void);

// ---------------------- Yielding -------------------------

/**
 * @brief Set interrupt mask and return current interrupt enable register
 *
 * @note [refactor-todo] Check if this function should be renamed (due to int return type)
 * @return int Current interrupt enable register before set
 */
int vPortSetInterruptMask(void);

/**
 * @brief Clear current interrupt mask and set given mask
 *
 * @param mask Interrupt mask
 */
void vPortClearInterruptMask(int mask);

/**
 * @brief Perform a context switch from a task
 *
 * @note [refactor-todo] The rest of ESP-IDF should call taskYield() instead
 */
void vPortYield(void);

/**
 * @brief Perform a context switch from an ISR
 */
void vPortYieldFromISR(void);

/**
 * @brief Yields the other core
 *
 * @note Added to be compatible with SMP API
 * @note [refactor-todo] Put this into private macros as it's only called from task.c and is not public API
 * @param coreid ID of core to yield
 */
void vPortYieldOtherCore(BaseType_t coreid);

/**
 * @brief Checks if the current core can yield
 *
 * - A core cannot yield if it's in an ISR or in a critical section
 *
 * @note [refactor-todo] See if this can be separated from port macro
 * @note [refactor-todo] Check if this function should be renamed (due to bool return type)
 * @return true Core can yield
 * @return false Core cannot yield
 */
static inline bool IRAM_ATTR xPortCanYield(void);

// ------------------- Hook Functions ----------------------

extern void esp_vApplicationIdleHook(void);
extern void esp_vApplicationTickHook(void);

/**
 * @brief Hook function called on entry to tickless idle
 *
 * - Implemented in pm_impl.c
 *
 * @param xExpectedIdleTime Expected idle time
 */
void vApplicationSleep(TickType_t xExpectedIdleTime);

// ----------------------- System --------------------------

/**
 * @brief Get the tick rate per second
 *
 * @note [refactor-todo] make this inline
 * @note [refactor-todo] Check if this function should be renamed (due to uint return type)
 * @return uint32_t Tick rate in Hz
 */
uint32_t xPortGetTickRateHz(void);

/**
 * @brief Set a watchpoint to watch the last 32 bytes of the stack
 *
 * Callback to set a watchpoint on the end of the stack. Called every context switch to change the stack watchpoint
 * around.
 *
 * @param pxStackStart Pointer to the start of the stack
 */
void vPortSetStackWatchpoint(void *pxStackStart);

/**
 * @brief Get the current core's ID
 *
 * @note Added to be compatible with SMP API
 * @note [refactor-todo] IDF should call a FreeRTOS like macro instead of port function directly
 * @return BaseType_t Core ID
 */
static inline BaseType_t IRAM_ATTR xPortGetCoreID(void)
{
    return (uint32_t) cpu_hal_get_core_id();
}


static inline bool IRAM_ATTR xPortCanYield(void)

/* ------------------------------------------- FreeRTOS Porting Interface ----------------------------------------------
 * - Contains all the mappings of the macros required by FreeRTOS
 * - Most come after forward declare as porting macros map to declared functions
 * - Maps to forward declared functions
 * ------------------------------------------------------------------------------------------------------------------ */

// ----------------------- Memory --------------------------

/**
 * @brief Task memory allocation macros
 *
 * @note Because the ROM routines don't necessarily handle a stack in external RAM correctly, we force the stack
 * memory to always be internal.
 * @note [refactor-todo] Update portable.h to match v10.4.3 to use new malloc prototypes
 */
#define portTcbMemoryCaps (MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT)
#define portStackMemoryCaps (MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT)
#define pvPortMallocTcbMem(size) pvPortMalloc(size)
#define pvPortMallocStackMem(size) pvPortMalloc(size)

// --------------------- Interrupts ------------------------

#define portEXIT_CRITICAL_NESTED(state) do { portCLEAR_INTERRUPT_MASK_FROM_ISR(state);} while(0);
#define portDISABLE_INTERRUPTS() portSET_INTERRUPT_MASK_FROM_ISR()
#define portENABLE_INTERRUPTS() portCLEAR_INTERRUPT_MASK_FROM_ISR(1)
#define portSET_INTERRUPT_MASK_FROM_ISR() vPortSetInterruptMask()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedStatusValue) vPortClearInterruptMask(uxSavedStatusValue)

// ------------------ Critical Sections --------------------

#define portENTER_CRITICAL(mux) {(void)mux; vPortEnterCritical();}
#define portEXIT_CRITICAL(mux) {(void)mux; vPortExitCritical();}
//In single-core RISC-V, we can use the same critical section API
#define portENTER_CRITICAL_ISR(mux) portENTER_CRITICAL(mux)
#define portEXIT_CRITICAL_ISR(mux) portEXIT_CRITICAL(mux)
/* [refactor-todo] on RISC-V, both ISR and non-ISR cases result in the same call. We can redefine this macro */
#define portENTER_CRITICAL_SAFE(mux) ({ \
    if (xPortInIsrContext()) {          \
        portENTER_CRITICAL_ISR(mux);    \
    } else {                            \
        portENTER_CRITICAL(mux);        \
    }                                   \
})
#define portEXIT_CRITICAL_SAFE(mux) ({  \
    if (xPortInIsrContext()) {          \
        portEXIT_CRITICAL_ISR(mux);     \
    } else {                            \
        portEXIT_CRITICAL(mux);         \
    }                                   \
})
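
The _SAFE variants exist for helpers that may run in either context; a brief sketch of a counter callable from both tasks and ISRs (names are illustrative):

static portMUX_TYPE s_ctx_lock = portMUX_INITIALIZER_UNLOCKED;
static uint32_t s_event_count;

void demo_count_event(void)
{
    /* Picks the ISR or task flavour at run time via xPortInIsrContext(). */
    portENTER_CRITICAL_SAFE(&s_ctx_lock);
    s_event_count++;
    portEXIT_CRITICAL_SAFE(&s_ctx_lock);
}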

// ---------------------- Yielding -------------------------

#define portYIELD() vPortYield()
#define portYIELD_FROM_ISR() vPortYieldFromISR()
#define portEND_SWITCHING_ISR(xSwitchRequired) if(xSwitchRequired) vPortYield()
/* Yielding within an API call (when interrupts are off), means the yield should be delayed
   until interrupts are re-enabled.
   To do this, we use the "cross-core" interrupt as a trigger to yield on this core when interrupts are re-enabled. This
   is the same interrupt & code path which is used to trigger a yield between CPUs, although in this case the yield is
   happening on the same CPU.
*/
#define portYIELD_WITHIN_API() portYIELD()

// ------------------- Hook Functions ----------------------

#ifndef CONFIG_FREERTOS_LEGACY_HOOKS
#define vApplicationIdleHook esp_vApplicationIdleHook
#define vApplicationTickHook esp_vApplicationTickHook
#endif /* !CONFIG_FREERTOS_LEGACY_HOOKS */
#define portSUPPRESS_TICKS_AND_SLEEP(idleTime) vApplicationSleep(idleTime)

// ------------------- Run Time Stats ----------------------

#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS()
#define portGET_RUN_TIME_COUNTER_VALUE() 0
#ifdef CONFIG_FREERTOS_RUN_TIME_STATS_USING_ESP_TIMER
/* Coarse resolution time (us) */
#define portALT_GET_RUN_TIME_COUNTER_VALUE(x) do {x = (uint32_t)esp_timer_get_time();} while(0)
#endif



/* --------------------------------------------- Inline Implementations ------------------------------------------------
 * - Implementation of inline functions of the forward declares
 * - Should come after forward declare and FreeRTOS Porting interface, as implementation may use both.
 * - For implementation of non-inlined functions, see port.c, port_common.c, or other assembly files
 * ------------------------------------------------------------------------------------------------------------------ */

// --------------------- Interrupts ------------------------

static inline unsigned portENTER_CRITICAL_NESTED(void)
{
    uint32_t threshold = REG_READ(INTERRUPT_CORE0_CPU_INT_THRESH_REG);
    /* when enter critical code, freertos will mask threshold to RVHAL_EXCM_LEVEL
     * and exit critical code, will recover threshold value (1). so threshold <= 1
     * means not in critical code
     */
    return (threshold <= 1);
    unsigned state = portSET_INTERRUPT_MASK_FROM_ISR();
    return state;
}

// ---------------------- Spinlocks ------------------------

static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
{
    compare_and_set_native(addr, compare, set);
}

static inline void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t compare, uint32_t *set)
@ -116,199 +492,56 @@ static inline void uxPortCompareSetExtram(volatile uint32_t *addr, uint32_t comp
#endif
}

static inline void __attribute__((always_inline)) uxPortCompareSet(volatile uint32_t *addr, uint32_t compare, uint32_t *set) {
    compare_and_set_native(addr, compare, set);
// ---------------------- Yielding -------------------------

static inline bool IRAM_ATTR xPortCanYield(void)
{
    uint32_t threshold = REG_READ(INTERRUPT_CORE0_CPU_INT_THRESH_REG);
    /* when enter critical code, FreeRTOS will mask threshold to RVHAL_EXCM_LEVEL
     * and exit critical code, will recover threshold value (1). so threshold <= 1
     * means not in critical code
     */
    return (threshold <= 1);
}

#define portCRITICAL_NESTING_IN_TCB 0

/*
 * Send an interrupt to another core in order to make the task running
 * on it yield for a higher-priority task.

/* ------------------------------------------------------ Misc ---------------------------------------------------------
 * - Miscellaneous porting macros
 * - These are not part of the FreeRTOS porting interface, but are used by other FreeRTOS dependent components
 * ------------------------------------------------------------------------------------------------------------------ */

// -------------------- Heap Related -----------------------

/**
 * @brief Checks if a given piece of memory can be used to store a task's TCB
 *
 * - Defined in port_common.c
 *
 * @param ptr Pointer to memory
 * @return true Memory can be used to store a TCB
 * @return false Otherwise
 */
void vPortYieldOtherCore( BaseType_t coreid);
bool xPortCheckValidTCBMem(const void *ptr);

/*
  Callback to set a watchpoint on the end of the stack. Called every context switch to change the stack
  watchpoint around.
/**
 * @brief Checks if a given piece of memory can be used to store a task's stack
 *
 * - Defined in port_common.c
 *
 * @param ptr Pointer to memory
 * @return true Memory can be used to store a task stack
 * @return false Otherwise
 */
void vPortSetStackWatchpoint( void* pxStackStart );
bool xPortcheckValidStackMem(const void *ptr);

/*
 * Returns true if the current core is in ISR context; low prio ISR, med prio ISR or timer tick ISR. High prio ISRs
 * aren't detected here, but they normally cannot call C code, so that should not be an issue anyway.
 */
BaseType_t xPortInIsrContext(void);

/*
 * This function will be called in High prio ISRs. Returns true if the current core was in ISR context
 * before calling into high prio ISR context.
 */
BaseType_t xPortInterruptedFromISRContext(void);

/* "mux" data structure (spinlock) */
typedef struct {
    /* owner field values:
     * 0                - Uninitialized (invalid)
     * portMUX_FREE_VAL - Mux is free, can be locked by either CPU
     * CORE_ID_REGVAL_PRO / CORE_ID_REGVAL_APP - Mux is locked to the particular core
     *
     *
     * Any value other than portMUX_FREE_VAL, CORE_ID_REGVAL_PRO, CORE_ID_REGVAL_APP indicates corruption
     */
    uint32_t owner;
    /* count field:
     * If mux is unlocked, count should be zero.
     * If mux is locked, count is non-zero & represents the number of recursive locks on the mux.
     */
    uint32_t count;
#ifdef CONFIG_FREERTOS_PORTMUX_DEBUG
    const char *lastLockedFn;
    int lastLockedLine;
#endif
} portMUX_TYPE;

#define portMUX_FREE_VAL SPINLOCK_FREE

/* Special constants for vPortCPUAcquireMutexTimeout() */
#define portMUX_NO_TIMEOUT SPINLOCK_WAIT_FOREVER /* When passed for 'timeout_cycles', spin forever if necessary */
#define portMUX_TRY_LOCK SPINLOCK_NO_WAIT        /* Try to acquire the spinlock a single time only */

// Keep this in sync with the portMUX_TYPE struct definition please.
#ifndef CONFIG_FREERTOS_PORTMUX_DEBUG
#define portMUX_INITIALIZER_UNLOCKED {      \
            .owner = portMUX_FREE_VAL,      \
            .count = 0,                     \
        }
#else
#define portMUX_INITIALIZER_UNLOCKED {      \
            .owner = portMUX_FREE_VAL,      \
            .count = 0,                     \
            .lastLockedFn = "(never locked)", \
            .lastLockedLine = -1            \
        }
#endif

/* Scheduler utilities. */
extern void vPortYield( void );
extern void vPortYieldFromISR( void );

#define portYIELD() vPortYield()
#define portYIELD_FROM_ISR() vPortYieldFromISR()

/* Yielding within an API call (when interrupts are off), means the yield should be delayed
   until interrupts are re-enabled.
   To do this, we use the "cross-core" interrupt as a trigger to yield on this core when interrupts are re-enabled. This
   is the same interrupt & code path which is used to trigger a yield between CPUs, although in this case the yield is
   happening on the same CPU.
*/
#define portYIELD_WITHIN_API() portYIELD()
/*-----------------------------------------------------------*/

/* Critical section management. */
extern int vPortSetInterruptMask(void);
extern void vPortClearInterruptMask( int );

void vPortCPUInitializeMutex(portMUX_TYPE *mux);
void vPortCPUAcquireMutex(portMUX_TYPE *mux);
bool vPortCPUAcquireMutexTimeout(portMUX_TYPE *mux, int timeout_cycles);
void vPortCPUReleaseMutex(portMUX_TYPE *mux);

extern void vPortEnterCritical( void );
extern void vPortExitCritical( void );

#define portDISABLE_INTERRUPTS() portSET_INTERRUPT_MASK_FROM_ISR()
#define portENABLE_INTERRUPTS() portCLEAR_INTERRUPT_MASK_FROM_ISR(1)

#define portENTER_CRITICAL(mux) {(void)mux; vPortEnterCritical();}
#define portEXIT_CRITICAL(mux) {(void)mux; vPortExitCritical();}

#define portENTER_CRITICAL_ISR(mux) portENTER_CRITICAL(mux)
#define portEXIT_CRITICAL_ISR(mux) portEXIT_CRITICAL(mux)

#define portENTER_CRITICAL_SAFE(mux) do {   \
        if (xPortInIsrContext()) {          \
            portENTER_CRITICAL_ISR(mux);    \
        } else {                            \
            portENTER_CRITICAL(mux);        \
        }                                   \
    } while(0)

#define portEXIT_CRITICAL_SAFE(mux) do {    \
        if (xPortInIsrContext()) {          \
            portEXIT_CRITICAL_ISR(mux);     \
        } else {                            \
            portEXIT_CRITICAL(mux);         \
        }                                   \
    } while(0)

/*------------------------------------------------------------*/
#define portSET_INTERRUPT_MASK_FROM_ISR() vPortSetInterruptMask()
#define portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedStatusValue ) vPortClearInterruptMask( uxSavedStatusValue )
#define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired ) vPortYield()

// Cleaner solution allows nested interrupts disabling and restoring via local registers or stack.
// They can be called from interrupts too.
static inline unsigned portENTER_CRITICAL_NESTED(void) {
    unsigned state = portSET_INTERRUPT_MASK_FROM_ISR();
    return state;
}

#define portEXIT_CRITICAL_NESTED(state) do { portCLEAR_INTERRUPT_MASK_FROM_ISR( state );} while(0);
/*-----------------------------------------------------------*/

//Because the ROM routines don't necessarily handle a stack in external RAM correctly, we force
//the stack memory to always be internal.
#define portTcbMemoryCaps (MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT)
#define portStackMemoryCaps (MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT)

#define pvPortMallocTcbMem(size) pvPortMalloc(size)
#define pvPortMallocStackMem(size) pvPortMalloc(size)

/* Fine resolution time */
#define portGET_RUN_TIME_COUNTER_VALUE() 0
#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS()

#ifdef CONFIG_FREERTOS_RUN_TIME_STATS_USING_ESP_TIMER
/* Coarse resolution time (us) */
#define portALT_GET_RUN_TIME_COUNTER_VALUE(x) do {x = (uint32_t)esp_timer_get_time();} while(0)
#endif

extern void esp_vApplicationIdleHook( void );
extern void esp_vApplicationTickHook( void );

#ifndef CONFIG_FREERTOS_LEGACY_HOOKS
#define vApplicationIdleHook esp_vApplicationIdleHook
#define vApplicationTickHook esp_vApplicationTickHook
#endif /* !CONFIG_FREERTOS_LEGACY_HOOKS */

/* Task function macros as described on the FreeRTOS.org WEB site. */
#define portTASK_FUNCTION_PROTO( vFunction, pvParameters ) void vFunction( void *pvParameters )
#define portTASK_FUNCTION( vFunction, pvParameters ) void vFunction( void *pvParameters )

void vApplicationSleep( TickType_t xExpectedIdleTime );
#define portSUPPRESS_TICKS_AND_SLEEP( idleTime ) vApplicationSleep( idleTime )

#define portNOP() __asm volatile ( " nop " )

#define portVALID_TCB_MEM(ptr) esp_ptr_byte_accessible(ptr)
#define portVALID_STACK_MEM(ptr) esp_ptr_byte_accessible(ptr)

/* Get tick rate per second */
uint32_t xPortGetTickRateHz(void);

// configASSERT_2 if requested
#if configASSERT_2
#include <stdio.h>
void exit(int);
#define configASSERT( x ) if (!(x)) { porttracePrint(-1); printf("\nAssertion failed in %s:%d\n", __FILE__, __LINE__); exit(-1); }
#endif


#endif //__ASSEMBLER__
#define portVALID_TCB_MEM(ptr) xPortCheckValidTCBMem(ptr)
#define portVALID_STACK_MEM(ptr) xPortcheckValidStackMem(ptr)

#ifdef __cplusplus
}
#endif

#endif //__ASSEMBLER__

#endif /* PORTMACRO_H */

@ -1,16 +1,8 @@
// Copyright 2021 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

// The LL layer for I2S register operations
/*******************************************************************************
@ -86,6 +78,26 @@ static inline void i2s_ll_rx_enable_clock(i2s_dev_t *hw)
    hw->rx_clkm_conf.rx_clk_active = 1;
}

/**
 * @brief Disable I2S tx module clock
 *
 * @param hw Peripheral I2S hardware instance address.
 */
static inline void i2s_ll_tx_disable_clock(i2s_dev_t *hw)
{
    hw->tx_clkm_conf.tx_clk_active = 0;
}

/**
 * @brief Disable I2S rx module clock
 *
 * @param hw Peripheral I2S hardware instance address.
 */
static inline void i2s_ll_rx_disable_clock(i2s_dev_t *hw)
{
    hw->rx_clkm_conf.rx_clk_active = 0;
}

/**
 * @brief I2S mclk use tx module clock
 *
@ -1,16 +1,9 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

#include <stdint.h>
@ -23,6 +16,9 @@
extern "C" {
#endif


#define RMT_LL_MAX_LOOP_COUNT (1023) /*!< Max loop count supported by the hardware */

#define RMT_LL_HW_BASE (&RMT)
#define RMT_LL_MEM_BASE (&RMTMEM)
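
/* Illustrative sketch (editorial addition, not part of the diff): a driver layered
   on this LL header would clamp a requested loop count against the new
   RMT_LL_MAX_LOOP_COUNT limit before programming the hardware. The helper name and
   parameter are hypothetical. */
static inline uint32_t example_rmt_clamp_loop_count(uint32_t requested)
{
    return (requested > RMT_LL_MAX_LOOP_COUNT) ? RMT_LL_MAX_LOOP_COUNT : requested;
}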

@ -1,16 +1,8 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*******************************************************************************
 * NOTICE
@ -176,28 +168,28 @@ void i2s_hal_enable_slave_fd_mode(i2s_hal_context_t *hal);
 *
 * @param hal Context of the HAL layer
 */
#define i2s_hal_start_tx(hal) i2s_ll_tx_start((hal)->dev)
void i2s_hal_start_tx(i2s_hal_context_t *hal);

/**
 * @brief Start I2S rx
 *
 * @param hal Context of the HAL layer
 */
#define i2s_hal_start_rx(hal) i2s_ll_rx_start((hal)->dev)
void i2s_hal_start_rx(i2s_hal_context_t *hal);

/**
 * @brief Stop I2S tx
 *
 * @param hal Context of the HAL layer
 */
#define i2s_hal_stop_tx(hal) i2s_ll_tx_stop((hal)->dev)
void i2s_hal_stop_tx(i2s_hal_context_t *hal);

/**
 * @brief Stop I2S rx
 *
 * @param hal Context of the HAL layer
 */
#define i2s_hal_stop_rx(hal) i2s_ll_rx_stop((hal)->dev)
void i2s_hal_stop_rx(i2s_hal_context_t *hal);
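
/* Illustrative sketch (editorial addition, not part of the diff): the macros above
   are replaced by real functions, giving each operation a single definition point.
   A typical stop/reconfigure/start sequence, assuming `hal` was previously set up
   by the driver: */
void example_i2s_restart_tx(i2s_hal_context_t *hal)
{
    i2s_hal_stop_tx(hal);    /* halt transmission                    */
    /* ... refill or reconfigure DMA buffers here ... */
    i2s_hal_start_tx(hal);   /* resume from a known hardware state   */
}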

/**
 * @brief Set the received data length to trigger `in_suc_eof` interrupt.
@ -1,22 +1,8 @@
// Copyright 2021 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*******************************************************************************
 * NOTICE
 * The HAL is not a public API, don't use it in application code.
 * See readme.md in soc/README.md
 ******************************************************************************/
/*
 * SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

@ -1,16 +1,8 @@
// Copyright 2021 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

@ -21,15 +13,12 @@ extern "C" {
/**
 * @brief LCD clock source
 * @note User should select the clock source based on the real requirement:
 * ╔═════════════════════╦══════════════════════════╦════════════════════════════╗
 * ║  LCD clock source   ║         Features         ║      Power Management      ║
 * ╠═════════════════════╬══════════════════════════╬════════════════════════════╣
 * ║ LCD_CLK_SRC_PLL160M ║ High resolution, fixed   ║ ESP_PM_APB_FREQ_MAX lock   ║
 * ╠═════════════════════╬══════════════════════════╬════════════════════════════╣
 * ║ LCD_CLK_SRC_APLL    ║ Configurable resolution  ║ ESP_PM_NO_LIGHT_SLEEP lock ║
 * ╠═════════════════════╬══════════════════════════╬════════════════════════════╣
 * ║ LCD_CLK_SRC_XTAL    ║ Medium resolution, fixed ║ No PM lock                 ║
 * ╚═════════════════════╩══════════════════════════╩════════════════════════════╝
 *
 * | LCD clock source    | Features                 | Power Management           |
 * |---------------------|--------------------------|----------------------------|
 * | LCD_CLK_SRC_PLL160M | High resolution, fixed   | ESP_PM_APB_FREQ_MAX lock   |
 * | LCD_CLK_SRC_APLL    | Configurable resolution  | ESP_PM_NO_LIGHT_SLEEP lock |
 * | LCD_CLK_SRC_XTAL    | Medium resolution, fixed | No PM lock                 |
 */
typedef enum {
    LCD_CLK_SRC_PLL160M, /*!< Select PLL160M as the source clock */

@ -1,16 +1,8 @@
// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

@ -155,12 +147,23 @@ typedef enum {
    TOUCH_PAD_INTR_MASK_INACTIVE = BIT(2),  /*!<Inactive for one of the enabled channels. */
    TOUCH_PAD_INTR_MASK_SCAN_DONE = BIT(3), /*!<Measurement done for all the enabled channels. */
    TOUCH_PAD_INTR_MASK_TIMEOUT = BIT(4),   /*!<Timeout for one of the enabled channels. */
#if SOC_TOUCH_PROXIMITY_MEAS_DONE_SUPPORTED
    TOUCH_PAD_INTR_MASK_PROXI_MEAS_DONE = BIT(5), /*!<For the proximity sensor, an interrupt is generated once the configured number of measurements has been taken. */
    TOUCH_PAD_INTR_MASK_MAX
#define TOUCH_PAD_INTR_MASK_ALL (TOUCH_PAD_INTR_MASK_TIMEOUT    \
                                | TOUCH_PAD_INTR_MASK_SCAN_DONE \
                                | TOUCH_PAD_INTR_MASK_INACTIVE  \
                                | TOUCH_PAD_INTR_MASK_ACTIVE    \
                                | TOUCH_PAD_INTR_MASK_DONE      \
                                | TOUCH_PAD_INTR_MASK_PROXI_MEAS_DONE) /*!<All touch interrupt types enabled. */
#else
    TOUCH_PAD_INTR_MASK_MAX
#define TOUCH_PAD_INTR_MASK_ALL (TOUCH_PAD_INTR_MASK_TIMEOUT    \
                                | TOUCH_PAD_INTR_MASK_SCAN_DONE \
                                | TOUCH_PAD_INTR_MASK_INACTIVE  \
                                | TOUCH_PAD_INTR_MASK_ACTIVE    \
                                | TOUCH_PAD_INTR_MASK_DONE)     /*!<All touch interrupt types enabled. */
#endif
} touch_pad_intr_mask_t;
FLAG_ATTR(touch_pad_intr_mask_t)
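
/* Illustrative sketch (editorial addition, not part of the diff): because
   TOUCH_PAD_INTR_MASK_ALL now adapts to the proximity-measurement capability at
   compile time, a caller can subscribe to every interrupt source without its own
   #if guards. Assumes the existing touch_pad_intr_enable() driver call. */
static void example_touch_enable_all_interrupts(void)
{
    touch_pad_intr_enable(TOUCH_PAD_INTR_MASK_ALL);
}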

@ -266,6 +266,12 @@

#endif

/**
 * CONFIG_LWIP_DHCP_OPTIONS_LEN: The total length of the outgoing DHCP options message. If you have many options
 * or very long option values, you can configure the length according to your requirements.
 */
#define DHCP_OPTIONS_LEN CONFIG_LWIP_DHCP_OPTIONS_LEN

/*
   ------------------------------------
   ---------- AUTOIP options ----------

@ -187,7 +187,10 @@ esp_err_t mdns_instance_name_set(const char * instance_name);
 * @note The value length of txt items will be automatically decided by strlen
 *
 * @param instance_name instance name to set. If NULL,
 *                      global instance name or hostname will be used
 *                      global instance name or hostname will be used.
 *                      Note that MDNS_MULTIPLE_INSTANCE config option
 *                      needs to be enabled for adding multiple instances
 *                      with the same instance type.
 * @param service_type service type (_http, _ftp, etc)
 * @param proto service protocol (_tcp, _udp)
 * @param port service port
@ -209,6 +212,9 @@ esp_err_t mdns_service_add(const char * instance_name, const char * service_type
 *
 * @param instance_name instance name to set. If NULL,
 *                      global instance name or hostname will be used
 *                      Note that MDNS_MULTIPLE_INSTANCE config option
 *                      needs to be enabled for adding multiple instances
 *                      with the same instance type.
 * @param service_type service type (_http, _ftp, etc)
 * @param proto service protocol (_tcp, _udp)
 * @param hostname service hostname. If NULL, local hostname will be used.
@ -238,6 +244,22 @@ esp_err_t mdns_service_add_for_host(const char * instance_name, const char * ser
 */
bool mdns_service_exists(const char * service_type, const char * proto, const char * hostname);


/**
 * @brief Check whether a service has been added.
 *
 * @param instance instance name
 * @param service_type service type (_http, _ftp, etc)
 * @param proto service protocol (_tcp, _udp)
 * @param hostname service hostname. If NULL, checks for the local hostname.
 *
 * @return
 *  - true   Corresponding service has been added.
 *  - false  Service not found.
 */
bool mdns_service_exists_with_instance(const char *instance, const char *service_type, const char *proto,
                                       const char *hostname);
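
/* Illustrative sketch (editorial addition, not part of the diff): the new
   instance-aware check lets a caller avoid adding the same instance of a service
   type twice. The instance and service names below are hypothetical. */
static void example_mdns_add_printer_once(void)
{
    if (!mdns_service_exists_with_instance("printer-1", "_ipp", "_tcp", NULL)) {
        mdns_service_add("printer-1", "_ipp", "_tcp", 631, NULL, 0);
    }
}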

/**
 * @brief Remove service from mDNS server
 *

@ -206,6 +206,7 @@ typedef struct {
    int network_timeout_ms;         /*!< Abort network operation if it is not completed after this value, in milliseconds (defaults to 10s) */
    bool disable_keepalive;         /*!< Set disable_keepalive=true to turn off keep-alive mechanism, false by default (keepalive is active by default). Note: setting the config value `keepalive` to `0` doesn't disable keepalive feature, but uses a default keepalive period */
    const char *path;               /*!< Path in the URI */
    int message_retransmit_timeout; /*!< Timeout for retransmitting a failed packet */
} esp_mqtt_client_config_t;
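
/* Illustrative sketch (editorial addition, not part of the diff): how the new
   message_retransmit_timeout field fits into an otherwise ordinary configuration.
   The broker URI and timeout values are hypothetical. */
static const esp_mqtt_client_config_t s_example_mqtt_cfg = {
    .uri = "mqtt://broker.example.com",
    .network_timeout_ms = 10000,         /* abort network ops after 10 s       */
    .disable_keepalive = false,          /* keep-alive stays enabled           */
    .message_retransmit_timeout = 1500,  /* retry a failed packet after 1.5 s  */
};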

/**
@ -375,6 +376,7 @@ esp_err_t esp_mqtt_client_destroy(esp_mqtt_client_handle_t client);
 * @param config mqtt configuration structure
 *
 * @return ESP_ERR_NO_MEM if failed to allocate
 *         ESP_ERR_INVALID_ARG if the transport configuration conflicts
 *         ESP_OK on success
 */
esp_err_t esp_mqtt_set_config(esp_mqtt_client_handle_t client, const esp_mqtt_client_config_t *config);
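
/* Illustrative sketch (editorial addition, not part of the diff): reconfiguring an
   existing client and distinguishing the documented error codes. `client` is
   assumed to come from a prior esp_mqtt_client_init(). */
static esp_err_t example_mqtt_reconfigure(esp_mqtt_client_handle_t client,
                                          const esp_mqtt_client_config_t *cfg)
{
    esp_err_t err = esp_mqtt_set_config(client, cfg);
    if (err != ESP_OK) {
        /* ESP_ERR_NO_MEM: allocation failure; ESP_ERR_INVALID_ARG: conflicting transport settings */
        return err;
    }
    return ESP_OK;
}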

@ -1,16 +1,13 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Allow for this warning suppression only in IDF_CI_BUILD
#if !defined(ESP_OPENSSL_SUPPRESS_LEGACY_WARNING) || !defined(IDF_CI_BUILD)
#warning "OpenSSL component will be removed from ESP-IDF in v5.0, please use esp_tls instead"
#endif
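
/* Illustrative sketch (editorial addition, not part of the diff): a project that
   knowingly stays on the deprecated component can silence the warning by defining
   the macro before the header is included: */
#define ESP_OPENSSL_SUPPRESS_LEGACY_WARNING  /* must precede the include */
#include "openssl/ssl.h"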

#ifndef _SSL_H_
#define _SSL_H_

@ -1,16 +1,8 @@
// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2018-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

@ -86,6 +78,10 @@ typedef struct protocomm_ble_config {
     * Pointer to the Name-UUID lookup table
     */
    protocomm_ble_name_uuid_t *nu_lookup;

    /* BLE bonding */
    unsigned ble_bonding:1;

} protocomm_ble_config_t;
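
/* Illustrative sketch (editorial addition, not part of the diff): enabling the new
   bonding bit in an otherwise minimal config. The device name is hypothetical, and
   the empty lookup table is only for brevity. */
static protocomm_ble_config_t s_example_ble_cfg = {
    .device_name = "PROV_DEV",
    .nu_lookup_count = 0,
    .nu_lookup = NULL,
    .ble_bonding = 1,   /* persist pairing so reconnects skip re-authentication */
};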

/**

@ -114,11 +114,14 @@ extern "C" {
   set sleep_init default param
*/
#define RTC_CNTL_DBG_ATTEN_LIGHTSLEEP_DEFAULT  3
#define RTC_CNTL_DBG_ATTEN_LIGHTSLEEP_NODROP   0
#define RTC_CNTL_DBG_ATTEN_DEEPSLEEP_DEFAULT   15
#define RTC_CNTL_DBG_ATTEN_MONITOR_DEFAULT     0
#define RTC_CNTL_BIASSLP_MONITOR_DEFAULT       0
#define RTC_CNTL_BIASSLP_SLEEP_ON              0
#define RTC_CNTL_BIASSLP_SLEEP_DEFAULT         1
#define RTC_CNTL_PD_CUR_MONITOR_DEFAULT        0
#define RTC_CNTL_PD_CUR_SLEEP_ON               0
#define RTC_CNTL_PD_CUR_SLEEP_DEFAULT          1
#define RTC_CNTL_DG_VDD_DRV_B_SLP_DEFAULT      254

@ -639,7 +642,8 @@ typedef struct {
    uint32_t rtc_peri_pd_en : 1;    //!< power down RTC peripherals
    uint32_t wifi_pd_en : 1;        //!< power down WiFi
    uint32_t bt_pd_en : 1;          //!< power down BT
    uint32_t cpu_pd_en : 1;         //!< power down CPU, but not restart when lightsleep.
    uint32_t cpu_pd_en : 1;         //!< power down CPU, but not restart when lightsleep.
    uint32_t int_8m_pd_en : 1;      //!< Power down Internal 8M oscillator
    uint32_t dig_peri_pd_en : 1;    //!< power down digital peripherals
    uint32_t deep_slp : 1;          //!< power down digital domain
    uint32_t wdt_flashboot_mod_en : 1; //!< enable WDT flashboot mode
@ -648,6 +652,7 @@ typedef struct {
    uint32_t rtc_dbias_wak : 5;     //!< set bias for RTC domain, in active mode
    uint32_t rtc_dbias_slp : 5;     //!< set bias for RTC domain, in sleep mode
    uint32_t vddsdio_pd_en : 1;     //!< power down VDDSDIO regulator
    uint32_t xtal_fpu : 1;          //!< keep main XTAL powered up in sleep
    uint32_t deep_slp_reject : 1;
    uint32_t light_slp_reject : 1;
} rtc_sleep_config_t;
@ -660,6 +665,7 @@ typedef struct {
 *
 * @param RTC_SLEEP_PD_x flags combined using bitwise OR
 */
#define is_dslp(pd_flags) ((pd_flags) & RTC_SLEEP_PD_DIG)
#define RTC_SLEEP_CONFIG_DEFAULT(sleep_flags) { \
    .lslp_mem_inf_fpu = 0, \
    .rtc_mem_inf_follow_cpu = ((sleep_flags) & RTC_SLEEP_PD_RTC_MEM_FOLLOW_CPU) ? 1 : 0, \
@ -669,14 +675,20 @@ typedef struct {
    .wifi_pd_en = ((sleep_flags) & RTC_SLEEP_PD_WIFI) ? 1 : 0, \
    .bt_pd_en = ((sleep_flags) & RTC_SLEEP_PD_BT) ? 1 : 0, \
    .cpu_pd_en = ((sleep_flags) & RTC_SLEEP_PD_CPU) ? 1 : 0, \
    .int_8m_pd_en = is_dslp(sleep_flags) ? 1 : ((sleep_flags) & RTC_SLEEP_PD_INT_8M) ? 1 : 0, \
    .dig_peri_pd_en = ((sleep_flags) & RTC_SLEEP_PD_DIG_PERIPH) ? 1 : 0, \
    .deep_slp = ((sleep_flags) & RTC_SLEEP_PD_DIG) ? 1 : 0, \
    .wdt_flashboot_mod_en = 0, \
    .dig_dbias_wak = RTC_CNTL_DBIAS_1V10, \
    .dig_dbias_slp = RTC_CNTL_DBIAS_SLP, \
    .dig_dbias_slp = is_dslp(sleep_flags) ? RTC_CNTL_DBIAS_SLP \
                     : !((sleep_flags) & RTC_SLEEP_PD_INT_8M) ? RTC_CNTL_DBIAS_1V10 \
                     : RTC_CNTL_DBIAS_SLP, \
    .rtc_dbias_wak = RTC_CNTL_DBIAS_1V10, \
    .rtc_dbias_slp = RTC_CNTL_DBIAS_SLP, \
    .rtc_dbias_slp = is_dslp(sleep_flags) ? RTC_CNTL_DBIAS_SLP \
                     : !((sleep_flags) & RTC_SLEEP_PD_INT_8M) ? RTC_CNTL_DBIAS_1V10 \
                     : RTC_CNTL_DBIAS_SLP, \
    .vddsdio_pd_en = ((sleep_flags) & RTC_SLEEP_PD_VDDSDIO) ? 1 : 0, \
    .xtal_fpu = is_dslp(sleep_flags) ? 0 : ((sleep_flags) & RTC_SLEEP_PD_XTAL) ? 0 : 1, \
    .deep_slp_reject = 1, \
    .light_slp_reject = 1 \
};
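
/* Illustrative sketch (editorial addition, not part of the diff): composing the
   RTC_SLEEP_PD_x flags and expanding the default-config macro. The chosen flag set
   is hypothetical; rtc_sleep_init() is the existing entry point that consumes the
   resulting struct. */
static void example_rtc_sleep_prepare(void)
{
    uint32_t flags = RTC_SLEEP_PD_DIG | RTC_SLEEP_PD_INT_8M | RTC_SLEEP_PD_XTAL;
    rtc_sleep_config_t cfg = RTC_SLEEP_CONFIG_DEFAULT(flags);
    rtc_sleep_init(cfg);
}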
@ -691,6 +703,8 @@ typedef struct {
#define RTC_SLEEP_PD_BT             BIT(7)  //!< Power down BT
#define RTC_SLEEP_PD_CPU            BIT(8)  //!< Power down CPU when in lightsleep, but not restart
#define RTC_SLEEP_PD_DIG_PERIPH     BIT(9)  //!< Power down DIG peripherals
#define RTC_SLEEP_PD_INT_8M         BIT(10) //!< Power down Internal 8M oscillator
#define RTC_SLEEP_PD_XTAL           BIT(11) //!< Power down main XTAL

/**
 * @brief Prepare the chip to enter sleep mode

@ -1,16 +1,8 @@
// Copyright 2021 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

@ -181,7 +181,9 @@ esp_partition_iterator_t esp_partition_next(esp_partition_iterator_t iterator);
/**
 * @brief Release partition iterator
 *
 * @param iterator Iterator obtained using esp_partition_find. Must be non-NULL.
 * @param iterator Iterator obtained using esp_partition_find.
 *                 The iterator is allowed to be NULL, so it is not necessary to check its value
 *                 before calling this function.
 *
 */
void esp_partition_iterator_release(esp_partition_iterator_t iterator);
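
/* Illustrative sketch (editorial addition, not part of the diff): with the relaxed
   contract above, the release call no longer needs a NULL guard, even when the
   find returned nothing. */
static void example_partition_lookup(void)
{
    esp_partition_iterator_t it =
        esp_partition_find(ESP_PARTITION_TYPE_DATA, ESP_PARTITION_SUBTYPE_ANY, NULL);
    /* ... iterate with esp_partition_next(it) as needed ... */
    esp_partition_iterator_release(it);   /* safe even if it == NULL */
}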
Some files were not shown because too many files have changed in this diff.