forked from espressif/arduino-esp32
IDF master b86fe0c66c
esp-dsp: master e05fc36
esp-face: master f108a83
esp-rainmaker: f1b82c7
esp32-camera: master 3022601
esp_littlefs: master f6e7108
File diff suppressed because one or more lines are too long
@@ -300,7 +300,7 @@ env.Append(
     "UNITY_INCLUDE_CONFIG_H",
     "WITH_POSIX",
     "_GNU_SOURCE",
-    ("IDF_VER", '\\"v4.4-dev-3235-g3e370c4296\\"'),
+    ("IDF_VER", '\\"v4.4-dev-3401-gb86fe0c66c\\"'),
     "ESP_PLATFORM",
     "ARDUINO_ARCH_ESP32",
     "ESP32",
File diff suppressed because one or more lines are too long
@@ -286,7 +286,7 @@ env.Append(
     "UNITY_INCLUDE_CONFIG_H",
     "WITH_POSIX",
     "_GNU_SOURCE",
-    ("IDF_VER", '\\"v4.4-dev-3235-g3e370c4296\\"'),
+    ("IDF_VER", '\\"v4.4-dev-3401-gb86fe0c66c\\"'),
     "ESP_PLATFORM",
     "ARDUINO_ARCH_ESP32",
     "ESP32",
@@ -18,6 +18,11 @@
 # define ASIO_NO_TYPEID
 # endif // CONFIG_COMPILER_RTTI
 
+//
+// Suppress OpenSSL deprecation warning, when building ASIO
+//
+#define ESP_OPENSSL_SUPPRESS_LEGACY_WARNING
+
 //
 // LWIP compatibility inet and address macros/functions
 //
@@ -191,6 +191,9 @@ int coap_handle_response_get_block(coap_context_t *context,
 void coap_block_delete_lg_xmit(coap_session_t *session,
                                coap_lg_xmit_t *lg_xmit);
 
+coap_tick_t coap_block_check_lg_xmit_timeouts(coap_session_t *session,
+                                              coap_tick_t now);
+
 /**
  * The function that does all the work for the coap_add_data_large*()
  * functions.
@@ -27,6 +27,12 @@ typedef struct coap_dtls_pki_t coap_dtls_pki_t;
 #ifndef COAP_DTLS_HINT_LENGTH
 #define COAP_DTLS_HINT_LENGTH 128
 #endif
+#ifndef COAP_DTLS_MAX_PSK_IDENTITY
+#define COAP_DTLS_MAX_PSK_IDENTITY 64
+#endif
+#ifndef COAP_DTLS_MAX_PSK
+#define COAP_DTLS_MAX_PSK 64
+#endif
 
 typedef enum coap_dtls_role_t {
   COAP_DTLS_ROLE_CLIENT, /**< Internal function invoked for client */
@@ -24,34 +24,34 @@
  * Scalar type to represent different events, e.g. DTLS events or
  * retransmission timeouts.
  */
-typedef unsigned int coap_event_t;
-
+typedef enum coap_event_t {
 /**
  * (D)TLS events for COAP_PROTO_DTLS and COAP_PROTO_TLS
  */
-#define COAP_EVENT_DTLS_CLOSED 0x0000
-#define COAP_EVENT_DTLS_CONNECTED 0x01DE
-#define COAP_EVENT_DTLS_RENEGOTIATE 0x01DF
-#define COAP_EVENT_DTLS_ERROR 0x0200
+  COAP_EVENT_DTLS_CLOSED = 0x0000,
+  COAP_EVENT_DTLS_CONNECTED = 0x01DE,
+  COAP_EVENT_DTLS_RENEGOTIATE = 0x01DF,
+  COAP_EVENT_DTLS_ERROR = 0x0200,
 
 /**
  * TCP events for COAP_PROTO_TCP and COAP_PROTO_TLS
  */
-#define COAP_EVENT_TCP_CONNECTED 0x1001
-#define COAP_EVENT_TCP_CLOSED 0x1002
-#define COAP_EVENT_TCP_FAILED 0x1003
+  COAP_EVENT_TCP_CONNECTED = 0x1001,
+  COAP_EVENT_TCP_CLOSED = 0x1002,
+  COAP_EVENT_TCP_FAILED = 0x1003,
 
 /**
  * CSM exchange events for reliable protocols only
  */
-#define COAP_EVENT_SESSION_CONNECTED 0x2001
-#define COAP_EVENT_SESSION_CLOSED 0x2002
-#define COAP_EVENT_SESSION_FAILED 0x2003
+  COAP_EVENT_SESSION_CONNECTED = 0x2001,
+  COAP_EVENT_SESSION_CLOSED = 0x2002,
+  COAP_EVENT_SESSION_FAILED = 0x2003,
 
 /**
- * BLOCK2 receive errors
+ * (Q-)BLOCK receive errors
  */
-#define COAP_EVENT_PARTIAL_BLOCK 0x3001
+  COAP_EVENT_PARTIAL_BLOCK = 0x3001
+} coap_event_t;
 
 /**
  * Type for event handler functions that can be registered with a CoAP
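Because the enum members keep the exact numeric values of the old macros, existing code behaves the same; the change mainly buys type checking on handlers. A minimal handler sketch, assuming the libcoap 4.3-style coap_event_handler_t signature (older releases pass the context and session separately, so check your version):

static int app_event_handler(coap_session_t *session, const coap_event_t event) {
    switch (event) {
    case COAP_EVENT_DTLS_CONNECTED:
        /* handshake done: safe to queue requests on this session */
        break;
    case COAP_EVENT_DTLS_CLOSED:
    case COAP_EVENT_SESSION_CLOSED:
        /* tear down per-session application state */
        break;
    default:
        break;
    }
    return 0;
}
/* registered once per context: coap_register_event_handler(ctx, app_event_handler); */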
@@ -88,7 +88,11 @@ COAP_STATIC_INLINE uint64_t coap_ticks_to_rt_us(coap_tick_t t) {
 #elif defined(RIOT_VERSION)
 #include <xtimer.h>
 
+#ifdef XTIMER_HZ
+#define COAP_TICKS_PER_SECOND (XTIMER_HZ)
+#else /* XTIMER_HZ */
 #define COAP_TICKS_PER_SECOND (XTIMER_HZ_BASE)
+#endif /* XTIMER_HZ */
 
 typedef uint64_t coap_tick_t;
 typedef int64_t coap_tick_diff_t;
@@ -15,6 +15,7 @@
 #include <stdlib.h>
 #include <string.h>
 #ifndef _WIN32
+#include <sys/select.h>
 #include <sys/time.h>
 #endif
 #include <time.h>
@@ -299,7 +299,6 @@ typedef enum coap_pdu_code_t {
   COAP_REQUEST_CODE_PATCH = COAP_REQUEST_PATCH,
   COAP_REQUEST_CODE_IPATCH = COAP_REQUEST_IPATCH,
 
-  COAP_RESPONSE_CODE_OK = COAP_RESPONSE_CODE(200),
   COAP_RESPONSE_CODE_CREATED = COAP_RESPONSE_CODE(201),
   COAP_RESPONSE_CODE_DELETED = COAP_RESPONSE_CODE(202),
   COAP_RESPONSE_CODE_VALID = COAP_RESPONSE_CODE(203),
@@ -83,7 +83,8 @@ typedef void (*coap_method_handler_t)
  * variable of coap_str_const_t has to point to constant text, or point to data
  * within the allocated coap_str_const_t parameter.
  *
- * @param uri_path The string URI path of the new resource.
+ * @param uri_path The string URI path of the new resource. The leading '/' is
+ *                 not normally required - e.g. just "full/path/for/resource".
 * @param flags    Flags for memory management (in particular release of
 *                 memory). Possible values:@n
 *
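The clarified uri_path semantics show up in a short server-side sketch (hnd_get_temperature is a hypothetical handler; coap_make_str_const() is libcoap's helper for constant strings); note the path carries no leading '/':

coap_resource_t *r = coap_resource_init(coap_make_str_const("sensors/temperature"), 0);
coap_register_handler(r, COAP_REQUEST_GET, hnd_get_temperature);
coap_add_resource(ctx, r);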
@@ -26,6 +26,7 @@
 #define CONFIG_BOOTLOADER_FLASH_XMC_SUPPORT 1
 #define CONFIG_ESPTOOLPY_BAUD_OTHER_VAL 115200
 #define CONFIG_ESPTOOLPY_FLASHMODE_DIO 1
+#define CONFIG_ESPTOOLPY_FLASH_SAMPLE_MODE_STR 1
 #define CONFIG_ESPTOOLPY_FLASHMODE "dio"
 #define CONFIG_ESPTOOLPY_FLASHFREQ_40M 1
 #define CONFIG_ESPTOOLPY_FLASHFREQ "40m"
@@ -369,6 +370,7 @@
 #define CONFIG_LWIP_GARP_TMR_INTERVAL 60
 #define CONFIG_LWIP_TCPIP_RECVMBOX_SIZE 32
 #define CONFIG_LWIP_DHCP_RESTORE_LAST_IP 1
+#define CONFIG_LWIP_DHCP_OPTIONS_LEN 68
 #define CONFIG_LWIP_DHCPS 1
 #define CONFIG_LWIP_DHCPS_LEASE_UNIT 60
 #define CONFIG_LWIP_DHCPS_MAX_STATION_NUM 8
@@ -480,6 +482,7 @@
 #define CONFIG_MDNS_TASK_AFFINITY 0x0
 #define CONFIG_MDNS_SERVICE_ADD_TIMEOUT_MS 2000
 #define CONFIG_MDNS_TIMER_PERIOD_MS 100
+#define CONFIG_MDNS_MULTIPLE_INSTANCE 1
 #define CONFIG_MQTT_PROTOCOL_311 1
 #define CONFIG_MQTT_TRANSPORT_SSL 1
 #define CONFIG_MQTT_TRANSPORT_WEBSOCKET 1
@@ -675,5 +678,5 @@
 #define CONFIG_ULP_COPROC_ENABLED CONFIG_ESP32_ULP_COPROC_ENABLED
 #define CONFIG_ULP_COPROC_RESERVE_MEM CONFIG_ESP32_ULP_COPROC_RESERVE_MEM
 #define CONFIG_WARN_WRITE_STRINGS CONFIG_COMPILER_WARN_WRITE_STRINGS
-#define CONFIG_ARDUINO_IDF_COMMIT "3e370c4296"
+#define CONFIG_ARDUINO_IDF_COMMIT "b86fe0c66c"
 #define CONFIG_ARDUINO_IDF_BRANCH "master"
@@ -856,16 +856,35 @@ esp_err_t rmt_remove_channel_from_group(rmt_channel_t channel);
 
 #if SOC_RMT_SUPPORT_TX_LOOP_COUNT
 /**
- * @brief Set loop count for RMT TX channel
+ * @brief Set loop count threshold value for RMT TX channel
+ *
+ * When tx loop count reaches this value, an ISR callback will notify user
  *
  * @param channel RMT channel
- * @param count loop count
+ * @param count loop count, 1 ~ 1023
 * @return
 *     - ESP_ERR_INVALID_ARG Parameter error
 *     - ESP_OK Success
 */
 esp_err_t rmt_set_tx_loop_count(rmt_channel_t channel, uint32_t count);
-#endif
+
+/**
+ * @brief Enable or disable the feature that when loop count reaches the threshold, RMT will stop transmitting.
+ *
+ * - When the loop auto-stop feature is enabled will halt RMT transmission after the loop count reaches a certain threshold
+ * - When disabled, the RMT transmission continue indefinitely until halted by the users
+ *
+ * @note The auto-stop feature is implemented in hardware on particular targets (i.e. those with SOC_RMT_SUPPORT_TX_LOOP_AUTOSTOP defined).
+ *       Otherwise, the auto-stop feature is implemented in software via the interrupt.
+ *
+ * @param channel RMT channel
+ * @param en enable bit
+ * @return
+ *     - ESP_ERR_INVALID_ARG Parameter error
+ *     - ESP_OK Success
+ */
+esp_err_t rmt_enable_tx_loop_autostop(rmt_channel_t channel, bool en);
+#endif // SOC_RMT_SUPPORT_TX_LOOP_COUNT
 
 /**
  * @brief Reset RMT TX/RX memory index.
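A minimal sketch of the loop-count API added above, against the IDF 4.4 legacy RMT driver (channel, items and n_items are placeholders the application sets up first via rmt_config()/rmt_driver_install()):

rmt_set_tx_loop_count(channel, 100);              // threshold for the ISR callback / auto-stop, 1 ~ 1023
rmt_enable_tx_loop_autostop(channel, true);       // halt transmission when the threshold is hit
rmt_write_items(channel, items, n_items, false);  // start the looped transmission without blocking

On chips without SOC_RMT_SUPPORT_TX_LOOP_AUTOSTOP the stop happens in the loop-count interrupt, so per the note above it is not cycle-exact.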
@@ -10,7 +10,7 @@
 #define DL_LOG_LAYER_LATENCY 0 /*<! - 1: print the latency of each parts of layer */
                                /*<! - 0: mute */
 
-#if CONFIG_SPIRAM_SUPPORT || CONFIG_ESP32_SPIRAM_SUPPORT || CONFIG_ESP32S3_SPIRAM_SUPPORT
+#if CONFIG_SPIRAM_SUPPORT || CONFIG_ESP32_SPIRAM_SUPPORT || CONFIG_ESP32S2_SPIRAM_SUPPORT || CONFIG_ESP32S3_SPIRAM_SUPPORT
 #define DL_SPIRAM_SUPPORT 1
 #else
 #define DL_SPIRAM_SUPPORT 0
@@ -83,8 +83,17 @@ namespace dl
 
     typedef enum
     {
-        PADDING_VALID,      /*<! no padding >*/
-        PADDING_SAME,       /*<! SAME in TensorFlow style >*/
-        PADDING_SAME_MXNET  /*<! SAME in MXNET style >*/
+        PADDING_NOT_SET,
+        PADDING_VALID,      /*<! no padding >*/
+        PADDING_SAME_BEGIN, /*<! SAME in MXNET style >*/
+        PADDING_SAME_END,   /*<! SAME in TensorFlow style >*/
     } padding_type_t;
-} // namespace dl
+
+    typedef enum
+    {
+        CONSTANT,
+        EDGE,
+        REFLECT,
+        SYMMETRIC,
+    } padding_mode_t;
+} // namespace dl
@@ -370,11 +370,70 @@ namespace dl
      */
     uint32_t get_moving_point_number(uint8_t *f1, uint8_t *f2, const uint32_t height, const uint32_t width, const uint32_t stride, const uint32_t threshold = 5);
 
+
+    /**
+     * @brief Apply an affine transformation to an image.
+     *
+     * @tparam T
+     * @param input the input image.
+     * @param output the output image.
+     * @param M_inv the inverse transformation matrix.
+     */
+    template <typename T>
+    void warp_affine(dl::Tensor<T> *input, dl::Tensor<T> *output, dl::math::Matrix<float> *M_inv);
+
+    /**
+     * @brief Apply an affine transformation to an image.
+     *
+     * @tparam T
+     * @param input the pointer of the input image.
+     * @param shape the shape of the input image.
+     * @param output the output image.
+     * @param M_inv the inverse transformation matrix.
+     */
+    template <typename T>
+    void warp_affine(uint16_t *input, std::vector<int> shape, dl::Tensor<T> *output, dl::math::Matrix<float> *M_inv);
+
+    /**
+     * @brief Get the otsu thresh object.
+     *
+     * @param image the gray image.
+     * @return uint8_t the otsu thresh.
+     */
+    uint8_t get_otsu_thresh(Tensor<uint8_t> &image);
+
+    /**
+     * @brief Convert RGB image to gray image
+     *
+     * @param image input image
+     * @param bgr true: the image is in BGR format
+     *            false: the image is in RGB format
+     * @return Tensor<uint8_t>* output image in gray format
+     */
+    Tensor<uint8_t> *rgb2gray(Tensor<uint8_t> &image, bool bgr = false);
+
+    /**
+     * @brief Convert RGB image to LAB image
+     *
+     * @param image input image
+     * @param bgr true: the image is in BGR format
+     *            false: the image is in RGB format
+     * @param fast true: use the fast algorithm, but the accuracy will be reduced
+     *             false: do not use the fast algorithm
+     * @return Tensor<uint8_t>* output image in LAB format
+     */
+    Tensor<uint8_t> *rgb2lab(Tensor<uint8_t> &image, bool bgr = false, bool fast = true);
+
+    /**
+     * @brief Convert RGB image to HSV image
+     *
+     * @param image input image
+     * @param bgr true: the image is in BGR format
+     *            false: the image is in RGB format
+     * @param fast true: use the fast algorithm, but the accuracy will be reduced
+     *             false: do not use the fast algorithm
+     * @return Tensor<uint8_t>* output image in HSV format
+     */
+    Tensor<uint8_t> *rgb2hsv(Tensor<uint8_t> &image, bool bgr = false, bool fast = true);
+
 } // namespace image
 } // namespace dl
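A usage sketch for the conversion helpers documented above (the 96x96x3 input tensor is made up for illustration; rgb2gray returns a newly allocated tensor, which I assume the caller owns and frees):

dl::Tensor<uint8_t> rgb;
rgb.set_shape({96, 96, 3});
rgb.malloc_element();                 // fill with pixel data before converting
dl::Tensor<uint8_t> *gray = dl::image::rgb2gray(rgb, /*bgr=*/false);
uint8_t thresh = dl::image::get_otsu_thresh(*gray);
delete gray;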
@@ -25,7 +25,8 @@ namespace dl
            const int output_exponent;     /*<! exponent of output >*/
            Tensor<feature_t> *output;     /*<! output ptr of add2d >*/
            bool inplace;                  /*<! true: the output will store to input0
-                                               false: the output will store to a seperate memeory >*/
+                                               false: the output will store to a separate memory >*/
+           std::vector<int> output_shape; /*<! output shape of add2d >*/
 
        public:
            /**
@@ -35,19 +36,21 @@ namespace dl
             * @param activation activation of add2d, if you don't specify anything, no activation is applied
             * @param name name of add2d
             * @param inplace true: the output will store to input0
-            *                false: the output will store to a seperate memeory
+            *                false: the output will store to a separate memory
             */
-           Add2D(const int output_exponent, const Activation<feature_t> *activation = NULL, const char *name = NULL, bool inplace = false) : Layer(name), activation(activation), output_exponent(output_exponent), output(NULL)
-           {
-               this->inplace = inplace;
-           }
+           Add2D(const int output_exponent, const Activation<feature_t> *activation = NULL, const char *name = "Add2D", bool inplace = false) : Layer(name),
+                                                                                                                                                activation(activation),
+                                                                                                                                                output_exponent(output_exponent),
+                                                                                                                                                output(NULL),
+                                                                                                                                                inplace(inplace),
+                                                                                                                                                output_shape({}) {}
 
            /**
             * @brief Destroy the Add2D object
             */
            ~Add2D()
            {
-               if((!this->inplace) && (this->output != NULL))
+               if ((!this->inplace) && (this->output != NULL))
                {
                    delete this->output;
                }
@@ -59,10 +62,12 @@ namespace dl
             *
             * @param input0 as one input
             * @param input1 as another input
+            * @param print_shape whether to print the output shape.
             */
-           void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1)
+           void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
            {
                assert(input0.is_same_shape(input1));
+               this->output_shape = input0.shape;
 
                if (!this->inplace)
                {
@@ -78,6 +83,11 @@ namespace dl
                {
                    this->output = &input0;
                }
+               if (print_shape)
+               {
+                   std::cout << this->name << " | ";
+                   this->output->print_shape();
+               }
            }
 
            /**
@@ -105,7 +115,11 @@ namespace dl
                if (!this->inplace)
                {
                    DL_LOG_LAYER_LATENCY_START();
-                   this->output->apply_element();
+                   if (this->output->shape != this->output_shape)
+                   {
+                       this->output->set_shape(this->output_shape);
+                   }
+                   this->output->malloc_element();
                    this->output->set_exponent(this->output_exponent);
                    DL_LOG_LAYER_LATENCY_END(this->name, "apply");
 
@@ -116,6 +130,10 @@ namespace dl
                else
                {
                    DL_LOG_LAYER_LATENCY_START();
+                   if (this->output->shape != this->output_shape)
+                   {
+                       this->output->set_shape(this->output_shape);
+                   }
                    nn::add2d(*this->output, input0, input1, this->activation, assign_core, this->output_exponent);
                    DL_LOG_LAYER_LATENCY_END(this->name, "add2d");
                }
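How the reworked Add2D is driven, as a sketch (x0 and x1 stand for pre-quantized int16 tensors of identical shape; the call() argument list is abbreviated to the inputs visible in this diff):

dl::layer::Add2D<int16_t> add(/*output_exponent=*/-10, /*activation=*/NULL, "add2d", /*inplace=*/false);
add.build(x0, x1, /*print_shape=*/true);    // fixes output_shape once, prints "add2d | ..."
dl::Tensor<int16_t> &y = add.call(x0, x1);  // the per-call shape check re-applies output_shape if needed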
@@ -24,23 +24,26 @@ namespace dl
            std::vector<int> filter_shape; /*<! filter shape in [filter_height, filter_width] >*/
            const int stride_y;            /*<! stride in height >*/
            const int stride_x;            /*<! stride in width >*/
-           const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET >*/
+           const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
            std::vector<int> padding;      /*<! padding size needed in [top, bottom, left, right] of this operation >*/
-           Tensor<feature_t> *output;     /*<! output ptr of AvgPool2D >*/
+           Tensor<feature_t> *output;     /*<! output ptr of AvgPool2D >*/
+           std::vector<int> output_shape; /*<! output shape of AvgPool2D >*/
 
        public:
 
            /**
             * @brief Construct a new AvgPool2D object.
             *
             * @param output_exponent exponent of output
             * @param filter_shape filter shape in [filter_height, filter_width]
-            * @param padding_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET,
+            * @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
             *                     - PADDING_VALID means no padding
-            *                     PADDING_SAME and PADDING_SAME_MXNET results in padding with zeros evenly to the left/right or up/down of the input
+            *                     PADDING_SAME_END and PADDING_SAME_BEGIN results in padding with zeros evenly to the left/right or up/down of the input
             *                     such that output has the same height/width dimension as the input,
-            *                     - PADDING_SAME results padding in TensorFlow style
-            *                     - PADDING_SAME_MXNET results padding in MXNET style
+            *                     - PADDING_SAME_END results padding in TensorFlow style
+            *                     - PADDING_SAME_BEGIN results padding in MXNET style
+            *                     - PADDING_NOT_SET means padding with the specific "padding" value below.
+            * @param padding if padding_type is PADDING_NOT_SET, this value will be used as padding size.
+            *                the shape must be 4, the value of each position is: [padding top, padding bottom, padding left, padding right]
             * @param stride_y stride in height
             * @param stride_x stride in width
             * @param name name of layer
@@ -48,16 +51,23 @@ namespace dl
            AvgPool2D(const int output_exponent,
                      const std::vector<int> filter_shape,
                      const padding_type_t padding_type = PADDING_VALID,
+                     std::vector<int> padding = {},
                      const int stride_y = 1,
                      const int stride_x = 1,
-                     const char *name = NULL) : Layer(name),
-                                                output_exponent(output_exponent),
-                                                filter_shape(filter_shape),
-                                                stride_y(stride_y),
-                                                stride_x(stride_x),
-                                                padding_type(padding_type)
+                     const char *name = "AvgPool2D") : Layer(name),
+                                                       output_exponent(output_exponent),
+                                                       filter_shape(filter_shape),
+                                                       padding_type(padding_type),
+                                                       padding(padding),
+                                                       stride_y(stride_y),
+                                                       stride_x(stride_x),
+                                                       output_shape({})
            {
                this->output = new Tensor<feature_t>;
+               if (this->padding_type == PADDING_NOT_SET)
+               {
+                   assert(this->padding.size() == 4);
+               }
            }
 
            /**
@@ -66,7 +76,7 @@ namespace dl
             */
            ~AvgPool2D()
            {
-               if(this->output != NULL)
+               if (this->output != NULL)
                {
                    delete this->output;
                }
@@ -76,20 +86,31 @@ namespace dl
             * @brief Update output shape and padding.
             *
             * @param input as an input
+            * @param print_shape whether to print the output shape.
             */
-           void build(Tensor<feature_t> &input)
+           void build(Tensor<feature_t> &input, bool print_shape = false)
            {
                assert(input.shape[0] > 0);
                assert(input.shape[1] > 0);
-               std::vector<int> output_shape = nn::get_output_shape(input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
-               this->output->set_shape(output_shape);
+               assert(input.shape.size() == 3);
+
+               this->output_shape = nn::get_output_shape(input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type, false, this->padding);
+               this->output->set_shape(this->output_shape);
                this->output->set_exponent(this->output_exponent);
-
-               this->padding = nn::get_pad_size(output_shape, input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
-               input.set_padding_size(this->padding);
-               this->output->free_element();
-           }
+               if (this->padding_type != PADDING_NOT_SET)
+               {
+                   this->padding = nn::get_pad_size(this->output_shape, input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
+               }
+
+               this->output->free_element();
+
+               if (print_shape)
+               {
+                   std::cout << this->name << " | ";
+                   this->output->print_shape();
+               }
+           }
 
            /**
            * @brief Get the output
@@ -108,7 +129,6 @@ namespace dl
             * @param autoload_enable one of true or false,
             *                        - true: load input and output from PSRAM to CACHE automatically
             *                        - false: do not
-            * @param assign_core not effective yet
             * @return AvgPool2D result
             */
            Tensor<feature_t> &call(Tensor<feature_t> &input, uint8_t autoload_enable = 0)
@@ -116,7 +136,11 @@ namespace dl
                DL_LOG_LAYER_LATENCY_INIT();
 
                DL_LOG_LAYER_LATENCY_START();
-               this->output->apply_element();
+               if (this->output->shape != this->output_shape)
+               {
+                   this->output->set_shape(this->output_shape);
+               }
+               this->output->malloc_element();
                this->output->set_exponent(this->output_exponent);
                DL_LOG_LAYER_LATENCY_END(this->name, "apply");
 
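A sketch of the new PADDING_NOT_SET path (the values are illustrative): the four-element padding vector is asserted in the constructor and flows into nn::get_output_shape() in build(), bypassing nn::get_pad_size():

dl::layer::AvgPool2D<int16_t> pool(/*output_exponent=*/-10,
                                   /*filter_shape=*/{2, 2},
                                   dl::PADDING_NOT_SET,
                                   /*padding=*/{0, 1, 0, 1},   // top, bottom, left, right
                                   /*stride_y=*/2,
                                   /*stride_x=*/2);
pool.build(x, /*print_shape=*/true);
dl::Tensor<int16_t> &y = pool.call(x);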
@@ -1,6 +1,7 @@
 #pragma once
 #include "dl_tool.hpp"
 #include "dl_tool_cache.hpp"
+#include <iostream>
 
 namespace dl
 {
@@ -0,0 +1,139 @@
+#pragma once
+
+#include <assert.h>
+#include <vector>
+
+#include "dl_constant.hpp"
+#include "dl_variable.hpp"
+#include "dl_tool.hpp"
+#include "dl_layer_base.hpp"
+#include "dl_nn_concat.hpp"
+
+namespace dl
+{
+    namespace layer
+    {
+        /**
+         * @brief Concat(input1, input2, input3, ...).
+         *
+         * @tparam feature_t support all kinds of integer and float data type
+         */
+        template <typename feature_t>
+        class Concat : Layer
+        {
+        private:
+            int output_exponent;           /*<! exponent of output >*/
+            int axis;                      /*<! The axis along which the Tensor will be concatenated. >*/
+            Tensor<feature_t> *output;     /*<! output ptr of Concat >*/
+            std::vector<int> output_shape; /*<! output shape of Concat >*/
+        public:
+            /**
+             * @brief Construct a new Concat object.
+             *
+             * @param name name of layer
+             * @param axis The axis along which the Tensor will be concatenated.
+             */
+            Concat(int axis, const char *name = "Concat") : Layer(name), axis(axis), output_shape({})
+            {
+                this->output = new Tensor<feature_t>;
+            }
+
+            /**
+             * @brief Destroy the Concat object
+             */
+            ~Concat()
+            {
+                if (this->output != NULL)
+                {
+                    delete this->output;
+                }
+            }
+
+            /**
+             * @brief Collect inputs' channel and memory offset, called in Model.build().
+             *
+             * @param args pointers of concatenated Tensor
+             * @param print_shape whether to print the output shape.
+             */
+            void build(std::vector<Tensor<feature_t> *> args, bool print_shape = false)
+            {
+                assert(args.size() > 1);
+                int shape_size = args[0]->shape.size();
+
+                if (this->axis < 0)
+                {
+                    this->axis = shape_size + this->axis;
+                }
+                assert((this->axis < shape_size) && (this->axis > -1));
+
+                int output_shape_axis = args[0]->shape[this->axis];
+
+                for (int i = 1; i < args.size(); i++)
+                {
+                    assert(shape_size == args[i]->shape.size());
+                    assert(args[i]->exponent == args[i - 1]->exponent);
+                    output_shape_axis += args[i]->shape[this->axis];
+
+                    for (int j = 0; j < shape_size; j++)
+                    {
+                        if (j != this->axis)
+                        {
+                            assert(args[i]->shape[j] == args[i - 1]->shape[j]);
+                        }
+                    }
+                }
+
+                this->output_exponent = args[0]->exponent;
+                this->output_shape = args[0]->shape;
+                this->output_shape[this->axis] = output_shape_axis;
+
+                this->output->set_shape(this->output_shape);
+                this->output->set_exponent(this->output_exponent);
+                this->output->free_element();
+
+                if (print_shape)
+                {
+                    std::cout << this->name << " | ";
+                    this->output->print_shape();
+                }
+            }
+
+            /**
+             * @brief Call Concat operation
+             *
+             * @param inputs the pointers of inputs
+             * @param free_inputs true: free the inputs after call
+             *                    false: do not free inputs
+             * @return Tensor<feature_t>& concat result
+             */
+            Tensor<feature_t> &call(std::vector<Tensor<feature_t> *> inputs, bool free_inputs = false)
+            {
+                DL_LOG_LAYER_LATENCY_INIT();
+
+                DL_LOG_LAYER_LATENCY_START();
+                if (this->output->shape != this->output_shape)
+                {
+                    this->output->set_shape(this->output_shape);
+                }
+                this->output->malloc_element();
+                this->output->set_exponent(this->output_exponent);
+                DL_LOG_LAYER_LATENCY_END(this->name, "apply");
+
+                DL_LOG_LAYER_LATENCY_START();
+                nn::concat(*this->output, inputs, this->axis, free_inputs);
+                DL_LOG_LAYER_LATENCY_END(this->name, "concat");
+                return *this->output;
+            }
+
+            /**
+             * @brief Get the output
+             *
+             * @return Tensor<feature_t>& Concat result
+             */
+            Tensor<feature_t> &get_output()
+            {
+                return *this->output;
+            }
+        };
+    } // namespace layer
+} // namespace dl
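Typical use of the new Concat layer, sketched (branch_a and branch_b stand for tensors produced by earlier layers; per the asserts in build() they must agree on exponent and on every dimension except the concat axis):

std::vector<dl::Tensor<int16_t> *> ins = {&branch_a, &branch_b};
dl::layer::Concat<int16_t> concat(/*axis=*/-1, "concat");  // -1 resolves to the last (channel) axis in build()
concat.build(ins, /*print_shape=*/true);
dl::Tensor<int16_t> &y = concat.call(ins, /*free_inputs=*/false);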
@@ -13,8 +13,11 @@ namespace dl
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantize
         *         - int8_t: stands for operation in int8_t quantize
+        * @tparam bias_t supports int16_t and int8_t, must specify when using int8 per-channel quantization
+        *         - int16_t: for int16 quantization and int8 per-channel quantization
+        *         - int8_t: for int8 per-tensor quantization
         */
-       template <typename feature_t>
+       template <typename feature_t, typename bias_t = feature_t>
        class Conv2D : public Layer
        {
        private:
@@ -22,14 +25,14 @@ namespace dl
            const Filter<feature_t> *filter; /*<! filter of Conv2D >*/
            const int stride_y;              /*<! stride in height >*/
            const int stride_x;              /*<! stride in width >*/
-           const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET >*/
-           const Bias<feature_t> *bias;     /*<! bias of Conv2D, if you don't specify anything, no bias is added >*/
+           const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
+           const Bias<bias_t> *bias;        /*<! bias of Conv2D, if you don't specify anything, no bias is added >*/
            const Activation<feature_t> *activation; /*<! activation of Conv2D, if you don't specify anything, no activation is applied >*/
            std::vector<int> padding;        /*<! padding size needed in [top, bottom, left, right] of this operation >*/
-           Tensor<feature_t> *output;       /*<! output ptr of Conv2D >*/
+           Tensor<feature_t> *output;       /*<! output ptr of Conv2D >*/
+           std::vector<int> output_shape;   /*<! output shape of Conv2D >*/
 
        public:
 
           /**
            * @brief Construct a new Conv2D object.
            *
@@ -37,33 +40,43 @@ namespace dl
            * @param filter filter of Conv2D
            * @param bias bias of Conv2D, if you don't specify anything, no bias is added
            * @param activation activation of Conv2D, if you don't specify anything, no activation is applied
-           * @param padding_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET,
+           * @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
            *                     - PADDING_VALID means no padding
-           *                     PADDING_SAME and PADDING_SAME_MXNET results in padding with zeros evenly to the left/right or up/down of the input
+           *                     PADDING_SAME_END and PADDING_SAME_BEGIN results in padding with zeros evenly to the left/right or up/down of the input
            *                     such that output has the same height/width dimension as the input,
-           *                     - PADDING_SAME results padding in TensorFlow style
-           *                     - PADDING_SAME_MXNET results padding in MXNET style
+           *                     - PADDING_SAME_END results padding in TensorFlow style
+           *                     - PADDING_SAME_BEGIN results padding in MXNET style
+           *                     - PADDING_NOT_SET means padding with the specific "padding" value below.
+           * @param padding if padding_type is PADDING_NOT_SET, this value will be used as padding size.
+           *                the shape must be 4, the value of each position is: [padding top, padding bottom, padding left, padding right]
            * @param stride_y stride in height
            * @param stride_x stride in width
            * @param name name of layer
            */
           Conv2D(const int output_exponent,
                  const Filter<feature_t> *filter,
-                 const Bias<feature_t> *bias = NULL,
+                 const Bias<bias_t> *bias = NULL,
                  const Activation<feature_t> *activation = NULL,
                  const padding_type_t padding_type = PADDING_VALID,
+                 std::vector<int> padding = {},
                  const int stride_y = 1,
                  const int stride_x = 1,
-                 const char *name = NULL) : Layer(name),
-                                            output_exponent(output_exponent),
-                                            filter(filter),
-                                            stride_y(stride_y),
-                                            stride_x(stride_x),
-                                            padding_type(padding_type),
-                                            bias(bias),
-                                            activation(activation)
+                 const char *name = "Conv2D") : Layer(name),
+                                                output_exponent(output_exponent),
+                                                filter(filter),
+                                                stride_y(stride_y),
+                                                stride_x(stride_x),
+                                                padding_type(padding_type),
+                                                bias(bias),
+                                                activation(activation),
+                                                padding(padding),
+                                                output_shape({})
           {
               this->output = new Tensor<feature_t>;
+              if (this->padding_type == PADDING_NOT_SET)
+              {
+                  assert(this->padding.size() == 4);
+              }
           }
 
           /**
@@ -82,19 +95,30 @@ namespace dl
            * @brief Update output padding and input padding.
            *
            * @param input as an input
+           * @param print_shape whether to print the output shape.
            */
-          void build(Tensor<feature_t> &input)
+          void build(Tensor<feature_t> &input, bool print_shape = false)
           {
-              assert(input.shape[0] > 0);
-              assert(input.shape[1] > 0);
+              assert(input.shape.size() == 3);
+              assert(this->filter->shape.size() == 4);
+              assert(input.shape[2] == this->filter->shape[2]);
 
-              std::vector<int> output_shape = nn::get_output_shape(input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type, true);
-              this->output->set_shape(output_shape);
+              this->output_shape = nn::get_output_shape(input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type, true, this->padding);
+              this->output->set_shape(this->output_shape);
              this->output->set_exponent(this->output_exponent);
              this->output->free_element();
+             if (this->padding_type != PADDING_NOT_SET)
+             {
+                 this->padding = nn::get_pad_size(this->output_shape, input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
+             }
 
-             this->padding = nn::get_pad_size(output_shape, input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
-             input.set_padding_size(this->padding);
+             if (print_shape)
+             {
+                 std::cout << this->name << " | ";
+                 this->output->print_shape();
+             }
          }
 
          /**
@@ -122,7 +146,11 @@ namespace dl
              DL_LOG_LAYER_LATENCY_INIT();
 
              DL_LOG_LAYER_LATENCY_START();
-             this->output->apply_element();
+             if (this->output->shape != this->output_shape)
+             {
+                 this->output->set_shape(this->output_shape);
+             }
+             this->output->malloc_element();
              this->output->set_exponent(this->output_exponent);
              DL_LOG_LAYER_LATENCY_END(this->name, "apply");
 
@@ -153,5 +181,6 @@ namespace dl
              dl::tool::cache::preload_func((uint32_t)(this->filter->element), size);
          }
      };
+
 } // namespace layer
 } // namespace dl
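The new bias_t template parameter is what enables int8 per-channel quantization; a construction sketch (filter, bias and relu are assumed to be pre-built dl::Filter<int8_t>, dl::Bias<int16_t> and dl::Activation<int8_t> objects produced by model conversion, and the call() argument list is abbreviated):

// int8 per-channel: 8-bit features, 16-bit per-channel bias
dl::layer::Conv2D<int8_t, int16_t> conv(/*output_exponent=*/-7,
                                        &filter, &bias, &relu,
                                        dl::PADDING_SAME_END,
                                        /*padding=*/{},
                                        /*stride_y=*/1,
                                        /*stride_x=*/1);
conv.build(x, /*print_shape=*/true);
dl::Tensor<int8_t> &y = conv.call(x);

For plain int8 per-tensor quantization the second template argument can be omitted, since bias_t defaults to feature_t; DepthwiseConv2D below gains the identical parameter and is used the same way.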
@@ -13,8 +13,11 @@ namespace dl
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantize
         *         - int8_t: stands for operation in int8_t quantize
+        * @tparam bias_t supports int16_t and int8_t, must specify when using int8 per-channel quantization
+        *         - int16_t: for int16 quantization and int8 per-channel quantization
+        *         - int8_t: for int8 per-tensor quantization
         */
-       template <typename feature_t>
+       template <typename feature_t, typename bias_t = feature_t>
        class DepthwiseConv2D : public Layer
        {
        private:
@@ -22,14 +25,14 @@ namespace dl
            const Filter<feature_t> *filter; /*<! filter of DepthwiseConv2D >*/
            const int stride_y;              /*<! stride in height >*/
            const int stride_x;              /*<! stride in width >*/
-           const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET >*/
-           const Bias<feature_t> *bias;     /*<! bias of DepthwiseConv2D, if you don't specify anything, no bias is added >*/
+           const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
+           const Bias<bias_t> *bias;        /*<! bias of DepthwiseConv2D, if you don't specify anything, no bias is added >*/
            const Activation<feature_t> *activation; /*<! activation of DepthwiseConv2D, if you don't specify anything, no activation is applied >*/
            std::vector<int> padding;        /*<! padding size needed in [top, bottom, left, right] of this operation >*/
            Tensor<feature_t> *output;       /*<! output ptr of DepthwiseConv2D >*/
+           std::vector<int> output_shape;   /*<! output shape of DepthwiseConv2D >*/
 
        public:
 
           /**
            * @brief Construct a new DepthwiseConv2D object.
            *
@@ -37,40 +40,50 @@ namespace dl
            * @param filter filter of DepthwiseConv2D
            * @param bias bias of DepthwiseConv2D, if you don't specify anything, no bias is added
            * @param activation activation of DepthwiseConv2D, if you don't specify anything, no activation is applied
-           * @param padding_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET,
+           * @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
            *                     - PADDING_VALID means no padding
-           *                     PADDING_SAME and PADDING_SAME_MXNET results in padding with zeros evenly to the left/right or up/down of the input
-           *                     such that output has the same height/width dimension as the input
-           *                     - PADDING_SAME results padding in TensorFlow style
-           *                     - PADDING_SAME_MXNET results padding in MXNET style
+           *                     PADDING_SAME_END and PADDING_SAME_BEGIN results in padding with zeros evenly to the left/right or up/down of the input
+           *                     such that output has the same height/width dimension as the input,
+           *                     - PADDING_SAME_END results padding in TensorFlow style
+           *                     - PADDING_SAME_BEGIN results padding in MXNET style
+           *                     - PADDING_NOT_SET means padding with the specific "padding" value below.
+           * @param padding if padding_type is PADDING_NOT_SET, this value will be used as padding size.
+           *                the shape must be 4, the value of each position is: [padding top, padding bottom, padding left, padding right]
            * @param stride_y - stride in height
            * @param stride_x - stride in width
            * @param name name of layer
            */
           DepthwiseConv2D(const int output_exponent,
                           const Filter<feature_t> *filter,
-                          const Bias<feature_t> *bias = NULL,
+                          const Bias<bias_t> *bias = NULL,
                           const Activation<feature_t> *activation = NULL,
                           const padding_type_t padding_type = PADDING_VALID,
+                          std::vector<int> padding = {},
                           const int stride_y = 1,
                           const int stride_x = 1,
-                          const char *name = NULL) : Layer(name),
-                                                     output_exponent(output_exponent),
-                                                     filter(filter),
-                                                     stride_y(stride_y),
-                                                     stride_x(stride_x),
-                                                     padding_type(padding_type),
-                                                     bias(bias),
-                                                     activation(activation)
+                          const char *name = "DepthwiseConv2D") : Layer(name),
+                                                                  output_exponent(output_exponent),
+                                                                  filter(filter),
+                                                                  stride_y(stride_y),
+                                                                  stride_x(stride_x),
+                                                                  padding_type(padding_type),
+                                                                  bias(bias),
+                                                                  activation(activation),
+                                                                  padding(padding),
+                                                                  output_shape({})
           {
               this->output = new Tensor<feature_t>;
+              if (this->padding_type == PADDING_NOT_SET)
+              {
+                  assert(this->padding.size() == 4);
+              }
           }
 
           /**
            * @brief Destroy the DepthwiseConv2D object.
            *
            */
-          ~DepthwiseConv2D()
+          ~DepthwiseConv2D()
           {
               if (this->output != NULL)
               {
@@ -82,19 +95,31 @@ namespace dl
            * @brief Update output shape and padding.
            *
            * @param input as an input
+           * @param print_shape whether to print the output shape.
            */
-          void build(Tensor<feature_t> &input)
+          void build(Tensor<feature_t> &input, bool print_shape = false)
           {
-              assert(input.shape[0] > 0);
-              assert(input.shape[1] > 0);
+              assert(input.shape.size() == 3);
+              assert(this->filter->shape.size() == 4);
+              assert(input.shape[2] == this->filter->shape[2]);
 
-              std::vector<int> output_shape = nn::get_output_shape(input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
-              this->output->set_shape(output_shape);
+              this->output_shape = nn::get_output_shape(input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type, false, this->padding);
+              this->output->set_shape(this->output_shape);
              this->output->set_exponent(this->output_exponent);
 
-             this->padding = nn::get_pad_size(output_shape, input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
-             input.set_padding_size(this->padding);
+             if (this->padding_type != PADDING_NOT_SET)
+             {
+                 this->padding = nn::get_pad_size(this->output_shape, input.shape, this->filter->shape_with_dilation, this->stride_y, this->stride_x, this->padding_type);
+             }
+             this->output->free_element();
+
+             if (print_shape)
+             {
+                 std::cout << this->name << " | ";
+                 this->output->print_shape();
+             }
          }
 
          /**
@@ -122,7 +147,12 @@ namespace dl
              DL_LOG_LAYER_LATENCY_INIT();
 
              DL_LOG_LAYER_LATENCY_START();
-             this->output->apply_element();
+             if (this->output->shape != this->output_shape)
+             {
+                 this->output->set_shape(this->output_shape);
+             }
+
+             this->output->malloc_element();
              this->output->set_exponent(this->output_exponent);
              DL_LOG_LAYER_LATENCY_END(this->name, "apply");
 
@@ -0,0 +1,128 @@
+#pragma once
+
+#include "dl_constant.hpp"
+#include "dl_variable.hpp"
+#include "dl_tool.hpp"
+#include "dl_layer_base.hpp"
+
+namespace dl
+{
+    namespace layer
+    {
+        /**
+         * @brief
+         *
+         * @tparam feature_t
+         */
+        template <typename feature_t>
+        class ExpandDims : public Layer
+        {
+        private:
+            std::vector<int> output_shape; /*<! output shape of ExpandDims >*/
+            std::vector<int> axis;         /*<! position where the new axis is placed. >*/
+            Tensor<feature_t> *output;     /*<! output ptr of ExpandDims >*/
+            bool inplace;                  /*<! true: the output will store to input0
+                                                false: the output will store to a separate memory >*/
+
+        public:
+            int output_exponent;
+
+            /**
+             * @brief Construct a new ExpandDims object
+             *
+             * @param axis position where the new axis is placed.
+             * @param name name of layer
+             * @param inplace true: the output will store to input
+             *                false: the output will store to a separate memory
+             */
+            ExpandDims(std::vector<int> axis, const char *name = "ExpandDims", bool inplace = false) : Layer(name),
+                                                                                                       axis(axis), inplace(inplace), output_shape({})
+            {
+            }
+
+            /**
+             * @brief Destroy the ExpandDims object
+             *
+             */
+            ~ExpandDims()
+            {
+                if ((!this->inplace) && (this->output != NULL))
+                {
+                    delete this->output;
+                }
+            }
+
+            /**
+             * @brief Update output shape.
+             *
+             * @param input as an input.
+             * @param print_shape whether to print the output shape.
+             */
+            void build(Tensor<feature_t> &input, bool print_shape = false)
+            {
+                this->output_exponent = input.exponent;
+                if (!this->inplace)
+                {
+                    if (this->output != NULL)
+                    {
+                        this->output = new Tensor<feature_t>;
+                    }
+                    this->output->set_exponent(this->output_exponent);
+                    this->output->set_shape(this->output_shape);
+                    this->output->expand_dims(this->axis);
+                    this->output->free_element();
+                }
+                else
+                {
+                    this->output = &input;
+                    this->output->set_shape(this->output_shape);
+                    this->output->expand_dims(this->axis);
+                }
+                this->output_shape = this->output->shape;
+
+                if (print_shape)
+                {
+                    std::cout << this->name << " | ";
+                    this->output->print_shape();
+                }
+            }
+
+            /**
+             * @brief Get the output
+             *
+             * @return Tensor<feature_t>& ExpandDims result
+             */
+            Tensor<feature_t> &get_output()
+            {
+                return *this->output;
+            }
+
+            /**
+             * @brief call ExpandDims operation
+             *
+             * @param input
+             * @return Tensor<feature_t>& ExpandDims result
+             */
+            Tensor<feature_t> &call(Tensor<feature_t> &input)
+            {
+                DL_LOG_LAYER_LATENCY_INIT();
+
+                if (!this->inplace)
+                {
+                    DL_LOG_LAYER_LATENCY_START();
+                    this->output->set_exponent(input.exponent);
+                    this->output->set_shape(this->output_shape);
+                    this->output->copy_element(input, true);
+                    DL_LOG_LAYER_LATENCY_END(this->name, "ExpandDims");
+                }
+                else
+                {
+                    DL_LOG_LAYER_LATENCY_START();
+                    this->output->set_shape(this->output_shape);
+                    DL_LOG_LAYER_LATENCY_END(this->name, "ExpandDims");
+                }
+                return *this->output;
+            }
+        };
+    } // namespace layer
+} // namespace dl
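A sketch of the new layer (x stands for a built int16 tensor; the axis value {0} and the shape are illustrative, and with inplace=true the returned reference aliases x):

dl::layer::ExpandDims<int16_t> unsqueeze(/*axis=*/{0}, "expand_dims", /*inplace=*/true);
unsqueeze.build(x, /*print_shape=*/true);    // e.g. [H, W, C] -> [1, H, W, C]
dl::Tensor<int16_t> &y = unsqueeze.call(x);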
@@ -0,0 +1,120 @@
+#pragma once
+
+#include "dl_constant.hpp"
+#include "dl_variable.hpp"
+#include "dl_tool.hpp"
+#include "dl_layer_base.hpp"
+
+namespace dl
+{
+    namespace layer
+    {
+        /**
+         * @brief
+         *
+         * @tparam feature_t
+         */
+        template <typename feature_t>
+        class Flatten : public Layer
+        {
+        private:
+            int output_exponent;           /*<! exponent of output >*/
+            Tensor<feature_t> *output;     /*<! output ptr of Flatten >*/
+            bool inplace;                  /*<! true: the output will store to input0
+                                                false: the output will store to a separate memory >*/
+            std::vector<int> output_shape; /*<! output shape of Flatten >*/
+
+        public:
+            /**
+             * @brief Construct a new Flatten object
+             *
+             * @param name name of layer
+             * @param inplace true: the output will store to input0
+             *                false: the output will store to a separate memory
+             */
+            Flatten(const char *name = "Flatten", bool inplace = false) : Layer(name), inplace(inplace), output_shape({})
+            {}
+
+            /**
+             * @brief Destroy the Flatten object
+             *
+             */
+            ~Flatten()
+            {
+                if ((!this->inplace) && (this->output != NULL))
+                {
+                    delete this->output;
+                }
+            }
+
+            /**
+             * @brief Update output shape.
+             *
+             * @param input as an input
+             * @param print_shape whether to print the output shape.
+             */
+            void build(Tensor<feature_t> &input, bool print_shape = false)
+            {
+                this->output_exponent = input.exponent;
+                this->output_shape = {input.get_size()};
+                if (!this->inplace)
+                {
+                    if (this->output != NULL)
+                    {
+                        this->output = new Tensor<feature_t>;
+                    }
+                    this->output->set_exponent(this->output_exponent);
+                    this->output->set_shape(this->output_shape);
+                    this->output->free_element();
+                }
+                else
+                {
+                    this->output = &input;
+                    this->output->set_shape(this->output_shape);
+                }
+                if (print_shape)
+                {
+                    std::cout << this->name << " | ";
+                    this->output->print_shape();
+                }
+            }
+
+            /**
+             * @brief Get the output
+             *
+             * @return Tensor<feature_t>& Flatten result
+             */
+            Tensor<feature_t> &get_output()
+            {
+                return *this->output;
+            }
+
+            /**
+             * @brief Call Flatten operation.
+             *
+             * @param input as an input
+             * @return Tensor<feature_t>& Flatten result
+             */
+            Tensor<feature_t> &call(Tensor<feature_t> &input)
+            {
+                DL_LOG_LAYER_LATENCY_INIT();
+
+                if (!this->inplace)
+                {
+                    DL_LOG_LAYER_LATENCY_START();
+                    this->output->set_exponent(input.exponent);
+                    this->output->flatten();
+                    this->output->copy_element(input, true);
+                    DL_LOG_LAYER_LATENCY_END(this->name, "flatten");
+                }
+                else
+                {
+                    DL_LOG_LAYER_LATENCY_START();
+                    this->output->flatten();
+                    DL_LOG_LAYER_LATENCY_END(this->name, "flatten");
+                }
+                return *this->output;
+            }
+        };
+    } // namespace layer
+} // namespace dl
@@ -0,0 +1,167 @@
+#pragma once
+
+#include "dl_nn_fully_connected.hpp"
+#include "dl_layer_base.hpp"
+
+namespace dl
+{
+    namespace layer
+    {
+        /**
+         * @brief Activation(FullyConnected(input, filter) + bias).
+         *
+         * @tparam feature_t supports int16_t and int8_t,
+         *         - int16_t: stands for operation in int16_t quantize
+         *         - int8_t: stands for operation in int8_t quantize
+         * @tparam bias_t supports int16_t and int8_t, must specify when using int8 per-channel quantization
+         *         - int16_t: for int16 quantization and int8 per-channel quantization
+         *         - int8_t: for int8 per-tensor quantization
+         */
+        template <typename feature_t, typename bias_t = feature_t>
+        class FullyConnected : public Layer
+        {
+        private:
+            const int output_exponent; /*<! exponent of output >*/
+            const bool flatten;        /*<! true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
+                                            false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ...., xn, output_dim] >*/
+            const Filter<feature_t> *filter; /*<! filter of FullyConnected >*/
+            const Bias<bias_t> *bias;        /*<! bias of FullyConnected, if you don't specify anything, no bias is added >*/
+            const Activation<feature_t> *activation; /*<! activation of FullyConnected, if you don't specify anything, no activation is applied >*/
+            Tensor<feature_t> *output;       /*<! output ptr of FullyConnected >*/
+            std::vector<int> output_shape;   /*<! output shape of FullyConnected >*/
+
+        public:
+            /**
+             * @brief Construct a new FullyConnected object.
+             *
+             * @param output_exponent exponent of output
+             * @param filter filter of FullyConnected
+             * @param bias bias of FullyConnected, if you don't specify anything, no bias is added
+             * @param activation activation of FullyConnected, if you don't specify anything, no activation is applied
+             * @param flatten true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
+             *                false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ...., xn, output_dim]
+             * @param name name of layer
+             */
+            FullyConnected(const int output_exponent,
+                           const Filter<feature_t> *filter,
+                           const Bias<bias_t> *bias = NULL,
+                           const Activation<feature_t> *activation = NULL,
+                           const bool flatten = true,
+                           const char *name = "FullyConnected") : Layer(name),
+                                                                  output_exponent(output_exponent),
+                                                                  flatten(flatten),
+                                                                  filter(filter),
+                                                                  bias(bias),
+                                                                  activation(activation),
+                                                                  output_shape({})
+            {
+                this->output = new Tensor<feature_t>;
+            }
+
+            /**
+             * @brief Destroy the FullyConnected object.
+             *
+             */
+            ~FullyConnected()
+            {
+                if (this->output != NULL)
+                {
+                    delete this->output;
+                }
+            }
+
+            /**
+             * @brief Update output padding and input padding.
+             *
+             * @param input as an input
+             * @param print_shape whether to print the output shape.
+             */
+            void build(Tensor<feature_t> &input, bool print_shape = false)
+            {
+                assert(this->filter->shape.size() == 4);
+                assert(this->filter->shape[0] == 1);
+                assert(this->filter->shape[1] == 1);
+                if (this->flatten)
+                {
+                    assert(input.get_size() == this->filter->shape[2]);
+                    this->output_shape = {this->filter->shape[3]};
+                }
+                else
+                {
+                    assert(input.shape.back() == this->filter->shape[2]);
+                    this->output_shape = input.shape;
+                    this->output_shape[this->output_shape.size() - 1] = this->filter->shape[3];
+                }
+                this->output->set_shape(this->output_shape);
+                this->output->set_exponent(this->output_exponent);
+                this->output->free_element();
+
+                if (print_shape)
+                {
+                    std::cout << this->name << " | ";
+                    this->output->print_shape();
+                }
+            }
+
+            /**
+             * @brief Get the output
+             *
+             * @return Tensor<feature_t>& FullyConnected result
+             */
+            Tensor<feature_t> &get_output()
+            {
+                return *this->output;
+            }
+
+            /**
+             * @brief Call FullyConnected operation
+             *
+             * @param input as an input.
+             * @param autoload_enable one of true or false,
+             *                        - true: load input and output from PSRAM to CACHE automatically
+             *                        - false: do not
+             * @param assign_core not effective yet
+             * @return FullyConnected result
+             */
+            Tensor<feature_t> &call(Tensor<feature_t> &input, bool autoload_enable = false, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
+            {
+                DL_LOG_LAYER_LATENCY_INIT();
+
+                DL_LOG_LAYER_LATENCY_START();
+                if (this->output->shape != this->output_shape)
+                {
+                    this->output->set_shape(this->output_shape);
+                }
+                this->output->malloc_element();
+                this->output->set_exponent(this->output_exponent);
+                DL_LOG_LAYER_LATENCY_END(this->name, "apply");
+
+                if (autoload_enable)
+                {
+                    dl::tool::cache::autoload_func((uint32_t)(this->output->element), this->output->get_size() * sizeof(feature_t),
+                                                   (uint32_t)(input.element), input.get_size() * sizeof(feature_t));
+                }
+
+                DL_LOG_LAYER_LATENCY_START();
+                nn::fully_connected(*this->output, input, *(this->filter), this->bias, this->activation, this->flatten, assign_core);
+                DL_LOG_LAYER_LATENCY_END(this->name, "fully_connected");
+                return *this->output;
+            }
+
+            /**
+             * @brief Preload the filter to Cache.
+             * NOTE: Call this layer's preload() before previous layer's call() such that filter could be loaded while previous layer is doing calculation.
+             */
+            void preload()
+            {
+                size_t size = sizeof(feature_t);
+                int shape_size = this->filter->shape.size();
+                for (int i = 0; i < shape_size; ++i)
+                {
+                    size *= filter->shape[i];
+                }
+                dl::tool::cache::preload_func((uint32_t)(this->filter->element), size);
+            }
+        };
+    } // namespace layer
+} // namespace dl
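A classifier-head sketch for the new FullyConnected layer (fc_filter and fc_bias are assumed pre-quantized constants, with a filter shape of [1, 1, in_features, out_features] as required by the asserts in build()):

dl::layer::FullyConnected<int16_t> fc(/*output_exponent=*/-9, &fc_filter, &fc_bias,
                                      /*activation=*/NULL, /*flatten=*/true, "fc");
fc.preload();                  // optional: stage the filter into cache during the previous layer's call()
fc.build(features, /*print_shape=*/true);
dl::Tensor<int16_t> &logits = fc.call(features);   // flatten=true: output shape is {out_features}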
@@ -20,8 +20,9 @@ namespace dl
        class GlobalAveragePool2D : public Layer
        {
        private:
-           const int output_exponent; /*<! exponent of output >*/
-           Tensor<feature_t> *output; /*<! output ptr of GlobalAveragePool2D >*/
+           const int output_exponent;     /*<! exponent of output >*/
+           std::vector<int> output_shape; /*<! output shape of GlobalAveragePool2D >*/
+           Tensor<feature_t> *output;     /*<! output ptr of GlobalAveragePool2D >*/
        public:
            /**
             * @brief Construct a new GlobalAveragePool2D object.
@@ -29,8 +30,9 @@ namespace dl
             * @param output_exponent exponent of output
             * @param name name of layer
             */
-           GlobalAveragePool2D(const int output_exponent, const char *name = NULL) : Layer(name),
-                                                                                     output_exponent(output_exponent)
+           GlobalAveragePool2D(const int output_exponent, const char *name = "GlobalAveragePool2D") : Layer(name),
+                                                                                                      output_exponent(output_exponent),
+                                                                                                      output_shape({})
 
            {
                this->output = new Tensor<feature_t>;
@@ -52,17 +54,26 @@ namespace dl
             * @brief Update output shape.
             *
             * @param input as an input
+            * @param print_shape whether to print the output shape.
             */
-           void build(Tensor<feature_t> &input)
+           void build(Tensor<feature_t> &input, bool print_shape = false)
            {
                assert(input.shape[0] > 0);
                assert(input.shape[1] > 0);
+               assert(input.shape.size() == 3);
 
                std::vector<int> output_shape(input.shape.size(), 1);
                output_shape[2] = input.shape[2];
-               this->output->set_shape(output_shape);
+               this->output_shape = output_shape;
+               this->output->set_shape(this->output_shape);
                this->output->set_exponent(this->output_exponent);
                this->output->free_element();
+
+               if (print_shape)
+               {
+                   std::cout << this->name << " | ";
+                   this->output->print_shape();
+               }
            }
 
            /**
@@ -90,7 +101,11 @@ namespace dl
                DL_LOG_LAYER_LATENCY_INIT();
 
                DL_LOG_LAYER_LATENCY_START();
-               this->output->apply_element();
+               if (this->output->shape != this->output_shape)
+               {
+                   this->output->set_shape(this->output_shape);
+               }
+               this->output->malloc_element();
                this->output->set_exponent(this->output_exponent);
                DL_LOG_LAYER_LATENCY_END(this->name, "apply");
 
|
@ -20,15 +20,15 @@ namespace dl
|
||||
class GlobalMaxPool2D : public Layer
|
||||
{
|
||||
private:
|
||||
Tensor<feature_t> *output; /*<! output ptr of GlobalMaxPool2D >*/
|
||||
Tensor<feature_t> *output; /*<! output ptr of GlobalMaxPool2D >*/
|
||||
std::vector<int> output_shape; /*<! output shape of GlobalMaxPool2D >*/
|
||||
public:
|
||||
|
||||
/**
|
||||
* @brief Construct a new GlobalMaxPool2D object.
|
||||
*
|
||||
* @param name name of layer
|
||||
*/
|
||||
GlobalMaxPool2D(const char *name = NULL) : Layer(name)
|
||||
GlobalMaxPool2D(const char *name = "GlobalMaxPool2D") : Layer(name), output_shape({})
|
||||
{
|
||||
this->output = new Tensor<feature_t>;
|
||||
}
|
||||
@@ -49,17 +49,26 @@ namespace dl
             * @brief Update output shape and exponent.
             *
             * @param input as an input
+            * @param print_shape whether to print the output shape.
             */
-           void build(Tensor<feature_t> &input)
+           void build(Tensor<feature_t> &input, bool print_shape = false)
            {
                assert(input.shape[0] > 0);
                assert(input.shape[1] > 0);
+               assert(input.shape.size() == 3);
                this->output->set_exponent(input.exponent);
 
                std::vector<int> output_shape(input.shape.size(), 1);
                output_shape[2] = input.shape[2];
-               this->output->set_shape(output_shape);
+               this->output_shape = output_shape;
+               this->output->set_shape(this->output_shape);
                this->output->free_element();
+
+               if (print_shape)
+               {
+                   std::cout << this->name << " | ";
+                   this->output->print_shape();
+               }
            }
 
            /**
@@ -87,7 +96,11 @@ namespace dl
                DL_LOG_LAYER_LATENCY_INIT();
 
                DL_LOG_LAYER_LATENCY_START();
-               this->output->apply_element();
+               if (this->output->shape != this->output_shape)
+               {
+                   this->output->set_shape(this->output_shape);
+               }
+               this->output->malloc_element();
                this->output->set_exponent(input.exponent);
                DL_LOG_LAYER_LATENCY_END(this->name, "apply");
 
@@ -2,7 +2,7 @@
 
 #include "dl_constant.hpp"
 #include "dl_variable.hpp"
-#include "dl_nn_LeakyReLU.hpp"
+#include "dl_nn_leakyrelu.hpp"
 #include "dl_layer_base.hpp"
 
 namespace dl
@ -20,13 +20,13 @@ namespace dl
|
||||
class LeakyReLU : public Layer
|
||||
{
|
||||
private:
|
||||
feature_t activation_alpha; /*<! quantized alpha >*/
|
||||
int activation_exponent; /*<! exponent of quantized alpha >*/
|
||||
Tensor<feature_t> *output; /*<! output ptr of leakyrelu>*/
|
||||
bool inplace; /*<! true: the output will store to input0
|
||||
false: the output will store to a seperate memeory >*/
|
||||
feature_t activation_alpha; /*<! quantized alpha >*/
|
||||
int activation_exponent; /*<! exponent of quantized alpha >*/
|
||||
Tensor<feature_t> *output; /*<! output ptr of leakyrelu>*/
|
||||
bool inplace; /*<! true: the output will store to input0
|
||||
false: the output will store to a separate memory >*/
|
||||
std::vector<int> output_shape; /*<! output shape of leakyrelu >*/
|
||||
public:
|
||||
|
||||
/**
|
||||
* @brief Construct a new LeakyReLU object
|
||||
*
|
||||
@ -34,9 +34,9 @@ namespace dl
|
||||
* @param activation_exponent exponent of quantized alpha
|
||||
* @param name name of leakyrelu
|
||||
* @param inplace true: the output will store to input0
|
||||
* false: the output will store to a seperate memeory
|
||||
* false: the output will store to a separate memory
|
||||
*/
|
||||
LeakyReLU(const int activation_alpha, const int activation_exponent, const char *name = NULL, bool inplace = false) : Layer(name), output(NULL)
|
||||
LeakyReLU(const int activation_alpha, const int activation_exponent, const char *name = "LeakyReLU", bool inplace = false) : Layer(name), output(NULL), output_shape({})
|
||||
{
|
||||
this->activation_alpha = activation_alpha;
|
||||
this->activation_exponent = activation_exponent;
|
||||
@ -47,7 +47,7 @@ namespace dl
|
||||
* @brief Destroy the LeakyReLU object
|
||||
*
|
||||
*/
|
||||
~LeakyReLU()
|
||||
~LeakyReLU()
|
||||
{
|
||||
if ((!this->inplace) && (this->output != NULL))
|
||||
{
|
||||
@ -59,24 +59,32 @@ namespace dl
|
||||
* @brief Update output shape and exponent
|
||||
*
|
||||
* @param input as an input
|
||||
* @param print_shape whether to print the output shape.
|
||||
*/
|
||||
void build(Tensor<feature_t> &input)
|
||||
void build(Tensor<feature_t> &input, bool print_shape = false)
|
||||
{
|
||||
if(!this->inplace)
|
||||
this->output_shape = input.shape;
|
||||
if (!this->inplace)
|
||||
{
|
||||
if(this->output != NULL)
|
||||
if (this->output != NULL)
|
||||
{
|
||||
this->output = new Tensor<feature_t>;
|
||||
}
|
||||
this->output->set_shape(input.shape);
|
||||
}
|
||||
this->output->set_shape(this->output_shape);
|
||||
this->output->set_exponent(input.exponent);
|
||||
this->output->free_element();
|
||||
}
|
||||
else
|
||||
{
|
||||
this->output = &input;
|
||||
this->output->set_shape(this->output_shape);
|
||||
}
|
||||
|
||||
if (print_shape)
|
||||
{
|
||||
std::cout << this->name << " | ";
|
||||
this->output->print_shape();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
@ -100,10 +108,14 @@ namespace dl
|
||||
{
|
||||
DL_LOG_LAYER_LATENCY_INIT();
|
||||
|
||||
if(!this->inplace)
|
||||
if (!this->inplace)
|
||||
{
|
||||
DL_LOG_LAYER_LATENCY_START();
|
||||
this->output->apply_element();
|
||||
if (this->output->shape != this->output_shape)
|
||||
{
|
||||
this->output->set_shape(this->output_shape);
|
||||
}
|
||||
this->output->malloc_element();
|
||||
this->output->set_exponent(input.exponent);
|
||||
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
|
||||
|
||||
@ -114,6 +126,10 @@ namespace dl
|
||||
else
|
||||
{
|
||||
DL_LOG_LAYER_LATENCY_START();
|
||||
if (this->output->shape != this->output_shape)
|
||||
{
|
||||
this->output->set_shape(this->output_shape);
|
||||
}
|
||||
nn::leakyrelu<true>(*this->output, input, this->activation_alpha, this->activation_exponent, assign_core);
|
||||
DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
|
||||
}
|
||||
|
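LeakyReLU (and the other activation layers below) now exposes both storage modes explicitly. A hedged sketch, assuming an int8 input tensor x; the element * 2^exponent encoding follows the quantization convention used throughout these headers:

// Illustrative: copy mode vs. inplace mode.
dl::layer::LeakyReLU<int8_t> lrelu(13, -7, "lrelu");  // alpha = 13 * 2^-7, about 0.1
lrelu.build(x);
dl::Tensor<int8_t> &y = lrelu.call(x);                // y owns its own buffer

dl::layer::LeakyReLU<int8_t> lrelu_ip(13, -7, "lrelu", true);
lrelu_ip.build(x);
dl::Tensor<int8_t> &y_ip = lrelu_ip.call(x);          // y_ip aliases x; x is overwritten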
@ -22,28 +22,28 @@ namespace dl
class Max2D : public Layer
{
private:
Tensor<feature_t> *output; /*<! output ptr of max2d >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a seperate memeory >*/
Tensor<feature_t> *output; /*<! output ptr of max2d >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of max2d >*/
public:

/**
* @brief Construct a new Max2D object.
*
* @param name name of max2d
* @param inplace true: the output will store to input0
* false: the output will store to a seperate memeory
* false: the output will store to a separate memory
*/
Max2D(const char *name = NULL, bool inplace = false) : Layer(name), output(NULL)
Max2D(const char *name = "Max2D", bool inplace = false) : Layer(name),
output(NULL), inplace(inplace), output_shape({})
{
this->inplace = inplace;
}

/**
* @brief Destroy the Max2D object
*
*/
~Max2D()
~Max2D()
{
if ((!this->inplace) && (this->output != NULL))
{
@ -58,24 +58,34 @@ namespace dl
*
* @param input0 as one input
* @param input1 as another input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1)
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
{
assert(input0.is_same_shape(input1));
assert(input0.exponent == input1.exponent);
this->output_shape = input0.shape;

if(!this->inplace)
if (!this->inplace)
{
if(this->output != NULL)
if (this->output != NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(input0.shape);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input0;
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@ -100,10 +110,14 @@ namespace dl
{
DL_LOG_LAYER_LATENCY_INIT();

if(!this->inplace)
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input0.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

@ -114,6 +128,10 @@ namespace dl
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::max2d<true>(*this->output, input0, input1, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "max2d");
}

@ -23,44 +23,54 @@ namespace dl
std::vector<int> filter_shape; /*<! filter shape in [filter_height, filter_width] >*/
const int stride_y; /*<! stride in height >*/
const int stride_x; /*<! stride in width >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET >*/
const padding_type_t padding_type; /*<! one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN >*/
std::vector<int> padding; /*<! padding size needed in [top, bottom, left, right] of this operation >*/
Tensor<feature_t> *output; /*<! output ptr of MaxPool2D >*/
std::vector<int> output_shape; /*<! output shape of MaxPool2D >*/

public:

/**
* @brief Construct a new MaxPool2D object.
*
* @param filter_shape filter shape in [filter_height, filter_width]
* @param padding_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET,
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN or PADDING_NOT_SET,
* - PADDING_VALID means no padding
* PADDING_SAME and PADDING_SAME_MXNET results in padding with zeros evenly to the left/right or up/down of the input
* PADDING_SAME_END and PADDING_SAME_BEGIN results in padding with zeros evenly to the left/right or up/down of the input
* such that output has the same height/width dimension as the input,
* - PADDING_SAME results padding in TensorFlow style
* - PADDING_SAME_MXNET results padding in MXNET style
* - PADDING_SAME_END results padding in TensorFlow style
* - PADDING_SAME_BEGIN results padding in MXNET style
* - PADDING_NOT_SET means padding with the specific "padding" value below.
* @param padding if padding_type is PADDING_NOT_SET, this value will be used as padding size.
* the shape must be 4, the value of each position is: [padding top, padding bottom, padding left, padding right]
* @param stride_y stride in height
* @param stride_x stride in width
* @param name name of layer
*/
MaxPool2D(const std::vector<int> filter_shape,
const padding_type_t padding_type = PADDING_VALID,
std::vector<int> padding = {},
const int stride_y = 1,
const int stride_x = 1,
const char *name = NULL) : Layer(name),
filter_shape(filter_shape),
stride_y(stride_y),
stride_x(stride_x),
padding_type(padding_type)
const char *name = "MaxPool2D") : Layer(name),
filter_shape(filter_shape),
padding_type(padding_type),
padding(padding),
stride_y(stride_y),
stride_x(stride_x),
output_shape({})
{
this->output = new Tensor<feature_t>;
if (this->padding_type == PADDING_NOT_SET)
{
assert(this->padding.size() == 4);
}
}

/**
* @brief Destroy the MaxPool2D object.
*
*/
~MaxPool2D()
~MaxPool2D()
{
if (this->output != NULL)
{
@ -72,18 +82,29 @@ namespace dl
* @brief Update output shape and padding.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input)
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(input.shape[0] > 0);
assert(input.shape[1] > 0);
this->output->set_exponent(input.exponent);
std::vector<int> output_shape = nn::get_output_shape(input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
this->output->set_shape(output_shape);
assert(input.shape.size() == 3);

this->padding = nn::get_pad_size(output_shape, input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
input.set_padding_size(this->padding);
this->output->set_exponent(input.exponent);
this->output_shape = nn::get_output_shape(input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type, false, this->padding);
this->output->set_shape(this->output_shape);

if (this->padding_type != PADDING_NOT_SET)
{
this->padding = nn::get_pad_size(this->output_shape, input.shape, filter_shape, this->stride_y, this->stride_x, this->padding_type);
}
this->output->free_element();

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@ -111,7 +132,11 @@ namespace dl
DL_LOG_LAYER_LATENCY_INIT();

DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

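The new PADDING_NOT_SET branch lets callers hand MaxPool2D a literal padding vector instead of a derived one. A sketch with illustrative values:

// [top, bottom, left, right]; the constructor asserts the vector has 4 entries.
dl::layer::MaxPool2D<int8_t> pool({2, 2},           // filter_shape
                                  PADDING_NOT_SET,  // take padding from the next argument
                                  {0, 1, 0, 1},     // pad one row below, one column right
                                  2, 2,             // stride_y, stride_x
                                  "pool");
pool.build(x, true);  // output shape comes from get_output_shape(..., this->padding)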
@ -22,28 +22,28 @@ namespace dl
class Min2D : public Layer
{
private:
Tensor<feature_t> *output; /*<! output of ptr min2d>*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a seperate memeory >*/
public:

Tensor<feature_t> *output; /*<! output of ptr min2d>*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of min2d >*/
public:
/**
* @brief Construct a new Min2D object
*
* @param name name of min2d
* @param inplace true: the output will store to input0
* false: the output will store to a seperate memeory
* false: the output will store to a separate memory
*/
Min2D(const char *name = NULL, bool inplace = false) : Layer(name), output(NULL)
{
this->inplace = inplace;
}
Min2D(const char *name = "Min2D", bool inplace = false) : Layer(name),
output(NULL),
inplace(inplace),
output_shape({}) {}

/**
* @brief Destroy the Min2D object
*
*/
~Min2D()
~Min2D()
{
if ((!this->inplace) && (this->output != NULL))
{
@ -58,25 +58,34 @@ namespace dl
*
* @param input0 as one input
* @param input1 as another input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1)
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
{
assert(input0.is_same_shape(input1));
assert(input0.exponent == input1.exponent);
this->output_shape = input0.shape;

if(!this->inplace)
if (!this->inplace)
{
if(this->output != NULL)
if (this->output != NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_shape(input0.shape);
this->output->set_shape(this->output_shape);
this->output->set_exponent(input0.exponent);
this->output->free_element();
}
else
{
this->output = &input0;

}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@ -101,10 +110,14 @@ namespace dl
{
DL_LOG_LAYER_LATENCY_INIT();

if(!this->inplace)
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input0.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

@ -115,6 +128,10 @@ namespace dl
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::min2d<true>(*this->output, input0, input1, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "min2d");
}

@ -21,14 +21,13 @@ namespace dl
class Mul2D : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
const int output_exponent; /*<! exponent of output >*/
const Activation<feature_t> *activation; /*<! activation of Mul2D, if you don't specify anything, no activation is applied >*/
Tensor<feature_t> *output; /*<! output ptr of Mul2D >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a seperate memeory >*/
Tensor<feature_t> *output; /*<! output ptr of Mul2D >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of Mul2D >*/
public:
const int output_exponent; /*<! exponent of output >*/

/**
* @brief Construct a new Mul2D object.
*
@ -36,18 +35,24 @@ namespace dl
* @param activation activation of Mul2D, if you don't specify anything, no activation is applied
* @param name name of layer
* @param inplace true: the output will store to input0
* false: the output will store to a seperate memeory
* false: the output will store to a separate memory
*/
Mul2D(const int output_exponent, const Activation<feature_t> *activation = NULL, const char *name = NULL, bool inplace = false) : Layer(name),
output_exponent(output_exponent),activation(activation), output(NULL)
Mul2D(const int output_exponent,
const Activation<feature_t> *activation = NULL,
const char *name = "Mul2D",
bool inplace = false) : Layer(name),
output_exponent(output_exponent),
activation(activation),
output(NULL),
inplace(inplace),
output_shape({})
{
this->inplace = inplace;
}

/**
* @brief Destroy the Multiply2D object.
*/
~Mul2D()
~Mul2D()
{
if ((!this->inplace) && (this->output != NULL))
{
@ -61,24 +66,34 @@ namespace dl
*
* @param input0 as one input
* @param input1 as another input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1)
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
{
assert(input0.is_same_shape(input1));
this->output_shape = input0.shape;

if (!this->inplace)
{
if(this->output != NULL)
if (this->output != NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(input0.shape);
this->output->set_shape(this->output_shape);
this->output->free_element();
}


else
{
this->output = &input0;
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@ -106,7 +121,11 @@ namespace dl
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

@ -117,6 +136,10 @@ namespace dl
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::mul2d<true>(*this->output, input0, input1, this->activation, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "mul2d");
}

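The explicit output_exponent on Mul2D exists because multiplying two fixed-point values sums their exponents, so the product has to be rescaled into the output's chosen Q-format. Worked arithmetic under the element * 2^exponent convention these headers use:

// Not library code, just the arithmetic the layer has to perform:
//   input0: 96 * 2^-7 = 0.75        input1: 64 * 2^-7 = 0.5
//   raw product: 96 * 64 = 6144 with exponent -14, i.e. 6144 * 2^-14 = 0.375
//   rescaled to output_exponent = -7: 6144 >> 7 = 48, and 48 * 2^-7 = 0.375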
@ -24,9 +24,9 @@ namespace dl
int activation_exponent; /*<! exponent of quantized alpha elements >*/
Tensor<feature_t> *output; /*<! output ptr of prelu >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a seperate memeory >*/
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of prelu >*/
public:

/**
* @brief Construct a new PReLU object
*
@ -34,20 +34,25 @@ namespace dl
* @param activation_exponent exponent of quantized alpha elements
* @param name name of prelu
* @param inplace true: the output will store to input0
* false: the output will store to a seperate memeory
* false: the output will store to a separate memory
*/
PReLU(const feature_t *activation_element, const int activation_exponent = 0, const char *name = NULL, bool inplace = false) : Layer(name), output(NULL)
PReLU(const feature_t *activation_element,
const int activation_exponent = 0,
const char *name = "PReLU",
bool inplace = false) : Layer(name),
activation_element(activation_element),
activation_exponent(activation_exponent),
output(NULL),
inplace(inplace),
output_shape({})
{
this->activation_element = activation_element;
this->activation_exponent = activation_exponent;
this->inplace = inplace;
}

/**
* @brief Destroy the PReLU object
*
*/
~PReLU()
~PReLU()
{
if ((!this->inplace) && (this->output != NULL))
{
@ -59,23 +64,31 @@ namespace dl
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input)
void build(Tensor<feature_t> &input, bool print_shape = false)
{
if(!this->inplace)
this->output_shape = input.shape;
if (!this->inplace)
{
if(this->output != NULL)
if (this->output != NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(input.exponent);
this->output->set_shape(input.shape);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input;
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@ -99,11 +112,15 @@ namespace dl
{
DL_LOG_LAYER_LATENCY_INIT();

if(!this->inplace)
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->set_exponent(input.exponent);
this->output->apply_element();
this->output->malloc_element();
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

DL_LOG_LAYER_LATENCY_START();
@ -113,6 +130,10 @@ namespace dl
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
}

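PReLU differs from LeakyReLU only in carrying one quantized alpha per channel. A hedged construction sketch; the per-channel layout of activation_element is an assumption, not spelled out in this header:

// Illustrative: 16-channel PReLU, each alpha decoding as value * 2^-7.
static const int8_t alphas[16] = {26, 26, 13 /* ...one entry per channel */};
dl::layer::PReLU<int8_t> prelu(alphas, -7, "prelu");
prelu.build(x, true);
dl::Tensor<int8_t> &y = prelu.call(x);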
@ -21,29 +21,28 @@ namespace dl
class ReLU : public Layer
{
private:
Tensor<feature_t> *output; /*<! output ptr of relu >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a seperate memeory >*/
Tensor<feature_t> *output; /*<! output ptr of relu >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of relu >*/
public:

/**
* @brief Construct a new ReLU object
*
* @param name name of relu
* @param inplace true: the output will store to input0
* false: the output will store to a seperate memeory
* false: the output will store to a separate memory
*/
ReLU(const char *name = NULL, bool inplace = false) : Layer(name), output(NULL)
ReLU(const char *name = "ReLU", bool inplace = false) : Layer(name),
output(NULL), inplace(inplace), output_shape({})
{
this->inplace = inplace;
}

/**
* @brief Destroy the ReLU object
*
*/
~ReLU()
~ReLU()
{
if ((!this->inplace) && (this->output != NULL))
{
@ -55,23 +54,31 @@ namespace dl
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input)
void build(Tensor<feature_t> &input, bool print_shape = false)
{
if(!this->inplace)
this->output_shape = input.shape;
if (!this->inplace)
{
if(this->output != NULL)
if (this->output != NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(input.exponent);
this->output->set_shape(input.shape);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input;
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@ -95,10 +102,14 @@ namespace dl
{
DL_LOG_LAYER_LATENCY_INIT();

if(!this->inplace)
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

@ -109,6 +120,10 @@ namespace dl
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::relu(*this->output, input, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "relu");
}

@ -0,0 +1,124 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
* @brief Reshape(input)
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
*/
template <typename feature_t>
class Reshape : public Layer
{
private:
int output_exponent; /*<! exponent of output >*/
Tensor<feature_t> *output; /*<! output ptr of Reshape >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of Reshape >*/
public:
/**
* @brief Construct a new Reshape object
*
* @param shape the target shape
* @param name name of Reshape layer
* @param inplace true: the output will store to input0
* false: the output will store to a separate memory
*/
Reshape(std::vector<int> shape, const char *name = "Reshape", bool inplace = false) : Layer(name),
output_shape(shape), inplace(inplace)
{
}

/**
* @brief Destroy the Reshape object
*
*/
~Reshape()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}

/**
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
this->output_exponent = input.exponent;
if (!this->inplace)
{
if (this->output != NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input;
this->output->set_shape(this->output_shape);
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
* @brief Get the output
*
* @return Tensor<feature_t>& Reshape result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}

/**
* @brief Call Reshape operation.
*
* @param input as an input
* @return Tensor<feature_t>& Reshape result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input)
{
DL_LOG_LAYER_LATENCY_INIT();

if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_exponent(input.exponent);
this->output->reshape(this->output_shape);
this->output->copy_element(input, true);
DL_LOG_LAYER_LATENCY_END(this->name, "reshape");
}
else
{
DL_LOG_LAYER_LATENCY_START();
this->output->reshape(this->output_shape);
DL_LOG_LAYER_LATENCY_END(this->name, "reshape");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl
@ -0,0 +1,127 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
* @brief
*
* @tparam feature_t
*/
template <typename feature_t>
class Squeeze : public Layer
{
private:
int output_exponent; /*<! exponent of output >*/
Tensor<feature_t> *output; /*<! output ptr of Squeeze >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
int axis; /*<! the dim to be removed. make sure the length of the dim is equal to 1.
if axis == INT32_MAX, all the dims with length==1 will be removed. >*/
std::vector<int> output_shape; /*<! output shape of Squeeze >*/
public:
/**
* @brief Construct a new Squeeze object
*
* @param axis the dim to be removed. make sure the length of the dim is equal to 1.
* if axis == INT32_MAX, all the dims with length==1 will be removed.
* @param name name of Squeeze layer
* @param inplace true: the output will store to input0
* false: the output will store to a separate memory
*/
Squeeze(int axis = INT32_MAX, const char *name = "Squeeze", bool inplace = false) : Layer(name), axis(axis), inplace(inplace), output_shape({})
{
}

/**
* @brief Destroy the Squeeze object
*
*/
~Squeeze()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}

/**
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
this->output_exponent = input.exponent;
if (!this->inplace)
{
if (this->output != NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(input.shape);
this->output->squeeze(this->axis);
this->output->free_element();
}
else
{
this->output = &input;
this->output->set_shape(input.shape);
this->output->squeeze(this->axis);
}
this->output_shape = this->output->shape;

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
* @brief Get the output
*
* @return Tensor<feature_t>& Squeeze result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}

/**
* @brief Call Squeeze operation.
*
* @param input as an input
* @return Tensor<feature_t>& Squeeze result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input)
{
DL_LOG_LAYER_LATENCY_INIT();

if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_exponent(input.exponent);
this->output->set_shape(this->output_shape);
this->output->copy_element(input, true);
DL_LOG_LAYER_LATENCY_END(this->name, "Squeeze");
}
else
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_shape(this->output_shape);
DL_LOG_LAYER_LATENCY_END(this->name, "Squeeze");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl

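Squeeze pairs naturally with the global pooling layers earlier in this commit. A sketch:

// Illustrative: [1, 1, C] -> [C] after a global pool.
dl::layer::Squeeze<int8_t> squeeze(INT32_MAX, "squeeze");  // INT32_MAX: drop every length-1 dim
squeeze.build(pooled, true);
dl::Tensor<int8_t> &vec = squeeze.call(pooled);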
@ -21,13 +21,13 @@ namespace dl
class Sub2D : public Layer
{
private:
const int output_exponent; /*<! exponent of output >*/
const Activation<feature_t> *activation; /*<! activation of Mul2D, if you don't specify anything, no activation is applied >*/
Tensor<feature_t> *output; /*<! output ptr of Sub2D >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a seperate memeory >*/
const int output_exponent; /*<! exponent of output >*/
const Activation<feature_t> *activation; /*<! activation of Sub2D, if you don't specify anything, no activation is applied >*/
Tensor<feature_t> *output; /*<! output ptr of Sub2D >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> output_shape; /*<! output shape of Sub2D >*/
public:

/**
* @brief Construct a new Sub2D object.
*
@ -35,18 +35,17 @@ namespace dl
* @param activation activation of Mul2D, if you don't specify anything, no activation is applied
* @param name name of layer
* @param inplace true: the output will store to input0
* false: the output will store to a seperate memeory
* false: the output will store to a separate memory
*/
Sub2D(const int output_exponent, const Activation<feature_t> *activation = NULL, const char *name = NULL, bool inplace = false) : Layer(name),
output_exponent(output_exponent), activation(activation), output(NULL)
Sub2D(const int output_exponent, const Activation<feature_t> *activation = NULL, const char *name = "Sub2D", bool inplace = false) : Layer(name),
output_exponent(output_exponent), activation(activation), output(NULL), inplace(inplace), output_shape({})
{
this->inplace = inplace;
}

/**
* @brief Destroy the Sub2D object.
*/
~Sub2D()
~Sub2D()
{
if ((!this->inplace) && (this->output != NULL))
{
@ -60,22 +59,32 @@ namespace dl
*
* @param input0 as one input
* @param input1 as another input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1)
void build(Tensor<feature_t> &input0, Tensor<feature_t> &input1, bool print_shape = false)
{
assert(input0.is_same_shape(input1));
this->output_shape = input0.shape;
if (!this->inplace)
{
if(this->output != NULL)
if (this->output != NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(input0.shape);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
}
else
{
this->output = &input0;
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
@ -103,7 +112,11 @@ namespace dl
if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->apply_element();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input0.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");

@ -114,6 +127,10 @@ namespace dl
else
{
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
nn::sub2d<true>(*this->output, input0, input1, this->activation, assign_core, this->output_exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "sub2d");
}

@ -0,0 +1,126 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_tool.hpp"
#include "dl_layer_base.hpp"

namespace dl
{
namespace layer
{
/**
* @brief
*
* @tparam feature_t
*/
template <typename feature_t>
class Transpose : public Layer
{
private:
int output_exponent; /*<! exponent of output >*/
Tensor<feature_t> *output; /*<! output ptr of Transpose >*/
bool inplace; /*<! true: the output will store to input0
false: the output will store to a separate memory >*/
std::vector<int> perm; /*<! the new arrangement of the dims. if perm == {}, the dims arrangement will be reversed. >*/
std::vector<int> output_shape; /*<! output shape of Transpose >*/
public:
/**
* @brief Construct a new Transpose object
*
* @param perm the new arrangement of the dims. if perm == {}, the dims arrangement will be reversed.
* @param name name of Transpose layer
* @param inplace true: the output will store to input
* false: the output will store to a separate memory
*/
Transpose(std::vector<int> perm = {}, const char *name = "Transpose", bool inplace = false) : Layer(name), perm(perm), inplace(inplace), output_shape({})
{
}

/**
* @brief Destroy the Transpose object
*
*/
~Transpose()
{
if ((!this->inplace) && (this->output != NULL))
{
delete this->output;
}
}

/**
* @brief Update output shape and exponent
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
this->output_exponent = input.exponent;
this->output_shape = input.shape;
for (int i = 0; i < this->perm.size(); i++)
{
this->output_shape[i] = input.shape[this->perm[i]];
}
if (!this->inplace)
{
if (this->output != NULL)
{
this->output = new Tensor<feature_t>;
}
this->output->set_exponent(this->output_exponent);
this->output->set_shape(this->output_shape);
this->output->free_element();
}
else
{
this->output = &input;
this->output->set_shape(this->output_shape);
}

if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}

/**
* @brief Get the output
*
* @return Tensor<feature_t>& Transpose result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}

/**
* @brief Call Transpose operation.
*
* @param input as an input.
* @return Tensor<feature_t>& Transpose result.
*/
Tensor<feature_t> &call(Tensor<feature_t> &input)
{
DL_LOG_LAYER_LATENCY_INIT();

if (!this->inplace)
{
DL_LOG_LAYER_LATENCY_START();
this->output->set_exponent(input.exponent);
this->output->transpose(input, this->perm);
DL_LOG_LAYER_LATENCY_END(this->name, "transpose");
}
else
{
DL_LOG_LAYER_LATENCY_START();
this->output->transpose(this->perm);
DL_LOG_LAYER_LATENCY_END(this->name, "transpose");
}
return *this->output;
}
};
} // namespace layer
} // namespace dl

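Transpose's build() shows the perm semantics: output dim i takes input dim perm[i]. A sketch:

// Illustrative: HWC -> CHW.
dl::layer::Transpose<int8_t> to_chw({2, 0, 1}, "to_chw");
to_chw.build(x, true);                   // [H, W, C] becomes [C, H, W]
dl::Tensor<int8_t> &chw = to_chw.call(x);
// Per the comment above, an empty perm ({}) reverses the dim order instead.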
@ -0,0 +1,68 @@
#pragma once

#include "dl_image.hpp"

typedef struct
{
int area; /*!< Area of connected domains >*/
std::vector<int> center; /*<! centroid of connected domains [x, y] >*/
std::vector<int> box; /*<! [left_up_x, left_up_y, right_down_x, right_down_y] >*/
} components_stats_t;

class ColorDetector
{
private:
std::vector<std::vector<components_stats_t>> results; /*!< detection results >*/

public:
std::vector<std::vector<uint8_t>> color_thresh; /*!< threshold of colors, the threshold of each color is composed of 6 numbers >*/
std::vector<int> area_thresh; /*!< the area threshold of each color,
the area that is smaller than the threshold is filtered >*/
bool bgr; /*!< true: the input image is in BGR format
false: the input image is in RGB format >*/

/**
* @brief get the color threshold of rectangular region in the image
*
* @param image the input image
* @param box the coordinates of the rectangular region : [left_up_x, left_up_y, right_down_x, right_down_y]
* @return std::vector<uint8_t> the threshold.
*/
std::vector<uint8_t> cal_color_thresh(dl::Tensor<uint8_t> &image, std::vector<int> box);

/**
* @brief detect the colors based on the color thresholds
*
* @param image the input image.
* @return std::vector<std::vector<components_stats_t>>& detection result.
*/
std::vector<std::vector<components_stats_t>> &detect(dl::Tensor<uint8_t> &image);

/**
* @brief Construct a new Color Detector object
*
* @param color_thresh threshold of colors, the threshold of each color is composed of 6 numbers
* @param area_thresh the area threshold of each color, the area that is smaller than the threshold is filtered
* @param bgr true: the input image is in BGR format
* false: the input image is in RGB format
*/
ColorDetector(std::vector<std::vector<uint8_t>> color_thresh, std::vector<int> area_thresh, bool bgr = false) : color_thresh(color_thresh), area_thresh(area_thresh), bgr(bgr)
{
}

/**
* @brief Destroy the Color Detector object
*
*/
~ColorDetector() {}

/**
* @brief Get the results object
*
* @return std::vector<std::vector<components_stats_t>>& the detection result.
*/
std::vector<std::vector<components_stats_t>> &get_results()
{
return this->results;
}
};
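A ColorDetector sketch; the meaning of the six threshold numbers is not spelled out in this header, so the per-channel min/max reading below is an assumption:

// Illustrative: one tracked color, blobs under 64 px filtered out.
std::vector<std::vector<uint8_t>> thresh = {{0, 40, 90, 130, 60, 100}};  // assumed per-channel min/max
std::vector<int> min_area = {64};
ColorDetector detector(thresh, min_area, false);  // false: RGB input
auto &found = detector.detect(rgb_image);         // one result vector per configured color
for (const components_stats_t &blob : found[0])
    printf("area=%d center=(%d, %d)\n", blob.area, blob.center[0], blob.center[1]);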
@ -92,7 +92,7 @@ namespace face_recognition_tool
* @return dl::Tensor<T>*
*/
template <typename T>
dl::Tensor<T> *transform_mfn_input(dl::Tensor<uint8_t> &image, bool free_input = false, bool do_padding = true);
dl::Tensor<T> *transform_mfn_input(dl::Tensor<uint8_t> &image, bool free_input = false);

/**
* @brief transform the image to the input of a mfn model
@ -106,7 +106,7 @@ namespace face_recognition_tool
* false: do not pad the result
*/
template <typename T>
void transform_mfn_input(dl::Tensor<uint8_t> &image, dl::Tensor<T> &output, bool free_input = false, bool do_padding = true);
void transform_mfn_input(dl::Tensor<uint8_t> &image, dl::Tensor<T> &output, bool free_input = false);

/**
* @brief transform the mfn output embedding to a floating embedding

@ -14,13 +14,13 @@ namespace dl
* @param filter_shape filter shape with dilation
* @param stride_y stride in height
* @param stride_x stride in width
* @param pad_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET
* @param pad_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN
* @param is_conv2d one of true or false,
* - true: serve for Conv2D
* - false: serve for other operations
* @return std::vector<int>
*/
std::vector<int> get_output_shape(const std::vector<int> &input_shape, const std::vector<int> &filter_shape, const int stride_y, const int stride_x, const padding_type_t pad_type, const bool is_conv2d = false);
std::vector<int> get_output_shape(const std::vector<int> &input_shape, const std::vector<int> &filter_shape, const int stride_y, const int stride_x, const padding_type_t pad_type, const bool is_conv2d = false, std::vector<int> padding = {});

/**
* @brief Get the pad size object
@ -30,7 +30,7 @@ namespace dl
* @param filter_shape filter shape with dilation
* @param stride_y stride in height
* @param stride_x stride in width
* @param padding_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN
* @return padding size
*/
std::vector<int> get_pad_size(const std::vector<int> &output_shape, const std::vector<int> &input_shape, const std::vector<int> &filter_shape, const int stride_y, const int stride_x, const padding_type_t padding_type);

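For reference, the shapes these helpers return follow the usual convolution arithmetic (stated here as the conventional formulas, not quoted from the implementation):

// PADDING_VALID:   output_dim = (input_dim - filter_dim) / stride + 1   (integer division)
//   e.g. input 224, filter 3, stride 2:  (224 - 3) / 2 + 1 = 111
// PADDING_SAME_END / PADDING_SAME_BEGIN:  output_dim = ceil(input_dim / stride),
//   with get_pad_size() returning the [top, bottom, left, right] zeros that make it so;
//   the two styles differ only in which side absorbs the odd pixel.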
@ -58,20 +58,20 @@ namespace dl
*/
template <bool inplace = false, typename feature_t>
auto add2d(const int output_exponent,
Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const Activation<feature_t> *activation,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const Activation<feature_t> *activation,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
assert(input0.is_same_shape(input1));

DL_LOG_NN_LATENCY_INIT();

Tensor<feature_t> output;
if constexpr(!inplace)
if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(output_exponent).set_shape(input0.shape).apply_element();
output.set_exponent(output_exponent).set_shape(input0.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

DL_LOG_NN_LATENCY_START();

@ -58,12 +58,12 @@ namespace dl
* @param filter_shape filter_shape in [filter_height, filter_width]
* @param stride_y stride in height
* @param stride_x stride in width
* @param padding_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET,
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN,
* - PADDING_VALID: no padding
* PADDING_SAME and PADDING_SAME_MXNET results in padding with zeros evenly to the left/right or up/down of the input
* PADDING_SAME_END and PADDING_SAME_BEGIN results in padding with zeros evenly to the left/right or up/down of the input
* such that output has the same height/width dimension as the input,
* - PADDING_SAME results padding in TensorFlow style
* - PADDING_SAME_MXNET results padding in MXNET style
* - PADDING_SAME_END results padding in TensorFlow style
* - PADDING_SAME_BEGIN results padding in MXNET style
* @param assign_core not effective yet
* @return avg_pool2d result
*/
@ -81,19 +81,19 @@ namespace dl
DL_LOG_NN_LATENCY_START();
std::vector<int> output_shape = get_output_shape(input.shape, filter_shape, stride_y, stride_x, padding_type);
Tensor<feature_t> output;
output.set_exponent(output_exponent).set_shape(output_shape).apply_element();
output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
std::vector<int> padding(4, 0);

DL_LOG_NN_LATENCY_START();
if (padding_type == PADDING_SAME || padding_type == PADDING_SAME_MXNET)
if (padding_type == PADDING_SAME_END || padding_type == PADDING_SAME_BEGIN)
{
std::vector<int> padding = get_pad_size(output_shape, input.shape, filter_shape, stride_y, stride_x, padding_type);
input.set_padding_size(padding);
padding = get_pad_size(output_shape, input.shape, filter_shape, stride_y, stride_x, padding_type);
}
DL_LOG_NN_LATENCY_END("padding");

DL_LOG_NN_LATENCY_START();
avg_pool2d(output, input, input.padding, filter_shape, stride_y, stride_x, assign_core);
avg_pool2d(output, input, padding, filter_shape, stride_y, stride_x, assign_core);
DL_LOG_NN_LATENCY_END("avg_pool2d");

return output;

63 tools/sdk/esp32/include/esp-face/include/nn/dl_nn_concat.hpp Normal file
@ -0,0 +1,63 @@
#pragma once

#include <vector>
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
namespace nn
{
template <typename feature_t>
void concat(Tensor<feature_t> &output, std::vector<Tensor<feature_t> *> &inputs, int axis, bool free_inputs = false);

template <typename feature_t>
Tensor<feature_t> concat(std::vector<Tensor<feature_t> *> &inputs, int axis, bool free_inputs = false)
{
DL_LOG_NN_LATENCY_INIT();

DL_LOG_NN_LATENCY_START();
assert(inputs.size() > 1);
int shape_size = inputs[0]->shape.size();

if (axis < 0)
{
axis = shape_size + axis;
}

assert((axis < shape_size) && (axis > -1));

int output_shape_axis = inputs[0]->shape[axis];

for (int i = 1; i < inputs.size(); i++)
{
assert(shape_size == inputs[i]->shape.size());
assert(inputs[i]->exponent == inputs[i - 1]->exponent);
output_shape_axis += inputs[i]->shape[axis];

for (int j = 0; j < shape_size; j++)
{
if (j != axis)
{
assert(inputs[i]->shape[j] == inputs[i - 1]->shape[j]);
}
}
}
DL_LOG_NN_LATENCY_END("assert");

DL_LOG_NN_LATENCY_START();
Tensor<feature_t> output;
std::vector<int> output_shape = inputs[0]->shape;
output_shape[axis] = output_shape_axis;
output.set_shape(output_shape);
output.set_exponent(inputs[0]->exponent);
output.malloc_element();
DL_LOG_NN_LATENCY_END("malloc");

DL_LOG_NN_LATENCY_START();
concat(output, inputs, axis, free_inputs);
DL_LOG_NN_LATENCY_END("concat");
return output;
}
} // namespace nn
} // namespace dl

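Usage sketch for the new concat helper; it follows directly from the template above:

// Illustrative: channel-wise concat of two same-shaped branches.
std::vector<dl::Tensor<int8_t> *> branches = {&branch_a, &branch_b};  // equal H, W and exponent
dl::Tensor<int8_t> merged = dl::nn::concat(branches, -1);             // -1 wraps to the last axis
// merged.shape[axis] is the sum of the inputs' sizes on that axis.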
@ -10,7 +10,6 @@ namespace dl
|
||||
{
|
||||
/**
|
||||
* @brief activation(conv2d(input, filter) + bias).
|
||||
* NOTE: When padding_type is SAME, make sure padding is already added in input.
|
||||
*
|
||||
* @param output as an output
|
||||
* @param input as an input
|
||||
@ -34,7 +33,6 @@ namespace dl
|
||||
|
||||
/**
|
||||
* @brief activation(conv2d(input, filter) + bias).
|
||||
* NOTE: When padding_type is SAME, make sure padding is already added in input.
|
||||
*
|
||||
* @param output as an output
|
||||
* @param input as an input
|
||||
@ -56,6 +54,29 @@ namespace dl
|
||||
const Activation<int8_t> *const activation = NULL,
|
||||
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
|
||||
|
||||
/**
|
||||
* @brief activation(conv2d(input, filter) + bias).
|
||||
*
|
||||
* @param output as an output
|
||||
* @param input as an input
|
||||
* @param padding padding size needed in [top, bottom, left, right] of this operation
|
||||
* @param filter filter of conv2d
|
||||
* @param stride_y stride in height
|
||||
* @param stride_x stride in width
|
||||
* @param bias bias of conv2d, if you don't specify anything, no bias is added
|
||||
* @param activation activation of conv2d, if you don't specify anything, no activation is applied
|
||||
* @param assign_core not effective yet
|
||||
*/
|
||||
void conv2d(Tensor<int8_t> &output,
Tensor<int8_t> &input,
std::vector<int> &padding,
const Filter<int8_t> &filter,
const int stride_y,
const int stride_x,
const Bias<int16_t> *const bias = NULL,
const Activation<int8_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

/**
* @brief activation(conv2d(input, filter) + bias).
*
@ -67,25 +88,25 @@ namespace dl
* @param filter Filter of conv2d
* @param stride_y stride in height
* @param stride_x stride in width
* @param padding_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET,
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN,
* - PADDING_VALID: no padding
* PADDING_SAME and PADDING_SAME_MXNET results in padding with zeros evenly to the left/right or up/down of the input
* PADDING_SAME_END and PADDING_SAME_BEGIN results in padding with zeros evenly to the left/right or up/down of the input
* such that output has the same height/width dimension as the input,
* - PADDING_SAME results padding in TensorFlow style
* - PADDING_SAME_MXNET results padding in MXNET style
* - PADDING_SAME_END results padding in TensorFlow style
* - PADDING_SAME_BEGIN results padding in MXNET style
* @param bias bias of conv2d, if you don't specify anything, no bias is added
* @param activation activation of conv2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
* @return conv2d result
*/
template <typename feature_t>
template <typename feature_t, typename bias_t>
Tensor<feature_t> conv2d(const int output_exponent,
Tensor<feature_t> &input,
const Filter<feature_t> &filter,
const int stride_y,
const int stride_x,
const padding_type_t padding_type,
const Bias<feature_t> *bias,
const Bias<bias_t> *bias,
const Activation<feature_t> *activation,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
@ -94,20 +115,19 @@ namespace dl
DL_LOG_NN_LATENCY_START();
std::vector<int> output_shape = get_output_shape(input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type, true);
Tensor<feature_t> output;
output.set_exponent(output_exponent).set_shape(output_shape).apply_element();
output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

std::vector<int> padding(4, 0);
DL_LOG_NN_LATENCY_START();
if (padding_type == PADDING_SAME || padding_type == PADDING_SAME_MXNET)
if (padding_type == PADDING_SAME_END || padding_type == PADDING_SAME_BEGIN)
{
std::vector<int> padding = get_pad_size(output_shape, input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type);
input.set_padding_size(padding);
input.set_padding_value(padding, 0);
padding = get_pad_size(output_shape, input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type);
}
DL_LOG_NN_LATENCY_END("padding");

DL_LOG_NN_LATENCY_START();
conv2d(output, input, input.padding, filter, stride_y, stride_x, bias, activation, assign_core);
conv2d(output, input, padding, filter, stride_y, stride_x, bias, activation, assign_core);
DL_LOG_NN_LATENCY_END("conv2d");

return output;
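
For orientation, a minimal call sketch of the templated conv2d above. This is a hedged example, not part of the commit: `input`, `filter`, `bias` and `activation` are assumed to be already-constructed quantized objects (their construction is elided), and the exponent values are illustrative.

    // Hedged sketch: int8 feature maps with an int16 bias, TensorFlow-style padding.
    dl::Tensor<int8_t> input;
    input.set_exponent(-7).set_shape({96, 96, 3}).malloc_element();
    dl::Tensor<int8_t> output = dl::nn::conv2d<int8_t, int16_t>(
        /*output_exponent=*/-7, input, filter,
        /*stride_y=*/1, /*stride_x=*/1,
        PADDING_SAME_END, &bias, &activation);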
@ -10,7 +10,6 @@ namespace dl
{
/**
* @brief activate(depthwise_conv2d(input, filter) + bias)
* NOTE: When padding_type is SAME, make sure padding is already added in input
*
* @param output as an output
* @param input as an input
@ -34,7 +33,6 @@ namespace dl

/**
* @brief activate(depthwise_conv2d(input, filter) + bias)
* NOTE: When padding_type is SAME, make sure padding is already added in input
*
* @param output as an output
* @param input as an input
@ -56,6 +54,29 @@ namespace dl
const Activation<int8_t> *activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

/**
* @brief activate(depthwise_conv2d(input, filter) + bias)
*
* @param output as an output
* @param input as an input
* @param padding padding size needed in [top, bottom, left, right] of this operation
* @param filter Filter of depthwise_conv2d
* @param stride_y stride in height
* @param stride_x stride in width
* @param bias bias of depthwise_conv2d, if you don't specify anything, no bias is added
* @param activation activation of depthwise_conv2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
*/
void depthwise_conv2d(Tensor<int8_t> &output,
Tensor<int8_t> &input,
std::vector<int> &padding,
const Filter<int8_t> &filter,
const int stride_y,
const int stride_x,
const Bias<int16_t> *bias = NULL,
const Activation<int8_t> *activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

/**
* @brief activation(depthwise_conv2d(input, filter) + bias)
*
@ -67,25 +88,25 @@ namespace dl
* @param filter filter of depthwise_conv2d
* @param stride_y stride in height
* @param stride_x stride in width
* @param pad_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET,
* @param pad_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN,
* - PADDING_VALID means no padding
* PADDING_SAME and PADDING_SAME_MXNET results in padding with zeros evenly to the left/right or up/down of the input
* PADDING_SAME_END and PADDING_SAME_BEGIN results in padding with zeros evenly to the left/right or up/down of the input
* such that output has the same height/width dimension as the input,
* - PADDING_SAME results padding in TensorFlow style
* - PADDING_SAME_MXNET results padding in MXNET style
* - PADDING_SAME_END results padding in TensorFlow style
* - PADDING_SAME_BEGIN results padding in MXNET style
* @param bias bias of depthwise_conv2d, if you don't specify anything, no bias is added
* @param activation activation of depthwise_conv2d, if you don't specify anything, no activation is applied
* @param assign_core not effective yet
* @return depthwise_conv2d result
*/
template <typename feature_t>
template <typename feature_t, typename bias_t>
Tensor<feature_t> depthwise_conv2d(const int output_exponent,
Tensor<feature_t> &input,
const Filter<feature_t> &filter,
const int stride_y,
const int stride_x,
const padding_type_t padding_type,
const Bias<feature_t> *bias,
const Bias<bias_t> *bias,
const Activation<feature_t> *activation,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
@ -94,20 +115,20 @@ namespace dl
DL_LOG_NN_LATENCY_START();
std::vector<int> output_shape = get_output_shape(input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type);
Tensor<feature_t> output;
output.set_exponent(output_exponent).set_shape(output_shape).apply_element();
output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

std::vector<int> padding(4, 0);

DL_LOG_NN_LATENCY_START();
if (padding_type == PADDING_SAME || padding_type == PADDING_SAME_MXNET)
if (padding_type == PADDING_SAME_END || padding_type == PADDING_SAME_BEGIN)
{
std::vector<int> padding = get_pad_size(output_shape, input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type);
input.set_padding_size(padding);
input.set_padding_value(padding, 0);
padding = get_pad_size(output_shape, input.shape, filter.shape_with_dilation, stride_y, stride_x, padding_type);
}
DL_LOG_NN_LATENCY_END("padding");

DL_LOG_NN_LATENCY_START();
depthwise_conv2d(output, input, input.padding, filter, stride_y, stride_x, bias, activation, assign_core);
depthwise_conv2d(output, input, padding, filter, stride_y, stride_x, bias, activation, assign_core);
DL_LOG_NN_LATENCY_END("depthwise_conv2d");

return output;
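
The depthwise variant mirrors the conv2d flow above: shape inference, optional same-padding computed into a local vector, then the low-level overload. A hedged call sketch, with `input` and `dw_filter` as assumed pre-built objects (not from this commit):

    // Hedged sketch: strided depthwise convolution, no padding, no bias/activation.
    dl::Tensor<int8_t> out = dl::nn::depthwise_conv2d<int8_t, int16_t>(
        /*output_exponent=*/-7, input, dw_filter,
        /*stride_y=*/2, /*stride_x=*/2,
        PADDING_VALID, /*bias=*/nullptr, /*activation=*/nullptr);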
@ -0,0 +1,126 @@
#pragma once

#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"

namespace dl
{
namespace nn
{
/**
* @brief activation(FullyConnected(input, filter) + bias).
*
* @param output as an output
* @param input as an input
* @param filter filter of FullyConnected
* @param bias bias of FullyConnected, if you don't specify anything, no bias is added
* @param activation activation of FullyConnected, if you don't specify anything, no activation is applied
* @param flatten true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
* false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ...., xn, output_dim]
* @param assign_core not effective yet
*/
void fully_connected(Tensor<int16_t> &output,
Tensor<int16_t> &input,
const Filter<int16_t> &filter,
const Bias<int16_t> *const bias = NULL,
const Activation<int16_t> *const activation = NULL,
const bool flatten = true,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

/**
* @brief activation(FullyConnected(input, filter) + bias).
*
* @param output as an output
* @param input as an input
* @param filter filter of FullyConnected
* @param bias bias of FullyConnected, if you don't specify anything, no bias is added
* @param activation activation of FullyConnected, if you don't specify anything, no activation is applied
* @param flatten true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
* false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ...., xn, output_dim]
* @param assign_core not effective yet
*/
void fully_connected(Tensor<int8_t> &output,
Tensor<int8_t> &input,
const Filter<int8_t> &filter,
const Bias<int8_t> *const bias = NULL,
const Activation<int8_t> *const activation = NULL,
const bool flatten = true,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

/**
* @brief activation(FullyConnected(input, filter) + bias).
*
* @param output as an output
* @param input as an input
* @param filter filter of FullyConnected
* @param bias bias of FullyConnected, if you don't specify anything, no bias is added
* @param activation activation of FullyConnected, if you don't specify anything, no activation is applied
* @param flatten true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
* false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ...., xn, output_dim]
* @param assign_core not effective yet
*/
void fully_connected(Tensor<int8_t> &output,
Tensor<int8_t> &input,
const Filter<int8_t> &filter,
const Bias<int16_t> *const bias = NULL,
const Activation<int8_t> *const activation = NULL,
const bool flatten = true,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

/**
* @brief activation(FullyConnected(input, filter) + bias).
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: stands for operation in int16_t quantize
* - int8_t: stands for operation in int8_t quantize
* @param output_exponent exponent of output
* @param input as an input
* @param filter Filter of FullyConnected
* @param bias bias of FullyConnected, if you don't specify anything, no bias is added
* @param activation activation of FullyConnected, if you don't specify anything, no activation is applied
* @param flatten true: input shape is [x1, x2, ..., xn], filter shape is [1, 1, x1 * x2 * ... * xn, output_dim], output shape is [output_dim]
* false: input shape is [x1, x2, ..., xn, input_dim], filter shape is [1, 1, input_dim, output_dim], output shape is [x1, x2, ...., xn, output_dim]
* @param assign_core not effective yet
* @return FullyConnected result
*/
template <typename feature_t>
Tensor<feature_t> fully_connected(const int output_exponent,
Tensor<feature_t> &input,
const Filter<feature_t> &filter,
const Bias<feature_t> *bias,
const Activation<feature_t> *activation,
const bool flatten,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_NN_LATENCY_INIT();

DL_LOG_NN_LATENCY_START();
assert(filter.shape.size() == 4);
assert(filter.shape[0] == 1);
assert(filter.shape[1] == 1);

std::vector<int> output_shape;
if (flatten)
{
assert(input.get_size() == filter.shape[2]);
output_shape = {filter.shape.back()};
}
else
{
assert(input.shape.back() == filter.shape[2]);
output_shape = input.shape;
output_shape[output_shape.size() - 1] = filter.shape.back();
}
Tensor<feature_t> output;
output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

DL_LOG_NN_LATENCY_START();
fully_connected(output, input, filter, bias, activation, flatten, assign_core);
DL_LOG_NN_LATENCY_END("fully_connected");

return output;
}
} // namespace nn
} // namespace dl
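
A hedged usage sketch of the flatten path: with flatten == true the input may have any shape whose total element count equals filter.shape[2], and the output collapses to {filter.shape.back()}. The names `features`, `fc_filter` and `fc_bias` are illustrative, not from this commit:

    // Hedged sketch: classify a flattened feature map into logits.
    dl::Tensor<int16_t> logits = dl::nn::fully_connected(
        /*output_exponent=*/-6, features, fc_filter,
        &fc_bias, /*activation=*/nullptr, /*flatten=*/true);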
@ -53,7 +53,7 @@ namespace dl
std::vector<int> output_shape(input.shape.size(), 1);
output_shape[2] = input.shape[2];
Tensor<feature_t> output;
output.set_exponent(output_exponent).set_shape(output_shape).apply_element();
output.set_exponent(output_exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

DL_LOG_NN_LATENCY_START();
@ -51,7 +51,7 @@ namespace dl
std::vector<int> output_shape(input.shape.size(), 1);
output_shape[2] = input.shape[2];
Tensor<feature_t> output;
output.set_exponent(input.exponent).set_shape(output_shape).apply_element();
output.set_exponent(input.exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

DL_LOG_NN_LATENCY_START();
@ -52,17 +52,17 @@ namespace dl
* @return leakyrelu result or no return(result store to input)
*/
template <bool inplace = false, typename feature_t>
auto leakyrelu(Tensor<feature_t> &input,
const int activation_alpha,
const int activation_exponent,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
auto leakyrelu(Tensor<feature_t> &input,
const int activation_alpha,
const int activation_exponent,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;
if constexpr(!inplace)
if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(input.exponent).set_shape(input.shape).apply_element();
output.set_exponent(input.exponent).set_shape(input.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

DL_LOG_NN_LATENCY_START();
@ -48,20 +48,20 @@ namespace dl
* @return max2d result or no return(result store to input0)
*/
template <bool inplace = false, typename feature_t>
auto max2d(Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
auto max2d(Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
assert(input0.is_same_shape(input1));
assert(input0.exponent == input1.exponent);

DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;

if constexpr(!inplace)

if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(input0.exponent).set_shape(input0.shape).apply_element();
output.set_exponent(input0.exponent).set_shape(input0.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

DL_LOG_NN_LATENCY_START();
@ -57,12 +57,12 @@ namespace dl
* @param filter_shape filter shape in [filter_height, filter_width]
* @param stride_y stride in height
* @param stride_x stride in width
* @param padding_type one of PADDING_VALID or PADDING_SAME or PADDING_SAME_MXNET,
* @param padding_type one of PADDING_VALID or PADDING_SAME_END or PADDING_SAME_BEGIN,
* - PADDING_VALID: no padding
* PADDING_SAME and PADDING_SAME_MXNET results in padding with zeros evenly to the left/right or up/down of the input
* PADDING_SAME_END and PADDING_SAME_BEGIN results in padding with zeros evenly to the left/right or up/down of the input
* such that output has the same height/width dimension as the input,
* - PADDING_SAME results padding in TensorFlow style
* - PADDING_SAME_MXNET results padding in MXNET style
* - PADDING_SAME_END results padding in TensorFlow style
* - PADDING_SAME_BEGIN results padding in MXNET style
* @param assign_core not effective yet
* @return max_pool2d result
*/
@ -79,20 +79,20 @@ namespace dl
DL_LOG_NN_LATENCY_START();
std::vector<int> output_shape = get_output_shape(input.shape, filter_shape, stride_y, stride_x, padding_type);
Tensor<feature_t> output;
output.set_exponent(input.exponent).set_shape(output_shape).apply_element();
output.set_exponent(input.exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

std::vector<int> padding(4, 0);

DL_LOG_NN_LATENCY_START();
if (padding_type == PADDING_SAME || padding_type == PADDING_SAME_MXNET)
if (padding_type == PADDING_SAME_END || padding_type == PADDING_SAME_BEGIN)
{
std::vector<int> padding = get_pad_size(output_shape, input.shape, filter_shape, stride_y, stride_x, padding_type);
input.set_padding_size(padding);
input.set_padding_value(padding, 0);
padding = get_pad_size(output_shape, input.shape, filter_shape, stride_y, stride_x, padding_type);
}
DL_LOG_NN_LATENCY_END("padding");

DL_LOG_NN_LATENCY_START();
max_pool2d(output, input, input.padding, filter_shape, stride_y, stride_x, assign_core);
max_pool2d(output, input, padding, filter_shape, stride_y, stride_x, assign_core);
DL_LOG_NN_LATENCY_END("max_pool2d");

return output;
@ -47,20 +47,20 @@ namespace dl
* @return min2d result or no return(result store to input0)
*/
template <bool inplace = false, typename feature_t>
auto min2d(Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
auto min2d(Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
assert(input0.is_same_shape(input1));
assert(input0.exponent == input1.exponent);

DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;

if constexpr(!inplace)

if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(input0.exponent).set_shape(input0.shape).apply_element();
output.set_exponent(input0.exponent).set_shape(input0.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

DL_LOG_NN_LATENCY_START();
@ -18,12 +18,12 @@ namespace dl
* @param assign_core not effective yet
* @param output_exponent exponent of output, only and must specify if inplace operation happens
*/
void mul2d(Tensor<int16_t> &output,
Tensor<int16_t> &input0,
Tensor<int16_t> &input1,
const Activation<int16_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
const int output_exponent = INT_MIN);
void mul2d(Tensor<int16_t> &output,
Tensor<int16_t> &input0,
Tensor<int16_t> &input1,
const Activation<int16_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
const int output_exponent = INT_MIN);

/**
* @brief activation(mul2d(input0, input1)).
@ -35,12 +35,12 @@ namespace dl
* @param assign_core not effective yet
* @param output_exponent exponent of output, only and must specify if inplace operation happens
*/
void mul2d(Tensor<int8_t> &output,
Tensor<int8_t> &input0,
Tensor<int8_t> &input1,
const Activation<int8_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
const int output_exponent = INT_MIN);
void mul2d(Tensor<int8_t> &output,
Tensor<int8_t> &input0,
Tensor<int8_t> &input1,
const Activation<int8_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
const int output_exponent = INT_MIN);

/**
* @brief activation(mul2d(input0, input1)).
@ -57,21 +57,21 @@ namespace dl
* @return mul2d result or no return(result store to input0)
*/
template <bool inplace = false, typename feature_t>
auto mul2d(const int output_exponent,
Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const Activation<feature_t> *activation,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
auto mul2d(const int output_exponent,
Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const Activation<feature_t> *activation,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
assert(input0.is_same_shape(input1));

DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;

if constexpr(!inplace)
if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(output_exponent).set_shape(input0.shape).apply_element();
output.set_exponent(output_exponent).set_shape(input0.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

DL_LOG_NN_LATENCY_START();
@ -52,17 +52,17 @@ namespace dl
* @return prelu result or no return(result store to input)
*/
template <bool inplace = false, typename feature_t>
auto prelu(Tensor<feature_t> &input,
const feature_t *activation_element,
const int activation_exponent,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
auto prelu(Tensor<feature_t> &input,
const feature_t *activation_element,
const int activation_exponent,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;
if constexpr(!inplace)
if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(input.exponent).set_shape(input.shape).apply_element();
output.set_exponent(input.exponent).set_shape(input.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

DL_LOG_NN_LATENCY_START();
@ -76,7 +76,7 @@ namespace dl
DL_LOG_NN_LATENCY_START();
prelu(input, input, activation_element, activation_exponent, assign_core);
DL_LOG_NN_LATENCY_END("prelu");
}
}
}
} // namespace nn
} // namespace dl
@ -15,9 +15,9 @@ namespace dl
* @param input as an input
* @param assign_core not effective yet
*/
void relu(Tensor<int16_t> &output,
Tensor<int16_t> &input,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
void relu(Tensor<int16_t> &output,
Tensor<int16_t> &input,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

/**
* @brief relu(input).
@ -26,9 +26,9 @@ namespace dl
* @param input as an input
* @param assign_core not effective yet
*/
void relu(Tensor<int8_t> &output,
Tensor<int8_t> &input,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
void relu(Tensor<int8_t> &output,
Tensor<int8_t> &input,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);

/**
* @brief relu(input)
@ -46,11 +46,11 @@ namespace dl
{
DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;

if constexpr(!inplace)

if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(input.exponent).set_shape(input.shape).apply_element();
output.set_exponent(input.exponent).set_shape(input.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

DL_LOG_NN_LATENCY_START();
@ -18,12 +18,12 @@ namespace dl
* @param assign_core not effective yet
* @param output_exponent exponent of output, only and must specify if inplace operation happens
*/
void sub2d(Tensor<int16_t> &output,
Tensor<int16_t> &input0,
Tensor<int16_t> &input1,
const Activation<int16_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
const int output_exponent = INT_MIN);
void sub2d(Tensor<int16_t> &output,
Tensor<int16_t> &input0,
Tensor<int16_t> &input1,
const Activation<int16_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
const int output_exponent = INT_MIN);

/**
* @brief activation(sub2d(input0, input1)).
@ -35,12 +35,12 @@ namespace dl
* @param assign_core not effective yet
* @param output_exponent exponent of output, only and must specify if inplace operation happens
*/
void sub2d(Tensor<int8_t> &output,
Tensor<int8_t> &input0,
Tensor<int8_t> &input1,
const Activation<int8_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
const int output_exponent = INT_MIN);
void sub2d(Tensor<int8_t> &output,
Tensor<int8_t> &input0,
Tensor<int8_t> &input1,
const Activation<int8_t> *const activation = NULL,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE,
const int output_exponent = INT_MIN);

/**
* @brief activation(sub2d(input0, input1)).
@ -57,20 +57,20 @@ namespace dl
* @return sub2d result or no return(result store to input0)
*/
template <bool inplace = false, typename feature_t>
auto sub2d(const int output_exponent,
Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const Activation<feature_t> *activation,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
auto sub2d(const int output_exponent,
Tensor<feature_t> &input0,
Tensor<feature_t> &input1,
const Activation<feature_t> *activation,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE) -> typename std::conditional<inplace, void, Tensor<feature_t>>::type
{
assert(input0.is_same_shape(input1));

DL_LOG_NN_LATENCY_INIT();
Tensor<feature_t> output;
if constexpr(!inplace)
if constexpr (!inplace)
{
DL_LOG_NN_LATENCY_START();
output.set_exponent(output_exponent).set_shape(input0.shape).apply_element();
output.set_exponent(output_exponent).set_shape(input0.shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");

DL_LOG_NN_LATENCY_START();
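
All of the element-wise operators above (leakyrelu, max2d, min2d, mul2d, prelu, relu, sub2d) share one compile-time dispatch idiom: the return type is computed with std::conditional so the in-place instantiation returns void, and `if constexpr` selects the allocating branch only when a fresh output Tensor is needed. A condensed sketch of the pattern; the name `relu_like` is hypothetical and the kernel bodies are elided:

    template <bool inplace = false, typename feature_t>
    auto relu_like(dl::Tensor<feature_t> &input)
        -> typename std::conditional<inplace, void, dl::Tensor<feature_t>>::type
    {
        if constexpr (!inplace)
        {
            dl::Tensor<feature_t> output;
            output.set_exponent(input.exponent).set_shape(input.shape).malloc_element();
            // ... kernel writes into output ...
            return output;
        }
        else
        {
            // ... kernel overwrites input in place; nothing to return ...
        }
    }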
@ -67,62 +67,49 @@ namespace dl
void copy_memory(void *dst, void *src, const int n);

/**
* @brief Apply memory without initialized. Must use free_aligned() to free the memory.
* @brief Apply memory without initialized. Can use free_aligned() to free the memory.
*
* @param number number of elements
* @param size size of element
* @param align number of aligned, e.g., 16 means 16-byte aligned
* @param align number of byte aligned, e.g., 16 means 16-byte aligned
* @return pointer of allocated memory. NULL for failed
*/
inline void *malloc_aligned(int number, int size, int align = 0)
inline void *malloc_aligned(int number, int size, int align = 4)
{
int n = number * size;
n >>= 4;
n += 2;
n <<= 4;
int total_size = n + align + sizeof(void *) + sizeof(int);
void *res = malloc(total_size);
assert((align > 0) && (((align & (align-1)) == 0)));
int total_size = number * size;

void *res = heap_caps_aligned_alloc(align, total_size, MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
#if DL_SPIRAM_SUPPORT
if (NULL == res)
res = heap_caps_malloc(total_size, MALLOC_CAP_SPIRAM);
res = heap_caps_aligned_alloc(align, total_size, MALLOC_CAP_SPIRAM);
#endif
if (NULL == res)
{
printf("Fail to malloc %d bytes from DRAM(%d bytes) and PSRAM(%d bytes), PSRAM is %s.\n",
total_size,
heap_caps_get_free_size(MALLOC_CAP_INTERNAL),
heap_caps_get_free_size(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL),
heap_caps_get_free_size(MALLOC_CAP_SPIRAM),
DL_SPIRAM_SUPPORT ? "on" : "off");
return NULL;
}
void **data = (void **)res + 2; // 4-byte for pointer, 4-bytes for n
void **aligned;
if (align)
aligned = (void **)(((size_t)data + (align - 1)) & -align);
else
aligned = data;

aligned[-1] = res;
int *temp = (int *)aligned;
temp[-2] = n;

return (void *)aligned;
return (void *)res;
}

/**
* @brief Apply memory with zero-initialized. Must use dl_lib_free() to free the memory.
* @brief Apply memory with zero-initialized. Can use free_aligned() to free the memory.
*
* @param number number of elements
* @param size size of element
* @param align number of aligned, e.g., 16 means 16-byte aligned
* @param align number of byte aligned, e.g., 16 means 16-byte aligned
* @return pointer of allocated memory. NULL for failed
*/
inline void *calloc_aligned(int number, int size, int align = 0)
inline void *calloc_aligned(int number, int size, int align = 4)
{

void *aligned = malloc_aligned(number, size, align);
int n = *((int *)aligned - 2);
set_zero(aligned, n);
set_zero(aligned, number * size);

return (void *)aligned;
}
@ -137,7 +124,70 @@ namespace dl
if (NULL == address)
return;

free(((void **)address)[-1]);
heap_caps_free(address);
}

/**
* @brief Apply memory without initialized in preference order: internal aligned, internal, external aligned
*
* @param number number of elements
* @param size size of element
* @param align number of byte aligned, e.g., 16 means 16-byte aligned
* @return pointer of allocated memory. NULL for failed
*/
inline void *malloc_aligned_prefer(int number, int size, int align = 4)
{
assert((align > 0) && (((align & (align-1)) == 0)));
int total_size = number * size;
void *res = heap_caps_aligned_alloc(align, total_size, MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
if (NULL == res){
res = heap_caps_malloc(total_size, MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL);
}
#if DL_SPIRAM_SUPPORT
if (NULL == res){
res = heap_caps_aligned_alloc(align, total_size, MALLOC_CAP_SPIRAM);
}
#endif
if (NULL == res)
{
printf("Fail to malloc %d bytes from DRAM(%d bytes) and PSRAM(%d bytes), PSRAM is %s.\n",
total_size,
heap_caps_get_free_size(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL),
heap_caps_get_free_size(MALLOC_CAP_SPIRAM),
DL_SPIRAM_SUPPORT ? "on" : "off");
return NULL;
}

return res;
}

/**
* @brief Apply memory with zero-initialized in preference order: internal aligned, internal, external aligned
*
* @param number number of elements
* @param size size of element
* @param align number of byte aligned, e.g., 16 means 16-byte aligned
* @return pointer of allocated memory. NULL for failed
*/
inline void *calloc_aligned_prefer(int number, int size, int align = 4)
{
void *res = malloc_aligned_prefer(number, size, align);
set_zero(res, number * size);

return (void *)res;
}

/**
* @brief Free the calloc_aligned_prefer() and malloc_aligned_prefer() memory
*
* @param address pointer of memory to free
*/
inline void free_aligned_prefer(void *address)
{
if (NULL == address)
return;

heap_caps_free(address);
}

/**
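
The rewrite above drops the hand-rolled pointer bookkeeping and delegates to ESP-IDF's heap_caps allocator, so a plain heap_caps_free() (wrapped by free_aligned()/free_aligned_prefer()) releases the block. A hedged usage sketch; buffer size and alignment are illustrative:

    // 16-byte aligned scratch buffer: internal 8-bit-capable RAM first,
    // with a PSRAM fallback when DL_SPIRAM_SUPPORT is enabled.
    int8_t *buf = (int8_t *)dl::tool::malloc_aligned_prefer(320, sizeof(int8_t), 16);
    if (buf != NULL)
    {
        // ... use buf ...
        dl::tool::free_aligned_prefer(buf);
    }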
@ -57,7 +57,8 @@ namespace dl
* @param exponent exponent of element
* @param shape shape of Filter,
* - 1D: reserved
* - 2D: [filter_height, filter_width, input_channel, output_channel]
* - 2D: for convolution is [filter_height, filter_width, input_channel, output_channel],
* for depthwise convolution is [filter_height, filter_width, input_channel, 1]
* @param dilation dilation of Filter
* - 1D: reserved
* - 2D: [dilation_in_height, dilation_in_width]
@ -97,6 +98,9 @@ namespace dl
{
public:
using Constant<T>::Constant;
std::vector<int> channel_exponent; /*<! exponent for per-channel >*/

Bias(const T *element, const std::vector<int> channel_exponent, const std::vector<int> shape);
};

/**
@ -3,6 +3,7 @@
#include <stdio.h>
#include <vector>
#include <assert.h>
#include <iostream>

#include "dl_tool.hpp"

@ -17,27 +18,20 @@ namespace dl
class Tensor
{
private:
int size; /*<! size of element including padding */
bool auto_free; /*<! free element when object destroy */
int size; /*<! size of element including padding */
bool auto_free; /*<! free element when object destroy */
std::vector<int> axis_offset; /*<! element offset of each axis */

public:
T *element; /*<! point to element */
int exponent; /*<! exponent of element */
std::vector<int> shape; /*<! shape of Tensor */
/*<! 2D: shape is [height, width, channel] */
/*<! 1D: reserved */
std::vector<int> shape_with_padding; /*<! shape with padding of Tensor */
/*<! 2D: shape_with_padding is [height_with_padding, width_with_padding, channel_with_padding] */
/*<! 1D: reserved */
std::vector<int> padding; /*<! padding of Tensor */
/*<! - 2D: padding format is [top, bottom, left, right] */
/*<! - 1D: reserved */
T *element; /*<! point to element */
int exponent; /*<! exponent of element */
std::vector<int> shape; /*<! shape of Tensor */

/**
* @brief Construct a new Tensor object
*
*/
Tensor() : size(-1), auto_free(true), element(NULL), exponent(0) {}
Tensor() : auto_free(true), element(NULL), exponent(0) { this->set_shape({0}); }

/**
* @brief Construct a new Tensor object by copying from input.
@ -49,21 +43,20 @@ namespace dl
*/
Tensor(Tensor<T> &input, bool deep) : size(input.size),
auto_free(input.auto_free),
exponent(input.exponent),
shape(input.shape),
shape_with_padding(input.shape_with_padding),
padding(input.padding)
exponent(input.exponent)
{
if (deep)
this->set_shape(input.shape);
if (deep && (input.element != NULL))
{
int size_real = input.shape_with_padding.size() ? input.shape_with_padding[0] * input.shape_with_padding[1] * input.shape_with_padding[2] : 0;
T *new_element = (T *)tool::calloc_aligned(size_real, sizeof(T), 16);
int size_real = input.get_size();
T *new_element = (T *)tool::calloc_aligned_prefer(size_real, sizeof(T), 16);
tool::copy_memory(new_element, input.element, size_real * sizeof(T));
this->element = new_element;
}
else
{
this->element = input.element;
this->auto_free = false;
}
}

@ -77,6 +70,33 @@ namespace dl
this->free_element();
}

/**
* @brief Copy the element of the input Tensor.
*
* @param input an input Tensor
* @param deep one of true or false
* - true: apply a new memory, copy value from input.element to this new memory
* - false: take over input.element to this->element
* @return Tensor<T>& self
*/
Tensor<T> &copy_element(Tensor<T> &input, bool deep)
{
assert(this->get_size() == input.get_size());
assert(input.element != NULL);

this->malloc_element();
if (deep)
{
tool::copy_memory(this->element, input.element, this->get_size() * sizeof(T));
}
else
{
this->element = input.element;
this->auto_free = false;
}
return *this;
}
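
Both the copy constructor and copy_element() draw the same deep/shallow distinction: a deep copy allocates a fresh aligned buffer and copies the data, while a shallow copy aliases input.element and clears auto_free so only the original owner releases it. A hedged sketch with illustrative values:

    dl::Tensor<int8_t> a;
    a.set_exponent(-7).set_shape({4, 4, 3}).malloc_element();

    dl::Tensor<int8_t> deep(a, true);     // owns a fresh copy of a.element
    dl::Tensor<int8_t> shallow(a, false); // aliases a.element; auto_free == false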
/**
* @brief Set the auto free object.
*
@ -120,190 +140,144 @@ namespace dl
}

/**
* @brief Set the shape of Tensor. Initial this->padding = {0}. Initial this->size = -1.
* @brief Set the shape of Tensor.
*
* @param shape shape in
* - 2D: [height, width]
* @param shape the target shape
*
* @return self
*/
Tensor<T> &set_shape(const std::vector<int> shape)
Tensor<T> &set_shape(const std::vector<int> shape);

/**
* @brief print the shape of the Tensor
*
*/
void print_shape()
{
for (int i = 0; i < shape.size(); ++i)
if (this->shape.size())
{
assert(shape[i] > 0);
printf("shape = (");
for (int i = 0; i < this->shape.size() - 1; i++)
{
printf("%d, ", this->shape[i]);
}
printf("%d)\n", this->shape.back());
}
else
{
printf("shape = ()\n");
}
this->shape = shape;
this->shape_with_padding = shape;
this->size = -1;
this->padding = std::vector<int>(((this->shape.size() - 1) << 1), 0);
return *this;
}

/**
* @brief Set the padding size object.
* @brief flatten the Tensor
*
* @param padding padding size in
* - 2D: [top, bottom, left, right]
* @return self
* @return Tensor<T>& self
*/
Tensor &set_padding_size(std::vector<int> &padding)
{
assert(this->shape.size()); // call Tensor.set_shape() first
assert(this->shape.size() == 3); // TODO: || this->shape.size() == 2

if (this->shape.size() == 3)
{
std::vector<int> new_padding = this->padding;
bool dont_update = true;

if (padding[0] > this->padding[0])
{
new_padding[0] = padding[0];
dont_update = false;
}

if (padding[1] > this->padding[1])
{
new_padding[1] = padding[1];
dont_update = false;
}

if (padding[2] > this->padding[2])
{
new_padding[2] = padding[2];
dont_update = false;
}

if (padding[3] > this->padding[3])
{
new_padding[3] = padding[3];
dont_update = false;
}

if (dont_update)
{
return *this;
}

std::vector<int> new_shape_with_padding = this->shape;

new_shape_with_padding[0] += (new_padding[0] + new_padding[1]);
new_shape_with_padding[1] += (new_padding[2] + new_padding[3]);
int new_size = new_shape_with_padding[0] * new_shape_with_padding[1] * new_shape_with_padding[2];

if (this->element) // if this->element != NULL, do padding by copy memory
{
T *new_element = (T *)tool::malloc_aligned(new_size, sizeof(T), 16);
T *dst = new_element + ((new_padding[0] * new_shape_with_padding[1]) + new_padding[2]) * new_shape_with_padding[2];
T *src = this->get_element_ptr();
int offset_dst_next_y = new_shape_with_padding[1] * new_shape_with_padding[2]; // width * channel
int src_copy_length = this->shape[1] * this->shape[2]; // width * channel
int offset_src_next_y = this->shape_with_padding[1] * this->shape_with_padding[2]; // width * channel
for (int y = 0; y < this->shape[0]; y++)
{
tool::copy_memory(dst, src, src_copy_length * sizeof(T));
dst += offset_dst_next_y;
src += offset_src_next_y;
}

if (this->auto_free)
tool::free_aligned(this->element);
this->element = new_element;
this->auto_free = true;
}
this->padding = new_padding;
this->shape_with_padding = new_shape_with_padding;
this->size = new_size;
}
else if (this->shape.size() == 2)
{
printf("Tensor.set_padding_size with this->shape.size() == 2 not implemented yet.\n");
}

return *this;
}
Tensor<T> &flatten();

/**
* @brief Set the padding value object.
* @brief Change a new shape to the Tensor without changing its data.
*
* @param padding padding size in
* - 2D: [top, bottom, left, right]
* @param value value to set
* @return self
* @param shape the target shape
* @return Tensor<T>& self
*/
Tensor<T> &set_padding_value(std::vector<int> &padding, T value);
Tensor<T> &reshape(std::vector<int> shape);

/**
* @brief Remove dims with length==1 from Tensor
*
* @param axis the dim to be removed. Make sure the length of the dim is equal to 1.
* if axis == INT32_MAX, all the dims with length==1 will be removed.
* @return Tensor<T>& self
*/
Tensor<T> &squeeze(int axis = INT32_MAX);

/**
* @brief Insert a new dim that will appear at the axis position in the expanded Tensor shape.
*
* @param axis the dim to be inserted
* @return Tensor<T>& self
*/
Tensor<T> &expand_dims(int axis);

/**
* @brief Insert a new dim that will appear at the axis position in the expanded Tensor shape.
*
* @param axis the dim to be inserted
* @return Tensor<T>& self
*/
Tensor<T> &expand_dims(std::vector<int> axis);

/**
* @brief Reverse or permute the axes of the Tensor
*
* @param perm the new arrangement of the dims. if perm == {}, the dims arrangement will be reversed.
* @return Tensor<T>& self
*/
Tensor<T> &transpose(std::vector<int> perm = {});

/**
* @brief Reverse or permute the axes of the input Tensor
*
* @param input the input Tensor
* @param perm the new arrangement of the dims. if perm == {}, the dims arrangement will be reversed.
* @return Tensor<T>& self
*/
Tensor<T> &transpose(Tensor<T> &input, std::vector<int> perm = {});
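
The new shape utilities above are declared here and defined out of line. A hedged sketch of how they compose; the shapes in the comments follow the semantics documented above and the permutation convention is an assumption:

    dl::Tensor<int8_t> t;
    t.set_exponent(0).set_shape({1, 8, 1, 3}).malloc_element();

    t.squeeze();            // {8, 3}: drops every length-1 dim
    t.expand_dims(0);       // {1, 8, 3}
    t.transpose({2, 0, 1}); // {3, 1, 8}: permute the axes
    t.reshape({3, 8});      // same 24 elements, new shape
    t.flatten();            // {24}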
/**
* @brief Get the element pointer.
*
* @param padding padding size in
* - 2D: [top, bottom, left, right]
* @return pointer to memory with padding
* @return pointer to memory
*/
T *get_element_ptr(const std::vector<int> padding = {0, 0, 0, 0})
T *get_element_ptr()
{
assert(this->shape.size() == 3); // TODO: || this->shape.size() == 2

if (this->shape.size() == 3)
{
return this->element + ((this->padding[0] - padding[0]) * this->shape_with_padding[1] + (this->padding[2] - padding[2])) * this->shape_with_padding[2];
}
else if (this->shape.size() == 2)
{
printf("Tensor.get_element_ptr with this->shape.size() == 2 is not implemented.\n");
}

return NULL;
return this->element;
}

/**
* @brief Get the element value.
*
* @param index index in
* - 2D: [y, x, c]
* @param with_padding one of true or false,
* - true: make padding size in count
* - false: do not
* @return element value
* @param index the index of each dim.
* @return T element value
*/
T &get_element_value(const std::vector<int> index, const bool with_padding = false)
T get_element_value(const std::vector<int> index)
{
assert(index.size() == this->shape.size());
assert(this->shape.size() == 3); // TODO: || this->shape() == 2

int i = 0;
if (this->shape.size() == 3)
{
int y = index[0];
int x = index[1];
int c = index[2];
i = with_padding ? (y * this->shape_with_padding[1] + x) * this->shape_with_padding[2] + c : ((y + this->padding[0]) * this->shape_with_padding[1] + x + this->padding[2]) * this->shape_with_padding[2] + c;
}
else if (this->shape.size() == 2)
{
printf("Tensor.get_element_value with this->shape.size() == 2 is not implemented.\n");
}

return this->element[i];
return this->element[this->get_element_index(index)];
}

/**
* @brief Get the size of element.
* @brief Get the element value.
*
* @return size of element including padding
* @param index the index of the element.
* @return T element value
*/
T get_element_value(int index)
{
return this->element[index];
}

/**
* @brief Get the size of Tensor.
*
* @return the size of Tensor.
*/
int get_size()
{
if (this->size == -1) // didn't call Tensor.set_padding_size() before
{
this->size = 1;
for (std::vector<int>::iterator d = this->shape.begin(); d != this->shape.end(); d++)
this->size *= *d;
}

return this->size;
}

/**
* @brief Get the axis offset
*
* @return std::vector<int> the axis offset
*/
std::vector<int> get_axis_offset()
{
return this->axis_offset;
}

/**
* @brief Apply memory with zero-initialized only if this->element is NULL.
*
@ -319,7 +293,7 @@ namespace dl
if (this->element != NULL)
return false;

this->element = (T *)dl::tool::calloc_aligned(this->get_size(), sizeof(T), 16);
this->element = (T *)dl::tool::calloc_aligned_prefer(this->get_size(), sizeof(T), 16);
this->auto_free = auto_free;

return true;
@ -340,31 +314,7 @@ namespace dl
if (this->element != NULL)
return false;

this->element = (T *)tool::malloc_aligned(this->get_size(), sizeof(T), 16);
this->auto_free = auto_free;

return true;
}

/**
* @brief If this->element != NULL no memory will be applied and no value will be set in padding.
* Else apply memory without initialized and set value to padding.
*
* @param padding_value value to set in padding
* @param auto_free one of true or false
* - true: free element when object destroyed
* - false: do not
* @return
* - true: apply memory and set padding value successfully
* - false: no memory applied and no padding value set
*/
bool apply_element(const T padding_value = 0, const bool auto_free = true)
{
if (this->element != NULL)
return false;

this->element = (T *)tool::malloc_aligned(this->get_size(), sizeof(T), 16);
this->set_padding_value(this->padding, padding_value);
this->element = (T *)tool::malloc_aligned_prefer(this->get_size(), sizeof(T), 16);
this->auto_free = auto_free;

return true;
@ -379,258 +329,56 @@ namespace dl
{
if (this->auto_free && this->element)
{
tool::free_aligned(this->element);
tool::free_aligned_prefer(this->element);
this->element = NULL;
}
}

/**
* @brief Print the shape of Tensor in format "shape = ({top_padding} + {height} + {bottom_padding}, {left_padding} + {width} + {right_padding}, {channel}(channel_with_padding))\n".
* @brief print the element of the tensor
*
* @param axis_index_range the element range of each dims to be printed. if axis_index_range == {}, all the element will be printed.
* @param message to print
*/
void print_shape()
{
printf("shape = (%d + %d + %d, %d + %d + %d, %d(%d))\n",
this->padding[0], this->shape[0], this->padding[1],
this->padding[2], this->shape[1], this->padding[3],
this->shape[2], this->shape_with_padding[2]);
}
void print(std::vector<int> axis_index_range = {}, const char *message = "");

/**
* @brief Take numpy for example, this function print Tensor[y_start:y_end, x_start:x_end, c_start:c_end].
* @brief print all the element of the Tensor.
*
* inner box is effective value of Tensor, "0" around is padding.
*
* (with padding)
* 00000000000000000000000000000000000000000000000000
* 00000000000000000000000000000000000000000000000000
* 00000000000000000000000000000000000000000000000000
* 000000(without padding) 00000000
* 000000 00000000
* 000000 00000000
* 000000 effective value 00000000
* 000000 00000000
* 000000 00000000
* 00000000000000000000000000000000000000000000000000
* 00000000000000000000000000000000000000000000000000
* 00000000000000000000000000000000000000000000000000
*
* @param y_start start index in height
* @param y_end end index in height
* @param x_start start index in width
* @param x_end end index in width
* @param c_start start index in channel
* @param c_end end index in channel
* @param message to print
* @param axis print aligned this axis, effective only if all y_end - y_start, x_end - x_start and c_end - c_start equals to 1
* @param message to print
* @param with_padding one of true or false,
* - true: count from (with padding) in upper image
* - false: count from (without padding) in upper image
* - true: the padding element will also be printed
* - false: the padding element will not be printed
*/
void print(int y_start, int y_end,
int x_start, int x_end,
int c_start, int c_end,
const char *message, int axis = 0, const bool with_padding = false)
void print_all(const char *message = "")
{
assert(y_end > y_start);
assert(x_end > x_start);
assert(c_end > c_start);

y_start = DL_MAX(y_start, 0);
x_start = DL_MAX(x_start, 0);
c_start = DL_MAX(c_start, 0);
if (with_padding)
{
y_end = DL_MIN(y_end, this->shape_with_padding[0]);
x_end = DL_MIN(x_end, this->shape_with_padding[1]);
c_end = DL_MIN(c_end, this->shape_with_padding[2]);
}
else
{
y_end = DL_MIN(y_end, this->shape[0]);
x_end = DL_MIN(x_end, this->shape[1]);
c_end = DL_MIN(c_end, this->shape[2]);
}

printf("%s[%d:%d, %d:%d, %d:%d] | ", message, y_start, y_end, x_start, x_end, c_start, c_end);
std::cout << "\n"
<< message << " | ";
this->print_shape();

if (y_end - y_start == 1)
for (int i = 0; i < this->get_size(); i++)
{
if (x_end - x_start == 1)
{
for (int c = c_start; c < c_end; c++)
printf("%7d", c);
printf("\n");

for (int c = c_start; c < c_end; c++)
printf("%7d", this->get_element_value({y_start, x_start, c}, with_padding));
printf("\n");

return;
}
else
{
if (c_end - c_start == 1)
{
for (int x = x_start; x < x_end; x++)
printf("%7d", x);
printf("\n");

for (int x = x_start; x < x_end; x++)
printf("%7d", this->get_element_value({y_start, x, c_start}, with_padding));
printf("\n");

return;
}
}
std::cout << this->element[i] << " ";
}
else
{
if (x_end - x_start == 1)
{
if (c_end - c_start == 1)
{
for (int y = y_start; y < y_end; y++)
printf("%7d", y);
printf("\n");

for (int y = y_start; y < y_end; y++)
printf("%7d", this->get_element_value({y, x_start, c_start}, with_padding));
printf("\n");

return;
}
}
}

if (y_end - y_start == 1)
axis = 0;

if (x_end - x_start == 1)
axis = 1;

if (c_end - c_start == 1)
axis = 2;

if (axis == 0)
{
// ______c
// |
// |
// x
//
for (int y = y_start; y < y_end; y++)
{
printf("y = %d\n ", y);

for (int c = c_start; c < c_end; c++)
printf("%7d", c);
printf("\n");

for (int x = x_start; x < x_end; x++)
{
printf("%5d", x);
for (int c = c_start; c < c_end; c++)
printf("%7d", this->get_element_value({y, x, c}, with_padding));
printf("\n");
}
printf("\n");
}
}
else if (axis == 1)
{
// ______c
// |
// |
// y
//
for (int x = x_start; x < x_end; x++)
{
printf("x = %d\n ", x);

for (int c = c_start; c < c_end; c++)
printf("%7d", c);
printf("\n");

for (int y = y_start; y < y_end; y++)
{
printf("%5d", y);
for (int c = c_start; c < c_end; c++)
printf("%7d", this->get_element_value({y, x, c}, with_padding));
printf("\n");
}
printf("\n");
}
}
else
{
// ______x
// |
// |
// y
//
for (int c = c_start; c < c_end; c++)
{
printf("c = %d\n ", c);

for (int x = x_start; x < x_end; x++)
printf("%7d", x);
printf("\n");

for (int y = y_start; y < y_end; y++)
{
printf("%5d", y);
for (int x = x_start; x < x_end; x++)
printf("%7d", this->get_element_value({y, x, c}, with_padding));
printf("\n");
}
printf("\n");
}
}

std::cout << "\n";
return;
}
/**
* @brief print all the element of the Tensor.
* @brief Get the index of each dims
*
* @param message to print
* @param with_padding one of true or false,
* - true: the padding element will also be printed
* - false: the padding element will not be printed
* @param element_index the index of the element
* @return std::vector<int> the index of each dims
*/
void print_all(const char *message, const bool with_padding = false)
{
int y_end;
int x_end;
int c_end;
if (with_padding)
{
y_end = this->shape_with_padding[0];
x_end = this->shape_with_padding[1];
c_end = this->shape_with_padding[2];
}
else
{
y_end = this->shape[0];
x_end = this->shape[1];
c_end = this->shape[2];
}
std::vector<int> get_axis_index(int element_index);

printf("\n%s | ", message);
this->print_shape();

for (int y = 0; y < y_end; y++)
{
for (int x = 0; x < x_end; x++)
{
for (int c = 0; c < c_end; c++)
printf("%d ", this->get_element_value({y, x, c}, with_padding));
}
}
printf("\n");
return;
}
/**
* @brief Get the index of element
*
* @param axis_index the index of each dims
* @return int the index of element
*/
int get_element_index(const std::vector<int> axis_index);

/**
* @brief Check the element value with input ground-truth.
@ -638,35 +386,39 @@ namespace dl
* @param gt_element ground-truth value of element
* @param bias permissible error
* @param info one of true or false
* - true: print shape and result
* - true: print shape and result
* - false: do not
* @param failed_number maximum number of wrong element that will be printed
*
* @return
* - true: in permissible error
* - false: not
*/
bool check_element(T *gt_element, int bias = 2, bool info = true)
bool check_element(T *gt_element, int bias = 2, bool info = true, int failed_number = 0)
{
int count = 0;
if (info)
this->print_shape();
int i = 0;
for (int y = 0; y < this->shape[0]; y++)
int size = this->get_size();
for (int i = 0; i < size; i++)
{
for (int x = 0; x < this->shape[1]; x++)
if (DL_ABS(this->element[i] - gt_element[i]) > bias)
{
for (int c = 0; c < this->shape[2]; c++)
std::vector<int> index = get_axis_index(i);
std::cout << "element[";
for (int j = 0; j < index.size() - 1; j++)
{
int a = this->get_element_value({y, x, c});
int b = gt_element[i];
int offset = DL_ABS(a - b);
if (offset > bias)
{
printf("element[%d, %d, %d]: %d v.s. %d\n", y, x, c, a, b);
return false;
}
i++;
std::cout << index[j] << ", ";
}
std::cout << index.back() << "]: ";
std::cout << +this->element[i] << " v.s. " << +gt_element[i] << "\n";
count++;
if (count > failed_number)
return false;
}
}
if (count)
return false;

if (info)
printf("PASS\n");
@ -700,35 +452,44 @@ namespace dl

        Tensor<T> &operator=(const Tensor<T> &input)
        {
            this->size = input.size;
            this->auto_free = input.auto_free;
            this->exponent = input.exponent;
            this->shape = input.shape;
            this->padding = input.padding;
            int size_real_tmp = this->shape_with_padding.size() ? this->shape_with_padding[0] * this->shape_with_padding[1] * this->shape_with_padding[2] : 0;
            int size_input_real = input.shape_with_padding.size() ? input.shape_with_padding[0] * input.shape_with_padding[1] * input.shape_with_padding[2] : 0;
            this->shape_with_padding = input.shape_with_padding;
            if (this->element)
            int size_real_tmp = this->size;
            int size_input_real = input.size;
            this->set_shape(input.shape);
            if (input.element)
            {
                if (size_real_tmp != size_input_real)
                if (this->element)
                {
                    tool::free_aligned(this->element);
                    T *new_element = (T *)tool::calloc_aligned(size_input_real, sizeof(T), 16);
                    tool::copy_memory(new_element, input.element, size_input_real * sizeof(T));
                    this->element = new_element;
                    if (size_real_tmp != size_input_real)
                    {
                        tool::free_aligned_prefer(this->element);
                        T *new_element = (T *)tool::malloc_aligned_prefer(size_input_real, sizeof(T), 16);
                        tool::copy_memory(new_element, input.element, size_input_real * sizeof(T));
                        this->element = new_element;
                    }
                    else
                    {
                        tool::copy_memory(this->element, input.element, size_input_real * sizeof(T));
                    }
                }
                else
                {
                    tool::copy_memory(this->element, input.element, size_input_real * sizeof(T));
                    T *new_element = (T *)tool::malloc_aligned_prefer(size_input_real, sizeof(T), 16);
                    tool::copy_memory(new_element, input.element, size_input_real * sizeof(T));
                    this->element = new_element;
                }
                return *this;
            }
            else
            {
                T *new_element = (T *)tool::calloc_aligned(size_input_real, sizeof(T), 16);
                tool::copy_memory(new_element, input.element, size_input_real * sizeof(T));
                this->element = new_element;
                if (this->element)
                {
                    tool::free_aligned_prefer(this->element);
                    this->element = NULL;
                }
                return *this;
            }
            return *this;
        }
    };
} // namespace dl
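The reworked checker above scans the flat element buffer instead of three nested axis loops. A hedged usage sketch follows; dl::Tensor, check_element() and get_axis_index() come from the declarations in this diff, while the header name, tensor type and shape are illustrative assumptions:

    #include <vector>
    #include "dl_variable.hpp" // assumed esp-dl header declaring dl::Tensor

    // Compare a tensor produced elsewhere against a ground-truth buffer.
    void compare_against_ground_truth(dl::Tensor<int16_t> &y, int16_t *gt)
    {
        // New failed_number argument: report up to 5 mismatches as
        // "element[y, x, c]: a v.s. b" lines instead of stopping at the first.
        bool ok = y.check_element(gt, /*bias=*/2, /*info=*/true, /*failed_number=*/5);
        if (!ok)
        {
            // get_axis_index() maps a flat index back to per-axis indices,
            // which is how the new implementation reports mismatch locations.
            std::vector<int> axis = y.get_axis_index(0);
        }
    }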
@ -44,6 +44,7 @@ typedef enum {
#if SOC_PM_SUPPORT_CPU_PD
    ESP_PD_DOMAIN_CPU,          //!< CPU core
#endif
    ESP_PD_DOMAIN_RTC8M,        //!< Internal 8M oscillator
    ESP_PD_DOMAIN_VDDSDIO,      //!< VDD_SDIO
    ESP_PD_DOMAIN_MAX           //!< Number of domains
} esp_sleep_pd_domain_t;

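A small hedged example of what the new enumerator enables: esp_sleep_pd_config() is the existing IDF call for choosing a power-down option per domain, and only the ESP_PD_DOMAIN_CPU value is new here.

    #include "esp_sleep.h"

    void keep_cpu_powered_during_light_sleep(void)
    {
    #if SOC_PM_SUPPORT_CPU_PD
        // Keep the CPU domain powered so execution resumes without a reset;
        // ESP_PD_OPTION_AUTO would let the sleep logic decide instead.
        esp_sleep_pd_config(ESP_PD_DOMAIN_CPU, ESP_PD_OPTION_ON);
    #endif
    }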
@ -65,6 +65,22 @@ esp_err_t esp_lcd_panel_io_tx_color(esp_lcd_panel_io_handle_t io, int lcd_cmd, c
 */
esp_err_t esp_lcd_panel_io_del(esp_lcd_panel_io_handle_t io);

/**
 * @brief Type of LCD panel IO event data
 */
typedef struct {
} esp_lcd_panel_io_event_data_t;

/**
 * @brief Declare the prototype of the function that will be invoked when panel IO finishes transferring color data
 *
 * @param[in] panel_io LCD panel IO handle, which is created by factory API like `esp_lcd_new_panel_io_spi()`
 * @param[in] edata Panel IO event data, fed by driver
 * @param[in] user_ctx User data, passed from `esp_lcd_panel_io_xxx_config_t`
 * @return Whether a high priority task has been woken up by this function
 */
typedef bool (*esp_lcd_panel_io_color_trans_done_cb_t)(esp_lcd_panel_io_handle_t panel_io, esp_lcd_panel_io_event_data_t *edata, void *user_ctx);

/**
 * @brief Panel IO configuration structure, for SPI interface
 */
@ -74,8 +90,8 @@ typedef struct {
    int spi_mode;             /*!< Traditional SPI mode (0~3) */
    unsigned int pclk_hz;     /*!< Frequency of pixel clock */
    size_t trans_queue_depth; /*!< Size of internal transaction queue */
    bool (*on_color_trans_done)(esp_lcd_panel_io_handle_t panel_io, void *user_data, void *event_data); /*!< Callback, invoked when color data transfer has finished */
    void *user_data;          /*!< User private data, passed directly to on_trans_frame_done's user_data */
    esp_lcd_panel_io_color_trans_done_cb_t on_color_trans_done; /*!< Callback invoked when color data transfer has finished */
    void *user_ctx;           /*!< User private data, passed directly to on_color_trans_done's user_ctx */
    int lcd_cmd_bits;         /*!< Bit-width of LCD command */
    int lcd_param_bits;       /*!< Bit-width of LCD parameter */
    struct {
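This hunk swaps the bare function pointer plus user_data pair for the esp_lcd_panel_io_color_trans_done_cb_t typedef plus user_ctx. A hedged migration sketch; the pin, clock and queue values are made-up, and fields not shown in the hunk are omitted:

    static bool color_done_cb(esp_lcd_panel_io_handle_t panel_io,
                              esp_lcd_panel_io_event_data_t *edata, void *user_ctx)
    {
        // e.g. give a semaphore to the drawing task; return whether a
        // higher-priority task was woken so the driver can yield from ISR.
        return false;
    }

    esp_lcd_panel_io_spi_config_t io_cfg = {
        .spi_mode = 0,
        .pclk_hz = 20 * 1000 * 1000,
        .trans_queue_depth = 10,
        .on_color_trans_done = color_done_cb, // new typedef'd field
        .user_ctx = NULL,                     // renamed from user_data
        .lcd_cmd_bits = 8,
        .lcd_param_bits = 8,
    };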
@ -100,8 +116,8 @@ esp_err_t esp_lcd_new_panel_io_spi(esp_lcd_spi_bus_handle_t bus, const esp_lcd_p

typedef struct {
    uint32_t dev_addr; /*!< I2C device address */
    bool (*on_color_trans_done)(esp_lcd_panel_io_handle_t panel_io, void *user_data, void *event_data); /*!< Callback, invoked when color data transfer has finished */
    void *user_data;   /*!< User private data, passed directly to on_trans_frame_done's user_data */
    esp_lcd_panel_io_color_trans_done_cb_t on_color_trans_done; /*!< Callback invoked when color data transfer has finished */
    void *user_ctx;    /*!< User private data, passed directly to on_color_trans_done's user_ctx */
    size_t control_phase_bytes; /*!< I2C LCD panel will encode control information (e.g. D/C selection) into control phase, in several bytes */
    unsigned int dc_bit_offset; /*!< Offset of the D/C selection bit in control phase */
    int lcd_cmd_bits;  /*!< Bit-width of LCD command */
@ -168,8 +184,8 @@ typedef struct {
    int cs_gpio_num;          /*!< GPIO used for CS line, set to -1 will disclaim exclusive use of I80 bus */
    unsigned int pclk_hz;     /*!< Frequency of pixel clock */
    size_t trans_queue_depth; /*!< Transaction queue size, larger queue, higher throughput */
    bool (*on_color_trans_done)(esp_lcd_panel_io_handle_t panel_io, void *user_data, void *event_data); /*!< Callback, invoked when color data has been transferred */
    void *user_data;          /*!< User private data, passed directly to on_trans_done's user_data */
    esp_lcd_panel_io_color_trans_done_cb_t on_color_trans_done; /*!< Callback invoked when color data has been transferred */
    void *user_ctx;           /*!< User private data, passed directly to on_color_trans_done's user_ctx */
    int lcd_cmd_bits;         /*!< Bit-width of LCD command */
    int lcd_param_bits;       /*!< Bit-width of LCD parameter */
    struct {

@ -18,6 +18,37 @@ extern "C" {
#if SOC_LCD_RGB_SUPPORTED
/**
 * @brief LCD RGB timing structure
 *
 *                                               Total Width
 *                           <--------------------------------------------------->
 *                     Hsync width  HBP             Active Width                HFP
 *                           <---><--><--------------------------------------><--->
 *                       ____    ____|_______________________________________|____|
 *                           |___|   |                                       |    |
 *                                   |                                       |    |
 *                       __|         |                                       |    |
 *          /|\    /|\  |            |                                       |    |
 *           | VSYNC|   |            |                                       |    |
 *           |Width\|/  |__          |                                       |    |
 *           |     /|\     |         |                                       |    |
 *           |  VBP |      |         |                                       |    |
 *           |     \|/_____|_________|_______________________________________|    |
 *           |     /|\     |         | / / / / / / / / / / / / / / / / / / / |    |
 *           |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *   Total   |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *   Height  |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *           |Active|      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *           |Height|      |         |/ / / / / / Active Display Area / / / /|    |
 *           |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *           |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *           |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *           |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *           |      |      |         |/ / / / / / / / / / / / / / / / / / / /|    |
 *           |     \|/_____|_________|_______________________________________|    |
 *           |     /|\     |                                                      |
 *           |  VFP |      |                                                      |
 *          \|/    \|/_____|______________________________________________________|
 *
 */
typedef struct {
    unsigned int pclk_hz; /*!< Frequency of pixel clock */
@ -38,6 +69,22 @@ typedef struct {
    } flags;
} esp_lcd_rgb_timing_t;
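Reading the diagram into the struct: a hedged example for a hypothetical 480x272 panel. Every number below is an illustrative assumption, and the field names follow the struct that this hunk truncates:

    esp_lcd_rgb_timing_t timing = {
        .pclk_hz = 9 * 1000 * 1000,
        .h_res = 480,             // Active Width
        .v_res = 272,             // Active Height
        .hsync_pulse_width = 4,   // Hsync width
        .hsync_back_porch = 43,   // HBP
        .hsync_front_porch = 8,   // HFP
        .vsync_pulse_width = 4,   // VSYNC width
        .vsync_back_porch = 12,   // VBP
        .vsync_front_porch = 8,   // VFP
    };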

/**
 * @brief Type of RGB LCD panel event data
 */
typedef struct {
} esp_lcd_rgb_panel_event_data_t;

/**
 * @brief Declare the prototype of the function that will be invoked when panel IO finishes transferring color data
 *
 * @param[in] panel LCD panel handle, returned from `esp_lcd_new_rgb_panel`
 * @param[in] edata Panel event data, fed by driver
 * @param[in] user_ctx User data, passed from `esp_lcd_rgb_panel_config_t`
 * @return Whether a high priority task has been woken up by this function
 */
typedef bool (*esp_lcd_rgb_panel_frame_trans_done_cb_t)(esp_lcd_panel_handle_t panel, esp_lcd_rgb_panel_event_data_t *edata, void *user_ctx);

/**
 * @brief LCD RGB panel configuration structure
 */
@ -51,8 +98,8 @@ typedef struct {
    int pclk_gpio_num;        /*!< GPIO used for PCLK signal */
    int data_gpio_nums[SOC_LCD_RGB_DATA_WIDTH]; /*!< GPIOs used for data lines */
    int disp_gpio_num;        /*!< GPIO used for display control signal, set to -1 if it's not used */
    bool (*on_frame_trans_done)(esp_lcd_panel_handle_t panel, void *user_data); /*!< Callback, invoked when one frame buffer transfer has finished */
    void *user_data;          /*!< User data which would be passed to on_frame_trans_done's user_data */
    esp_lcd_rgb_panel_frame_trans_done_cb_t on_frame_trans_done; /*!< Callback invoked when one frame buffer transfer has finished */
    void *user_ctx;           /*!< User data which would be passed to on_frame_trans_done's user_ctx */
    struct {
        unsigned int disp_active_low: 1; /*!< If this flag is enabled, a low level of display control signal can turn the screen on; vice versa */
        unsigned int relax_on_idle: 1;   /*!< If this flag is enabled, the host won't refresh the LCD if nothing changed in host's frame buffer (this is useful for LCD with built-in GRAM) */

@ -14,7 +14,7 @@
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/poll.h>
#include <dirent.h>
#include <sys/dirent.h>
#include <string.h>
#include "sdkconfig.h"


@ -1,16 +1,8 @@
// Copyright 2015-2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef __ESP_COEXIST_H__
#define __ESP_COEXIST_H__
@ -32,6 +24,13 @@ typedef enum {
    ESP_COEX_PREFER_NUM, /*!< Prefer value numbers */
} esp_coex_prefer_t;

typedef enum {
    EXTERN_COEX_WIRE_1 = 0,
    EXTERN_COEX_WIRE_2,
    EXTERN_COEX_WIRE_3,
    EXTERN_COEX_WIRE_NUM,
} external_coex_wire_t;

/**
 * @brief coex status type
 */
@ -41,6 +40,36 @@ typedef enum {
    ESP_COEX_ST_TYPE_BT,
} esp_coex_status_type_t;

/**
 * @brief external coex gpio pti
 */
typedef struct {
    int32_t in_pin0;
    int32_t in_pin1;
    int32_t out_pin0;
} esp_external_coex_gpio_set_t;

/**
 * @brief external coex pti level
 */
typedef enum {
    EXTERN_COEX_PTI_MID = 0,
    EXTERN_COEX_PTI_HIGH,
    EXTERN_COEX_PTI_NUM,
} esp_coex_pti_level_t;

/**
 * @brief external coex pti
 */
typedef struct {
    uint32_t in_pti1;
    uint32_t in_pti2;
    uint32_t in_pti3;
    uint32_t out_pti1;
    uint32_t out_pti2;
    uint32_t out_pti3;
} esp_external_coex_pti_set_t;

#define ESP_COEX_BLE_ST_MESH_CONFIG     0x08
#define ESP_COEX_BLE_ST_MESH_TRAFFIC    0x10
#define ESP_COEX_BLE_ST_MESH_STANDBY    0x20
@ -84,6 +113,18 @@ esp_err_t esp_coex_status_bit_set(esp_coex_status_type_t type, uint32_t status);
 */
esp_err_t esp_coex_status_bit_clear(esp_coex_status_type_t type, uint32_t status);

#if CONFIG_EXTERNAL_COEX_ENABLE
/**
 * @brief Set up the GPIO pins and the corresponding PTI level, then start external coex.
 * @param wire_type : select how many external coex GPIOs are in use.
 * @param gpio_pin : the GPIO pin numbers to use.
 * @return : ESP_OK - success, other - failed
 */
esp_err_t esp_enable_extern_coex_gpio_pin(external_coex_wire_t wire_type,
                                          esp_external_coex_gpio_set_t gpio_pin);

esp_err_t esp_disable_extern_coex_gpio_pin();
#endif
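A hedged bring-up sketch for the new external coex API; the GPIO numbers are placeholders and the per-pin roles are inferred from the names above, not stated by this commit:

    esp_external_coex_gpio_set_t coex_pins = {
        .in_pin0 = 4,   // request input from the external radio (assumed role)
        .in_pin1 = 5,   // priority input (assumed role)
        .out_pin0 = 6,  // grant output towards the external radio (assumed role)
    };

    esp_err_t err = esp_enable_extern_coex_gpio_pin(EXTERN_COEX_WIRE_3, coex_pins);
    if (err != ESP_OK) {
        // handle the error
    }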
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
@ -1,21 +1,14 @@
// Copyright 2018-2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2018-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef __ESP_COEXIST_INTERNAL_H__
#define __ESP_COEXIST_INTERNAL_H__

#include <stdbool.h>
#include "esp_coexist.h"
#include "esp_coexist_adapter.h"

#ifdef __cplusplus
@ -210,6 +203,29 @@ int coex_schm_curr_phase_idx_get(void);
 */
esp_err_t esp_coex_adapter_register(coex_adapter_funcs_t *funcs);

#if CONFIG_EXTERNAL_COEX_ENABLE
/**
 * @brief Set external coexistence pti level and enable it.
 *
 * @param level1 external coex low pti
 * @param level2 external coex mid pti
 * @param level3 external coex high pti
 *
 * @return
 *    - ESP_OK: succeed
 */
esp_err_t esp_coex_external_set(esp_coex_pti_level_t level1,
        esp_coex_pti_level_t level2, esp_coex_pti_level_t level3);

/**
 * @brief Disable external coexist
 *
 * @return
 *    - ESP_OK: succeed
 */
void esp_coex_external_stop(void);
#endif /*External Coex*/

/**
 * @brief Check the MD5 values of the coexistence adapter header files in IDF and WiFi library
 *

@ -1,16 +1,8 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */


#ifndef __ESP_WIFI_TYPES_H__
@ -80,6 +72,7 @@ typedef enum {
    WIFI_REASON_ASSOC_NOT_AUTHED        = 9,
    WIFI_REASON_DISASSOC_PWRCAP_BAD     = 10,
    WIFI_REASON_DISASSOC_SUPCHAN_BAD    = 11,
    WIFI_REASON_BSS_TRANSITION_DISASSOC = 12,
    WIFI_REASON_IE_INVALID              = 13,
    WIFI_REASON_MIC_FAILURE             = 14,
    WIFI_REASON_4WAY_HANDSHAKE_TIMEOUT  = 15,
@ -250,7 +243,8 @@ typedef struct {
    wifi_pmf_config_t pmf_cfg; /**< Configuration for Protected Management Frame. Will be advertised in RSN Capabilities in RSN IE. */
    uint32_t rm_enabled:1;     /**< Whether Radio Measurements are enabled for the connection */
    uint32_t btm_enabled:1;    /**< Whether BSS Transition Management is enabled for the connection */
    uint32_t reserved:30;      /**< Reserved for future feature set */
    uint32_t mbo_enabled:1;    /**< Whether MBO is enabled for the connection */
    uint32_t reserved:29;      /**< Reserved for future feature set */
} wifi_sta_config_t;
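The change above carves mbo_enabled out of the reserved field (30 -> 29 bits), so the struct size and the existing flags are unchanged. A hedged fragment showing the three flags together (assumes "esp_wifi.h" is included and the SSID/password fields are set elsewhere):

    wifi_config_t cfg = { 0 };
    cfg.sta.rm_enabled  = 1;  // Radio Measurements
    cfg.sta.btm_enabled = 1;  // BSS Transition Management
    cfg.sta.mbo_enabled = 1;  // new bit introduced by this change
    esp_wifi_set_config(WIFI_IF_STA, &cfg);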

/** @brief Configuration data for ESP32 AP or STA.

@ -90,7 +90,6 @@
#define portNUM_PROCESSORS 1
#endif

#define configASSERT_2 0
#define portUSING_MPU_WRAPPERS 0
#define configUSE_MUTEX 1

@ -206,7 +205,6 @@
#define configGENERATE_RUN_TIME_STATS 1 /* Used by vTaskGetRunTimeStats() */
#endif

#define configUSE_TRACE_FACILITY_2 0
#define configBENCHMARK 0
#define configUSE_16_BIT_TICKS 0
#define configIDLE_SHOULD_YIELD 0
@ -306,4 +304,9 @@ extern void vPortCleanUpTCB ( void *pxTCB );

#define configTASK_NOTIFICATION_ARRAY_ENTRIES 1

// backward compatibility for 4.4
#define xTaskRemoveFromUnorderedEventList vTaskRemoveFromUnorderedEventList

#define configNUM_CORES portNUM_PROCESSORS

#endif /* FREERTOS_CONFIG_H */

@ -64,7 +64,7 @@
 * used to create a synchronisation point between multiple tasks (a
 * 'rendezvous').
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup EventGroup EventGroup
 * @endcond
 */
@ -78,7 +78,7 @@
 * xEventGroupCreate() returns an EventGroupHandle_t variable that can then
 * be used as a parameter to other event group functions.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup EventGroupHandle_t EventGroupHandle_t
 * @endcond
 * \ingroup EventGroup
@ -94,7 +94,7 @@ typedef struct EventGroupDef_t * EventGroupHandle_t;
 * number of bits it holds is set by configUSE_16_BIT_TICKS (16 bits if set to 1,
 * 32 bits if set to 0).
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup EventBits_t EventBits_t
 * @endcond
 * \ingroup EventGroup
@ -102,7 +102,7 @@ typedef struct EventGroupDef_t * EventGroupHandle_t;
typedef TickType_t EventBits_t;

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * event_groups.h
 * @code{c}
 * EventGroupHandle_t xEventGroupCreate( void );
@ -152,7 +152,7 @@ typedef TickType_t EventBits_t;
 * // The event group was created.
 * }
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xEventGroupCreate xEventGroupCreate
 * @endcond
 * \ingroup EventGroup
@ -162,7 +162,7 @@ typedef TickType_t EventBits_t;
#endif

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * event_groups.h
 * @code{c}
 * EventGroupHandle_t xEventGroupCreateStatic( EventGroupHandle_t * pxEventGroupBuffer );
@ -217,7 +217,7 @@ typedef TickType_t EventBits_t;
#endif

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * event_groups.h
 * @code{c}
 * EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
@ -307,7 +307,7 @@
 * }
 * }
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xEventGroupWaitBits xEventGroupWaitBits
 * @endcond
 * \ingroup EventGroup
@ -319,7 +319,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
                                 TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * event_groups.h
 * @code{c}
 * EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToClear );
@ -372,7 +372,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
 * }
 * }
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xEventGroupClearBits xEventGroupClearBits
 * @endcond
 * \ingroup EventGroup
@ -381,7 +381,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
                                  const EventBits_t uxBitsToClear ) PRIVILEGED_FUNCTION;

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * event_groups.h
 * @code{c}
 * BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet );
@ -432,7 +432,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
 * }
 * }
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xEventGroupClearBitsFromISR xEventGroupClearBitsFromISR
 * @endcond
 * \ingroup EventGroup
@ -446,7 +446,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
#endif

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * event_groups.h
 * @code{c}
 * EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet );
@ -516,7 +516,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
 * }
 * }
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xEventGroupSetBits xEventGroupSetBits
 * @endcond
 * \ingroup EventGroup
@ -525,7 +525,7 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
                                const EventBits_t uxBitsToSet ) PRIVILEGED_FUNCTION;

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * event_groups.h
 * @code{c}
 * BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, BaseType_t *pxHigherPriorityTaskWoken );
@ -595,7 +595,7 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
 * }
 * }
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xEventGroupSetBitsFromISR xEventGroupSetBitsFromISR
 * @endcond
 * \ingroup EventGroup
@ -610,7 +610,7 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
#endif

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * event_groups.h
 * @code{c}
 * EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
@ -732,7 +732,7 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
 * }
 *
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xEventGroupSync xEventGroupSync
 * @endcond
 * \ingroup EventGroup
@ -744,7 +744,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,


/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * event_groups.h
 * @code{c}
 * EventBits_t xEventGroupGetBits( EventGroupHandle_t xEventGroup );
@ -758,7 +758,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
 *
 * @return The event group bits at the time xEventGroupGetBits() was called.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xEventGroupGetBits xEventGroupGetBits
 * @endcond
 * \ingroup EventGroup
@ -766,7 +766,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
#define xEventGroupGetBits( xEventGroup ) xEventGroupClearBits( xEventGroup, 0 )
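The macro above reads the bits by clearing nothing: xEventGroupClearBits() returns the value the group held before the clear, and a zero mask leaves it untouched. A tiny hedged fragment:

    EventGroupHandle_t xGroup = xEventGroupCreate();
    xEventGroupSetBits( xGroup, 0x05 );
    EventBits_t uxBits = xEventGroupGetBits( xGroup ); /* expands to xEventGroupClearBits( xGroup, 0 ) */
    /* uxBits == 0x05 and the stored bits are unchanged, since no bit was cleared. */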

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * event_groups.h
 * @code{c}
 * EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup );
@ -779,7 +779,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
 *
 * @return The event group bits at the time xEventGroupGetBitsFromISR() was called.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xEventGroupGetBitsFromISR xEventGroupGetBitsFromISR
 * @endcond
 * \ingroup EventGroup
@ -787,7 +787,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION;

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * event_groups.h
 * @code{c}
 * void xEventGroupDelete( EventGroupHandle_t xEventGroup );
@ -802,7 +802,7 @@ EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup ) PRIVILEG
 */
void vEventGroupDelete( EventGroupHandle_t xEventGroup ) PRIVILEGED_FUNCTION;

/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */

/* For internal use only. */
void vEventGroupSetBitsCallback( void * pvEventGroup,

@ -85,7 +85,7 @@ typedef void * MessageBufferHandle_t;
/*-----------------------------------------------------------*/

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * message_buffer.h
 *
 * @code{c}
@ -139,7 +139,7 @@ typedef void * MessageBufferHandle_t;
 * }
 *
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xMessageBufferCreate xMessageBufferCreate
 * @endcond
 * \ingroup MessageBufferManagement
@ -148,7 +148,7 @@ typedef void * MessageBufferHandle_t;
    ( MessageBufferHandle_t ) xStreamBufferGenericCreate( xBufferSizeBytes, ( size_t ) 0, pdTRUE )

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * message_buffer.h
 *
 * @code{c}
@ -210,7 +210,7 @@ typedef void * MessageBufferHandle_t;
 * }
 *
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xMessageBufferCreateStatic xMessageBufferCreateStatic
 * @endcond
 * \ingroup MessageBufferManagement
@ -219,7 +219,7 @@ typedef void * MessageBufferHandle_t;
    ( MessageBufferHandle_t ) xStreamBufferGenericCreateStatic( xBufferSizeBytes, 0, pdTRUE, pucMessageBufferStorageArea, pxStaticMessageBuffer )

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * message_buffer.h
 *
 * @code{c}
@ -314,7 +314,7 @@ typedef void * MessageBufferHandle_t;
 * }
 * }
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xMessageBufferSend xMessageBufferSend
 * @endcond
 * \ingroup MessageBufferManagement
@ -323,7 +323,7 @@ typedef void * MessageBufferHandle_t;
    xStreamBufferSend( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, xTicksToWait )

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * message_buffer.h
 *
 * @code{c}
@ -423,7 +423,7 @@ typedef void * MessageBufferHandle_t;
 * portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 * }
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xMessageBufferSendFromISR xMessageBufferSendFromISR
 * @endcond
 * \ingroup MessageBufferManagement
@ -432,7 +432,7 @@ typedef void * MessageBufferHandle_t;
    xStreamBufferSendFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvTxData, xDataLengthBytes, pxHigherPriorityTaskWoken )

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * message_buffer.h
 *
 * @code{c}
@ -516,7 +516,7 @@ typedef void * MessageBufferHandle_t;
 * }
 * }
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xMessageBufferReceive xMessageBufferReceive
 * @endcond
 * \ingroup MessageBufferManagement
@ -526,7 +526,7 @@ typedef void * MessageBufferHandle_t;


/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * message_buffer.h
 *
 * @code{c}
@ -622,7 +622,7 @@ typedef void * MessageBufferHandle_t;
 * portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 * }
 * @endcode
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xMessageBufferReceiveFromISR xMessageBufferReceiveFromISR
 * @endcond
 * \ingroup MessageBufferManagement
@ -631,7 +631,7 @@ typedef void * MessageBufferHandle_t;
    xStreamBufferReceiveFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pvRxData, xBufferLengthBytes, pxHigherPriorityTaskWoken )

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * message_buffer.h
 *
 * @code{c}
@ -654,7 +654,7 @@ typedef void * MessageBufferHandle_t;
    vStreamBufferDelete( ( StreamBufferHandle_t ) xMessageBuffer )

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * message_buffer.h
 * @code{c}
 * BaseType_t xMessageBufferIsFull( MessageBufferHandle_t xMessageBuffer ) );
@ -674,7 +674,7 @@ typedef void * MessageBufferHandle_t;
    xStreamBufferIsFull( ( StreamBufferHandle_t ) xMessageBuffer )

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * message_buffer.h
 * @code{c}
 * BaseType_t xMessageBufferIsEmpty( MessageBufferHandle_t xMessageBuffer ) );
@ -693,7 +693,7 @@ typedef void * MessageBufferHandle_t;
    xStreamBufferIsEmpty( ( StreamBufferHandle_t ) xMessageBuffer )

/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * message_buffer.h
 * @code{c}
 * BaseType_t xMessageBufferReset( MessageBufferHandle_t xMessageBuffer );
@ -712,7 +712,7 @@ typedef void * MessageBufferHandle_t;
 * the message queue to wait for space to become available, or to wait for
 * a message to be available, then pdFAIL is returned.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xMessageBufferReset xMessageBufferReset
 * @endcond
 * \ingroup MessageBufferManagement
@ -722,7 +722,7 @@ typedef void * MessageBufferHandle_t;


/**
 * @cond
 * @cond !DOC_EXCLUDE_HEADER_SECTION
 * message_buffer.h
 * @code{c}
 * size_t xMessageBufferSpaceAvailable( MessageBufferHandle_t xMessageBuffer ) );
@ -740,7 +740,7 @@ typedef void * MessageBufferHandle_t;
 * architecture, so if xMessageBufferSpacesAvailable() returns 10, then the size
 * of the largest message that can be written to the message buffer is 6 bytes.
 *
 * @cond
 * @cond !DOC_SINGLE_GROUP
 * \defgroup xMessageBufferSpaceAvailable xMessageBufferSpaceAvailable
 * @endcond
 * \ingroup MessageBufferManagement
@ -751,7 +751,7 @@ typedef void * MessageBufferHandle_t;
xStreamBufferSpacesAvailable( ( StreamBufferHandle_t ) xMessageBuffer ) /* Corrects typo in original macro name. */
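The arithmetic in the note above comes from the sizeof( size_t ) length word (4 bytes on this architecture) that prefixes every stored message. A hedged fragment making it concrete:

    size_t xSpace = xMessageBufferSpaceAvailable( xMessageBuffer );
    size_t xLargest = ( xSpace > sizeof( size_t ) ) ? xSpace - sizeof( size_t ) : 0;
    /* With xSpace == 10 this gives 6, matching the example in the comment. */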
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* message_buffer.h
|
||||
* @code{c}
|
||||
* size_t xMessageBufferNextLengthBytes( MessageBufferHandle_t xMessageBuffer ) );
|
||||
@ -767,7 +767,7 @@ typedef void * MessageBufferHandle_t;
|
||||
* @return The length (in bytes) of the next message in the message buffer, or 0
|
||||
* if the message buffer is empty.
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xMessageBufferNextLengthBytes xMessageBufferNextLengthBytes
|
||||
* @endcond
|
||||
* \ingroup MessageBufferManagement
|
||||
@ -776,7 +776,7 @@ typedef void * MessageBufferHandle_t;
|
||||
xStreamBufferNextMessageLengthBytes( ( StreamBufferHandle_t ) xMessageBuffer ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* message_buffer.h
|
||||
*
|
||||
* @code{c}
|
||||
@ -811,7 +811,7 @@ typedef void * MessageBufferHandle_t;
|
||||
* @return If a task was removed from the Blocked state then pdTRUE is returned.
|
||||
* Otherwise pdFALSE is returned.
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xMessageBufferSendCompletedFromISR xMessageBufferSendCompletedFromISR
|
||||
* @endcond
|
||||
* \ingroup StreamBufferManagement
|
||||
@ -820,7 +820,7 @@ typedef void * MessageBufferHandle_t;
|
||||
xStreamBufferSendCompletedFromISR( ( StreamBufferHandle_t ) xMessageBuffer, pxHigherPriorityTaskWoken )
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* message_buffer.h
|
||||
*
|
||||
* @code{c}
|
||||
@ -856,7 +856,7 @@ typedef void * MessageBufferHandle_t;
|
||||
* @return If a task was removed from the Blocked state then pdTRUE is returned.
|
||||
* Otherwise pdFALSE is returned.
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xMessageBufferReceiveCompletedFromISR xMessageBufferReceiveCompletedFromISR
|
||||
* @endcond
|
||||
* \ingroup StreamBufferManagement
|
||||
|
@ -62,7 +62,7 @@ typedef struct QueueDefinition * QueueSetHandle_t;
|
||||
*/
|
||||
typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
|
||||
/** @cond */
|
||||
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
|
||||
|
||||
/* For internal use only. */
|
||||
#define queueSEND_TO_BACK ( ( BaseType_t ) 0 )
|
||||
@ -80,7 +80,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
/** @endcond */
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* QueueHandle_t xQueueCreate(
|
||||
@ -146,7 +146,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
* // ... Rest of task code.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueueCreate xQueueCreate
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -156,7 +156,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* QueueHandle_t xQueueCreateStatic(
|
||||
@ -235,7 +235,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
* // ... Rest of task code.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueueCreateStatic xQueueCreateStatic
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -245,7 +245,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
#endif /* configSUPPORT_STATIC_ALLOCATION */
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueueSendToToFront(
|
||||
@ -321,7 +321,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
* // ... Rest of task code.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueueSend xQueueSend
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -330,7 +330,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_FRONT )
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueueSendToBack(
|
||||
@ -408,7 +408,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
* // ... Rest of task code.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueueSend xQueueSend
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -417,7 +417,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK )
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueueSend(
|
||||
@ -497,7 +497,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
* // ... Rest of task code.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueueSend xQueueSend
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -506,7 +506,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK )
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueueOverwrite(
|
||||
@ -585,7 +585,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
* // ...
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueueOverwrite xQueueOverwrite
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -595,7 +595,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueueGenericSend(
|
||||
@ -678,7 +678,7 @@ typedef struct QueueDefinition * QueueSetMemberHandle_t;
|
||||
* // ... Rest of task code.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueueSend xQueueSend
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -689,7 +689,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
|
||||
const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueuePeek(
|
||||
@ -780,7 +780,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
|
||||
* // ... Rest of task code.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueuePeek xQueuePeek
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -790,7 +790,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
|
||||
TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueuePeekFromISR(
|
||||
@ -820,7 +820,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
|
||||
* @return pdTRUE if an item was successfully received from the queue,
|
||||
* otherwise pdFALSE.
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueuePeekFromISR xQueuePeekFromISR
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -829,7 +829,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
|
||||
void * const pvBuffer ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueueReceive(
|
||||
@ -917,7 +917,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
|
||||
* // ... Rest of task code.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueueReceive xQueueReceive
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -927,7 +927,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
|
||||
TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue );
|
||||
@ -940,7 +940,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
|
||||
*
|
||||
* @return The number of messages available in the queue.
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup uxQueueMessagesWaiting uxQueueMessagesWaiting
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -948,7 +948,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
|
||||
UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue );
|
||||
@ -963,7 +963,7 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) PRIVILEGED_FUNC
|
||||
*
|
||||
* @return The number of spaces available in the queue.
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup uxQueueMessagesWaiting uxQueueMessagesWaiting
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -971,7 +971,7 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue ) PRIVILEGED_FUNC
|
||||
UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* void vQueueDelete( QueueHandle_t xQueue );
|
||||
@ -983,7 +983,7 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) PRIVILEGED_FUNC
|
||||
*
|
||||
* @param xQueue A handle to the queue to be deleted.
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup vQueueDelete vQueueDelete
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -991,7 +991,7 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue ) PRIVILEGED_FUNC
|
||||
void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueueSendToFrontFromISR(
|
||||
@ -1057,7 +1057,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
|
||||
* }
|
||||
* @endcode
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueueSendFromISR xQueueSendFromISR
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -1067,7 +1067,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
|
||||
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueueSendToBackFromISR(
|
||||
@ -1133,7 +1133,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
|
||||
* }
|
||||
* @endcode
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueueSendFromISR xQueueSendFromISR
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -1142,7 +1142,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
|
||||
xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK )
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueueOverwriteFromISR(
|
||||
@ -1225,7 +1225,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
|
||||
* }
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueueOverwriteFromISR xQueueOverwriteFromISR
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -1234,7 +1234,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
|
||||
xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueOVERWRITE )
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueueSendFromISR(
|
||||
@ -1304,7 +1304,7 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
|
||||
* }
|
||||
* @endcode
|
||||
*
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueueSendFromISR xQueueSendFromISR
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -1312,10 +1312,10 @@ void vQueueDelete( QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
|
||||
#define xQueueSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken ) \
|
||||
xQueueGenericSendFromISR( ( xQueue ), ( pvItemToQueue ), ( pxHigherPriorityTaskWoken ), queueSEND_TO_BACK )
|
||||
|
||||
/** @cond */
|
||||
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
|
||||
/**@{*/
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueueGenericSendFromISR(
|
||||
@ -1402,7 +1402,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
|
||||
/** @endcond */
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* queue. h
|
||||
* @code{c}
|
||||
* BaseType_t xQueueReceiveFromISR(
|
||||
@ -1487,7 +1487,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
|
||||
* }
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xQueueReceiveFromISR xQueueReceiveFromISR
|
||||
* @endcond
|
||||
* \ingroup QueueManagement
|
||||
@ -1504,7 +1504,7 @@ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FU
|
||||
BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
|
||||
UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/** @cond */
|
||||
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
|
||||
/*
|
||||
* The functions defined above are for passing data to and from tasks. The
|
||||
* functions below are the equivalents for passing data to and from
|
||||
@ -1778,7 +1778,7 @@ QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
|
||||
*/
|
||||
QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet ) PRIVILEGED_FUNCTION;
|
||||
|
||||
/** @cond */
|
||||
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
|
||||
|
||||
/* Not public API functions. */
|
||||
void vQueueWaitForMessageRestricted( QueueHandle_t xQueue,
|
||||
|
@ -39,7 +39,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
#define semSEMAPHORE_QUEUE_ITEM_LENGTH ( ( uint8_t ) 0U )
|
||||
#define semGIVE_BLOCK_TIME ( ( TickType_t ) 0U )
|
||||
|
||||
/** @cond */
|
||||
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
|
||||
/**
|
||||
* semphr. h
|
||||
* @code{c}
|
||||
@ -88,7 +88,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
* }
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup vSemaphoreCreateBinary vSemaphoreCreateBinary
|
||||
* @endcond
|
||||
* \ingroup Semaphores
|
||||
@ -106,7 +106,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
/** @endcond */
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* semphr. h
|
||||
* @code{c}
|
||||
* SemaphoreHandle_t xSemaphoreCreateBinary( void );
|
||||
@ -163,7 +163,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
* }
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xSemaphoreCreateBinary xSemaphoreCreateBinary
|
||||
* @endcond
|
||||
* \ingroup Semaphores
|
||||
@ -173,7 +173,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* semphr. h
|
||||
* @code{c}
|
||||
* SemaphoreHandle_t xSemaphoreCreateBinaryStatic( StaticSemaphore_t *pxSemaphoreBuffer );
|
||||
@ -229,7 +229,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
* // Rest of task code goes here.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xSemaphoreCreateBinaryStatic xSemaphoreCreateBinaryStatic
|
||||
* @endcond
|
||||
* \ingroup Semaphores
|
||||
@ -239,7 +239,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
#endif /* configSUPPORT_STATIC_ALLOCATION */
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* semphr. h
|
||||
* @code{c}
|
||||
* xSemaphoreTake(
|
||||
@ -304,7 +304,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
* }
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xSemaphoreTake xSemaphoreTake
|
||||
* @endcond
|
||||
* \ingroup Semaphores
|
||||
@ -312,7 +312,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
#define xSemaphoreTake( xSemaphore, xBlockTime ) xQueueSemaphoreTake( ( xSemaphore ), ( xBlockTime ) )
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* semphr. h
|
||||
* @code{c}
|
||||
* xSemaphoreTakeRecursive(
|
||||
@ -403,7 +403,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
* }
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xSemaphoreTakeRecursive xSemaphoreTakeRecursive
|
||||
* @endcond
|
||||
* \ingroup Semaphores
|
||||
@ -465,7 +465,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
* }
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xSemaphoreGive xSemaphoreGive
|
||||
* @endcond
|
||||
* \ingroup Semaphores
|
||||
@ -473,7 +473,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
#define xSemaphoreGive( xSemaphore ) xQueueGenericSend( ( QueueHandle_t ) ( xSemaphore ), NULL, semGIVE_BLOCK_TIME, queueSEND_TO_BACK )
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* semphr. h
|
||||
* @code{c}
|
||||
* xSemaphoreGiveRecursive( SemaphoreHandle_t xMutex );
|
||||
@ -555,7 +555,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
* }
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xSemaphoreGiveRecursive xSemaphoreGiveRecursive
|
||||
* @endcond
|
||||
* \ingroup Semaphores
|
||||
@ -641,7 +641,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
* }
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xSemaphoreGiveFromISR xSemaphoreGiveFromISR
|
||||
* @endcond
|
||||
* \ingroup Semaphores
|
||||
@ -649,7 +649,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
#define xSemaphoreGiveFromISR( xSemaphore, pxHigherPriorityTaskWoken ) xQueueGiveFromISR( ( QueueHandle_t ) ( xSemaphore ), ( pxHigherPriorityTaskWoken ) )
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* semphr. h
|
||||
* @code{c}
|
||||
* xSemaphoreTakeFromISR(
|
||||
@ -686,7 +686,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
#define xSemaphoreTakeFromISR( xSemaphore, pxHigherPriorityTaskWoken ) xQueueReceiveFromISR( ( QueueHandle_t ) ( xSemaphore ), NULL, ( pxHigherPriorityTaskWoken ) )
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* semphr. h
|
||||
* @code{c}
|
||||
* SemaphoreHandle_t xSemaphoreCreateMutex( void );
|
||||
@ -741,7 +741,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
* }
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xSemaphoreCreateMutex xSemaphoreCreateMutex
|
||||
* @endcond
|
||||
* \ingroup Semaphores
|
||||
@ -751,7 +751,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* semphr. h
|
||||
* @code{c}
|
||||
* SemaphoreHandle_t xSemaphoreCreateMutexStatic( StaticSemaphore_t *pxMutexBuffer );
|
||||
@ -808,7 +808,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
* // so there is no need to check it.
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xSemaphoreCreateMutexStatic xSemaphoreCreateMutexStatic
|
||||
* @endcond
|
||||
* \ingroup Semaphores
|
||||
@ -951,7 +951,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
#endif /* configSUPPORT_STATIC_ALLOCATION */
|
||||
|
||||
/**
|
||||
* @cond
|
||||
* @cond !DOC_EXCLUDE_HEADER_SECTION
|
||||
* semphr. h
|
||||
* @code{c}
|
||||
* SemaphoreHandle_t xSemaphoreCreateCounting( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount );
|
||||
@ -1027,7 +1027,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
|
||||
* }
|
||||
* }
|
||||
* @endcode
|
||||
* @cond
|
||||
* @cond !DOC_SINGLE_GROUP
|
||||
* \defgroup xSemaphoreCreateCounting xSemaphoreCreateCounting
|
||||
* @endcond
* \ingroup Semaphores
@ -1037,7 +1037,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#endif

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr. h
* @code{c}
* SemaphoreHandle_t xSemaphoreCreateCountingStatic( UBaseType_t uxMaxCount, UBaseType_t uxInitialCount, StaticSemaphore_t *pxSemaphoreBuffer );
@ -1118,7 +1118,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
* // is no need to check its value.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xSemaphoreCreateCountingStatic xSemaphoreCreateCountingStatic
* @endcond
* \ingroup Semaphores
@ -1128,7 +1128,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#endif /* configSUPPORT_STATIC_ALLOCATION */

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr. h
* @code{c}
* void vSemaphoreDelete( SemaphoreHandle_t xSemaphore );
@ -1140,7 +1140,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
*
* @param xSemaphore A handle to the semaphore to be deleted.
*
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* \defgroup vSemaphoreDelete vSemaphoreDelete
* @endcond
* \ingroup Semaphores
@ -1148,7 +1148,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#define vSemaphoreDelete( xSemaphore ) vQueueDelete( ( QueueHandle_t ) ( xSemaphore ) )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr.h
* @code{c}
* TaskHandle_t xSemaphoreGetMutexHolder( SemaphoreHandle_t xMutex );
@ -1167,7 +1167,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#define xSemaphoreGetMutexHolder( xSemaphore ) xQueueGetMutexHolder( ( xSemaphore ) )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr.h
* @code{c}
* TaskHandle_t xSemaphoreGetMutexHolderFromISR( SemaphoreHandle_t xMutex );
@ -1182,7 +1182,7 @@ typedef QueueHandle_t SemaphoreHandle_t;
#define xSemaphoreGetMutexHolderFromISR( xSemaphore ) xQueueGetMutexHolderFromISR( ( xSemaphore ) )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* semphr.h
* @code{c}
* UBaseType_t uxSemaphoreGetCount( SemaphoreHandle_t xSemaphore );

@ -71,7 +71,7 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t;


/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* message_buffer.h
*
* @code{c}
@ -134,7 +134,7 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferCreate xStreamBufferCreate
* @endcond
* \ingroup StreamBufferManagement
@ -142,7 +142,7 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t;
#define xStreamBufferCreate( xBufferSizeBytes, xTriggerLevelBytes ) xStreamBufferGenericCreate( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -220,7 +220,7 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t;
* }
*
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferCreateStatic xStreamBufferCreateStatic
* @endcond
* \ingroup StreamBufferManagement
@ -229,7 +229,7 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t;
xStreamBufferGenericCreateStatic( xBufferSizeBytes, xTriggerLevelBytes, pdFALSE, pucStreamBufferStorageArea, pxStaticStreamBuffer )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -319,7 +319,7 @@ typedef struct StreamBufferDef_t * StreamBufferHandle_t;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferSend xStreamBufferSend
* @endcond
* \ingroup StreamBufferManagement
@ -330,7 +330,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -424,7 +424,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
* taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferSendFromISR xStreamBufferSendFromISR
* @endcond
* \ingroup StreamBufferManagement
@ -435,7 +435,7 @@ size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -517,7 +517,7 @@ size_t xStreamBufferSendFromISR( StreamBufferHandle_t xStreamBuffer,
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferReceive xStreamBufferReceive
* @endcond
* \ingroup StreamBufferManagement
@ -528,7 +528,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
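For quick reference, a minimal sketch of pushing bytes from a producer task to a consumer task with the two calls above; the buffer size, trigger level and function names are illustrative assumptions, not part of the header:

#include "freertos/FreeRTOS.h"
#include "freertos/stream_buffer.h"

static StreamBufferHandle_t xStreamBuffer;

void vStartStreaming( void )
{
    /* 64-byte buffer; the reader unblocks once at least 1 byte is available. */
    xStreamBuffer = xStreamBufferCreate( 64, 1 );
    configASSERT( xStreamBuffer != NULL );
}

static void vProducerTask( void * pvParameters )
{
    const char pcData[] = "hello";

    for( ;; )
    {
        /* Blocks for up to 100 ms if there is not enough free space. */
        xStreamBufferSend( xStreamBuffer, pcData, sizeof( pcData ), pdMS_TO_TICKS( 100 ) );
    }
}

static void vConsumerTask( void * pvParameters )
{
    uint8_t ucRxData[ 16 ];
    size_t xReceived;

    for( ;; )
    {
        /* Returns as soon as at least the trigger level has been written. */
        xReceived = xStreamBufferReceive( xStreamBuffer, ucRxData, sizeof( ucRxData ), portMAX_DELAY );
        ( void ) xReceived;
    }
}

Note that stream buffers assume a single writer and a single reader; with several of either, the accesses would need external serialisation.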

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -607,7 +607,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
* taskYIELD_FROM_ISR( xHigherPriorityTaskWoken );
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferReceiveFromISR xStreamBufferReceiveFromISR
* @endcond
* \ingroup StreamBufferManagement
@ -618,7 +618,7 @@ size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
BaseType_t * const pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -636,7 +636,7 @@ size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
*
* @param xStreamBuffer The handle of the stream buffer to be deleted.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vStreamBufferDelete vStreamBufferDelete
* @endcond
* \ingroup StreamBufferManagement
@ -644,7 +644,7 @@ size_t xStreamBufferReceiveFromISR( StreamBufferHandle_t xStreamBuffer,
void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -660,7 +660,7 @@ void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTI
* @return If the stream buffer is full then pdTRUE is returned. Otherwise
* pdFALSE is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferIsFull xStreamBufferIsFull
* @endcond
* \ingroup StreamBufferManagement
@ -668,7 +668,7 @@ void vStreamBufferDelete( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTI
BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -684,7 +684,7 @@ BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_
* @return If the stream buffer is empty then pdTRUE is returned. Otherwise
* pdFALSE is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferIsEmpty xStreamBufferIsEmpty
* @endcond
* \ingroup StreamBufferManagement
@ -692,7 +692,7 @@ BaseType_t xStreamBufferIsFull( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_
BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -711,7 +711,7 @@ BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED
* a task blocked waiting to send to or read from the stream buffer then the
* stream buffer is not reset and pdFAIL is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferReset xStreamBufferReset
* @endcond
* \ingroup StreamBufferManagement
@ -719,7 +719,7 @@ BaseType_t xStreamBufferIsEmpty( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED
BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -736,7 +736,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_F
* @return The number of bytes that can be written to the stream buffer before
* the stream buffer would be full.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferSpacesAvailable xStreamBufferSpacesAvailable
* @endcond
* \ingroup StreamBufferManagement
@ -744,7 +744,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_F
size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -761,7 +761,7 @@ size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVIL
* @return The number of bytes that can be read from the stream buffer before
* the stream buffer would be empty.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferBytesAvailable xStreamBufferBytesAvailable
* @endcond
* \ingroup StreamBufferManagement
@ -769,7 +769,7 @@ size_t xStreamBufferSpacesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVIL
size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -802,7 +802,7 @@ size_t xStreamBufferBytesAvailable( StreamBufferHandle_t xStreamBuffer ) PRIVILE
* then the trigger level will be updated and pdTRUE is returned. Otherwise
* pdFALSE is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferSetTriggerLevel xStreamBufferSetTriggerLevel
* @endcond
* \ingroup StreamBufferManagement
@ -811,7 +811,7 @@ BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
size_t xTriggerLevel ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -846,7 +846,7 @@ BaseType_t xStreamBufferSetTriggerLevel( StreamBufferHandle_t xStreamBuffer,
* @return If a task was removed from the Blocked state then pdTRUE is returned.
* Otherwise pdFALSE is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferSendCompletedFromISR xStreamBufferSendCompletedFromISR
* @endcond
* \ingroup StreamBufferManagement
@ -855,7 +855,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer
BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* stream_buffer.h
*
* @code{c}
@ -891,7 +891,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer
* @return If a task was removed from the Blocked state then pdTRUE is returned.
* Otherwise pdFALSE is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xStreamBufferReceiveCompletedFromISR xStreamBufferReceiveCompletedFromISR
* @endcond
* \ingroup StreamBufferManagement
@ -899,7 +899,7 @@ BaseType_t xStreamBufferSendCompletedFromISR( StreamBufferHandle_t xStreamBuffer
BaseType_t xStreamBufferReceiveCompletedFromISR( StreamBufferHandle_t xStreamBuffer,
BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION;

/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
/* Functions below here are not part of the public API. */
StreamBufferHandle_t xStreamBufferGenericCreate( size_t xBufferSizeBytes,
size_t xTriggerLevelBytes,

@ -76,7 +76,7 @@
* returns (via a pointer parameter) a TaskHandle_t variable that can then
* be used as a parameter to vTaskDelete to delete the task.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup TaskHandle_t TaskHandle_t
* @endcond
* \ingroup Tasks
@ -114,7 +114,7 @@ typedef enum
eSetValueWithoutOverwrite /* Set the task's notification value if the previous value has been read by the task. */
} eNotifyAction;

/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
/**
* Used internally only.
*/
@ -189,11 +189,13 @@ typedef enum
#define tskIDLE_PRIORITY ( ( UBaseType_t ) 0U )

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @endcond
*
* Macro for forcing a context switch.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup taskYIELD taskYIELD
* @endcond
* \ingroup SchedulerControl
@ -201,7 +203,9 @@ typedef enum
#define taskYIELD() portYIELD()

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @endcond
*
* Macro to mark the start of a critical code region. Preemptive context
* switches cannot occur when in a critical region.
@ -209,7 +213,7 @@ typedef enum
* @note This may alter the stack (depending on the portable implementation)
* so must be used with care!
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup taskENTER_CRITICAL taskENTER_CRITICAL
* @endcond
* \ingroup SchedulerControl
@ -228,7 +232,9 @@ typedef enum
#endif // ESP_PLATFORM
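On this SMP port the critical-section macros take a spinlock argument rather than being parameterless as in vanilla FreeRTOS; a minimal sketch, with the mux and counter names being illustrative:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

/* Spinlock protecting the shared counter on this dual-core port. */
static portMUX_TYPE xCounterMux = portMUX_INITIALIZER_UNLOCKED;
static volatile uint32_t ulSharedCounter = 0;

void vIncrementSharedCounter( void )
{
    /* Interrupts on this core and the other core's access to this
     * lock are both held off while the section is held. */
    taskENTER_CRITICAL( &xCounterMux );
    ulSharedCounter++;
    taskEXIT_CRITICAL( &xCounterMux );
}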

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @endcond
*
* Macro to mark the end of a critical code region. Preemptive context
* switches cannot occur when in a critical region.
@ -236,7 +242,7 @@ typedef enum
* @note This may alter the stack (depending on the portable implementation)
* so must be used with care!
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL
* @endcond
* \ingroup SchedulerControl
@ -255,11 +261,13 @@ typedef enum
#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( )
#endif // ESP_PLATFORM
/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @endcond
*
* Macro to disable all maskable interrupts.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup taskDISABLE_INTERRUPTS taskDISABLE_INTERRUPTS
* @endcond
* \ingroup SchedulerControl
@ -267,11 +275,13 @@ typedef enum
#define taskDISABLE_INTERRUPTS() portDISABLE_INTERRUPTS()

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @endcond
*
* Macro to enable microcontroller interrupts.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup taskENABLE_INTERRUPTS taskENABLE_INTERRUPTS
* @endcond
* \ingroup SchedulerControl
@ -422,7 +432,7 @@ typedef enum
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskCreate xTaskCreate
* @endcond
* \ingroup Tasks
@ -430,14 +440,14 @@ typedef enum
#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

static inline IRAM_ATTR BaseType_t xTaskCreate(
TaskFunction_t pvTaskCode,
const char * const pcName,
const uint32_t usStackDepth,
void * const pvParameters,
UBaseType_t uxPriority,
TaskHandle_t * const pvCreatedTask)
TaskFunction_t pvTaskCode,
const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
const uint32_t usStackDepth,
void * const pvParameters,
UBaseType_t uxPriority,
TaskHandle_t * const pxCreatedTask) PRIVILEGED_FUNCTION
{
return xTaskCreatePinnedToCore( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pvCreatedTask, tskNO_AFFINITY );
return xTaskCreatePinnedToCore( pvTaskCode, pcName, usStackDepth, pvParameters, uxPriority, pxCreatedTask, tskNO_AFFINITY );
}

#endif
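For reference, a minimal sketch of calling the wrapper above; the task body, stack size and priority are illustrative:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static void vBlinkTask( void * pvParameters )
{
    for( ;; )
    {
        /* Toggle an LED, poll a sensor, etc. */
        vTaskDelay( pdMS_TO_TICKS( 500 ) );
    }
}

void vStartBlink( void )
{
    TaskHandle_t xHandle = NULL;

    /* 2048-byte stack (on the ESP-IDF port the depth is in bytes, not words),
     * priority 5; the wrapper leaves the task unpinned (tskNO_AFFINITY). */
    if( xTaskCreate( vBlinkTask, "blink", 2048, NULL, 5, &xHandle ) != pdPASS )
    {
        /* Allocation failed - the task was not created. */
    }
}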
@ -599,20 +609,20 @@ typedef enum

#if( configSUPPORT_STATIC_ALLOCATION == 1 )
static inline IRAM_ATTR TaskHandle_t xTaskCreateStatic(
TaskFunction_t pvTaskCode,
const char * const pcName,
const uint32_t ulStackDepth,
void * const pvParameters,
UBaseType_t uxPriority,
StackType_t * const pxStackBuffer,
StaticTask_t * const pxTaskBuffer)
TaskFunction_t pvTaskCode,
const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
const uint32_t ulStackDepth,
void * const pvParameters,
UBaseType_t uxPriority,
StackType_t * const puxStackBuffer,
StaticTask_t * const pxTaskBuffer) PRIVILEGED_FUNCTION
{
return xTaskCreateStaticPinnedToCore( pvTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, pxStackBuffer, pxTaskBuffer, tskNO_AFFINITY );
return xTaskCreateStaticPinnedToCore( pvTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, tskNO_AFFINITY );
}
#endif /* configSUPPORT_STATIC_ALLOCATION */
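The static variant under the same assumption, with configSUPPORT_STATIC_ALLOCATION set to 1; the buffer sizes and names are illustrative:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

#define WORKER_STACK_SIZE 4096 /* Bytes on the ESP-IDF port. */

/* Both buffers are owned by the application and must outlive the task. */
static StackType_t xWorkerStack[ WORKER_STACK_SIZE ];
static StaticTask_t xWorkerTCB;

static void vWorkerTask( void * pvParameters )
{
    for( ;; )
    {
        vTaskDelay( pdMS_TO_TICKS( 1000 ) );
    }
}

void vStartWorker( void )
{
    /* No heap allocation: the stack and TCB buffers above are used directly. */
    TaskHandle_t xHandle = xTaskCreateStatic( vWorkerTask, "worker", WORKER_STACK_SIZE,
                                              NULL, 4, xWorkerStack, &xWorkerTCB );
    configASSERT( xHandle != NULL );
}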

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskCreateRestricted( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );
@ -683,18 +693,18 @@ typedef enum
* for( ;; );
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskCreateRestricted xTaskCreateRestricted
* @endcond
* \ingroup Tasks
*/
#if ( portUSING_MPU_WRAPPERS == 1 )
BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
TaskHandle_t * pxCreatedTask );
TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION;
#endif

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskCreateRestrictedStatic( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );
@ -777,7 +787,7 @@ typedef enum
* for( ;; );
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskCreateRestrictedStatic xTaskCreateRestrictedStatic
* @endcond
* \ingroup Tasks
@ -788,7 +798,7 @@ typedef enum
#endif

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions );
@ -833,7 +843,7 @@ typedef enum
* // defined or shared regions have been declared elsewhere).
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskCreateRestricted xTaskCreateRestricted
* @endcond
* \ingroup Tasks
@ -842,7 +852,7 @@ void vTaskAllocateMPURegions( TaskHandle_t xTask,
const MemoryRegion_t * const pxRegions ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskDelete( TaskHandle_t xTask );
@ -881,7 +891,7 @@ void vTaskAllocateMPURegions( TaskHandle_t xTask,
* vTaskDelete( xHandle );
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskDelete vTaskDelete
* @endcond
* \ingroup Tasks
@ -893,10 +903,12 @@ void vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION;
*----------------------------------------------------------*/

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskDelay( const TickType_t xTicksToDelay );
* @endcode
* @endcond
*
* Delay a task for a given number of ticks. The actual time that the
* task remains blocked depends on the tick rate. The constant
@ -938,7 +950,7 @@ void vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION;
* }
* @endcode
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskDelay vTaskDelay
* @endcond
* \ingroup TaskCtrl
@ -946,10 +958,12 @@ void vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION;
void vTaskDelay( const TickType_t xTicksToDelay ) PRIVILEGED_FUNCTION;
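A sketch contrasting the two delay styles: vTaskDelay() gives a relative delay that drifts by the loop's execution time, while xTaskDelayUntil() (documented just below) gives a fixed-frequency wakeup:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static void vPeriodicTask( void * pvParameters )
{
    TickType_t xLastWakeTime = xTaskGetTickCount();
    const TickType_t xPeriod = pdMS_TO_TICKS( 10 );

    for( ;; )
    {
        /* The next wake time is computed from the previous one, so the
         * processing time inside the loop does not drift the period. */
        xTaskDelayUntil( &xLastWakeTime, xPeriod );
        /* ... do the periodic work here ... */
    }
}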

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskDelayUntil( TickType_t *pxPreviousWakeTime, const TickType_t xTimeIncrement );
* @endcode
* @endcond
*
* INCLUDE_xTaskDelayUntil must be defined as 1 for this function to be available.
* See the configuration section for more information.
@ -1007,7 +1021,7 @@ void vTaskDelay( const TickType_t xTicksToDelay ) PRIVILEGED_FUNCTION;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskDelayUntil xTaskDelayUntil
* @endcond
* \ingroup TaskCtrl
@ -1026,7 +1040,7 @@ BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,


/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskAbortDelay( TaskHandle_t xTask );
@ -1054,7 +1068,7 @@ BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
* @return If the task referenced by xTask was not in the Blocked state then
* pdFAIL is returned. Otherwise pdPASS is returned.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskAbortDelay xTaskAbortDelay
* @endcond
* \ingroup TaskCtrl
@ -1062,7 +1076,7 @@ BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask );
@ -1107,7 +1121,7 @@ BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup uxTaskPriorityGet uxTaskPriorityGet
* @endcond
* \ingroup TaskCtrl
@ -1115,7 +1129,7 @@ BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask );
@ -1127,7 +1141,7 @@ UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* eTaskState eTaskGetState( TaskHandle_t xTask );
@ -1149,7 +1163,7 @@ UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) PRIVILEGED_FUNC
eTaskState eTaskGetState( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState );
@ -1203,7 +1217,7 @@ eTaskState eTaskGetState( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
* eInvalid ); // Include the task state in xTaskDetails.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskGetInfo vTaskGetInfo
* @endcond
* \ingroup TaskCtrl
@ -1214,7 +1228,7 @@ void vTaskGetInfo( TaskHandle_t xTask,
eTaskState eState ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority );
@ -1254,7 +1268,7 @@ void vTaskGetInfo( TaskHandle_t xTask,
* vTaskPrioritySet( NULL, tskIDLE_PRIORITY + 1 );
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskPrioritySet vTaskPrioritySet
* @endcond
* \ingroup TaskCtrl
@ -1263,7 +1277,7 @@ void vTaskPrioritySet( TaskHandle_t xTask,
UBaseType_t uxNewPriority ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskSuspend( TaskHandle_t xTaskToSuspend );
@ -1312,7 +1326,7 @@ void vTaskPrioritySet( TaskHandle_t xTask,
* // with our handle as the parameter.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskSuspend vTaskSuspend
* @endcond
* \ingroup TaskCtrl
@ -1320,7 +1334,7 @@ void vTaskPrioritySet( TaskHandle_t xTask,
void vTaskSuspend( TaskHandle_t xTaskToSuspend ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskResume( TaskHandle_t xTaskToResume );
@ -1367,7 +1381,7 @@ void vTaskSuspend( TaskHandle_t xTaskToSuspend ) PRIVILEGED_FUNCTION;
* // time in accordance with its priority within the system.
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskResume vTaskResume
* @endcond
* \ingroup TaskCtrl
@ -1375,7 +1389,7 @@ void vTaskSuspend( TaskHandle_t xTaskToSuspend ) PRIVILEGED_FUNCTION;
void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
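A small sketch pairing the two calls, assuming the worker's handle was saved at creation; the names are illustrative:

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

extern TaskHandle_t xWorkerHandle; /* Saved when the worker task was created. */

void vPauseWorker( void )
{
    vTaskSuspend( xWorkerHandle );  /* The worker gets no CPU time from here on. */
}

void vUnpauseWorker( void )
{
    vTaskResume( xWorkerHandle );   /* Ready to run again. */
}

Suspensions do not nest: a single vTaskResume() call readies the task regardless of how many times vTaskSuspend() was called on it.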

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void xTaskResumeFromISR( TaskHandle_t xTaskToResume );
@ -1402,7 +1416,7 @@ void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
* otherwise pdFALSE. This is used by the ISR to determine if a context switch
* may be required following the ISR.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskResumeFromISR vTaskResumeFromISR
* @endcond
* \ingroup TaskCtrl
@ -1412,9 +1426,9 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------
* SCHEDULER CONTROL
*----------------------------------------------------------*/
/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskStartScheduler( void );
@ -1445,7 +1459,7 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
* }
* @endcode
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskStartScheduler vTaskStartScheduler
* @endcond
* \ingroup SchedulerControl
@ -1453,7 +1467,7 @@ BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION;
void vTaskStartScheduler( void ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskEndScheduler( void );
@ -1507,7 +1521,7 @@ void vTaskStartScheduler( void ) PRIVILEGED_FUNCTION;
* }
* @endcode
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskEndScheduler vTaskEndScheduler
* @endcond
* \ingroup SchedulerControl
@ -1517,7 +1531,7 @@ void vTaskEndScheduler( void ) PRIVILEGED_FUNCTION;
/** @endcond */

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskSuspendAll( void );
@ -1566,7 +1580,7 @@ void vTaskEndScheduler( void ) PRIVILEGED_FUNCTION;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskSuspendAll vTaskSuspendAll
* @endcond
* \ingroup SchedulerControl
@ -1574,7 +1588,7 @@ void vTaskEndScheduler( void ) PRIVILEGED_FUNCTION;
void vTaskSuspendAll( void ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskResumeAll( void );
@ -1626,7 +1640,7 @@ void vTaskSuspendAll( void ) PRIVILEGED_FUNCTION;
* }
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskResumeAll xTaskResumeAll
* @endcond
* \ingroup SchedulerControl
@ -1638,7 +1652,7 @@ BaseType_t xTaskResumeAll( void ) PRIVILEGED_FUNCTION;
*----------------------------------------------------------*/

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* TickType_t xTaskGetTickCount( void );
@ -1647,7 +1661,7 @@ BaseType_t xTaskResumeAll( void ) PRIVILEGED_FUNCTION;
*
* @return The count of ticks since vTaskStartScheduler was called.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskGetTickCount xTaskGetTickCount
* @endcond
* \ingroup TaskUtils
@ -1655,7 +1669,7 @@ BaseType_t xTaskResumeAll( void ) PRIVILEGED_FUNCTION;
TickType_t xTaskGetTickCount( void ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* TickType_t xTaskGetTickCountFromISR( void );
@ -1669,7 +1683,7 @@ TickType_t xTaskGetTickCount( void ) PRIVILEGED_FUNCTION;
* microcontroller being used or interrupt nesting is either not supported or
* not being used.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskGetTickCountFromISR xTaskGetTickCountFromISR
* @endcond
* \ingroup TaskUtils
@ -1677,7 +1691,7 @@ TickType_t xTaskGetTickCount( void ) PRIVILEGED_FUNCTION;
TickType_t xTaskGetTickCountFromISR( void ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* uint16_t uxTaskGetNumberOfTasks( void );
@ -1689,7 +1703,7 @@ TickType_t xTaskGetTickCountFromISR( void ) PRIVILEGED_FUNCTION;
* has been deleted but not yet freed by the idle task will also be
* included in the count.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup uxTaskGetNumberOfTasks uxTaskGetNumberOfTasks
* @endcond
* \ingroup TaskUtils
@ -1697,7 +1711,7 @@ TickType_t xTaskGetTickCountFromISR( void ) PRIVILEGED_FUNCTION;
UBaseType_t uxTaskGetNumberOfTasks( void ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* char *pcTaskGetName( TaskHandle_t xTaskToQuery );
@ -1708,7 +1722,7 @@ UBaseType_t uxTaskGetNumberOfTasks( void ) PRIVILEGED_FUNCTION;
* xTaskToQuery. A task can query its own name by either passing in its own
* handle, or by setting xTaskToQuery to NULL.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup pcTaskGetName pcTaskGetName
* @endcond
* \ingroup TaskUtils
@ -1716,7 +1730,7 @@ UBaseType_t uxTaskGetNumberOfTasks( void ) PRIVILEGED_FUNCTION;
char * pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* TaskHandle_t xTaskGetHandle( const char *pcNameToQuery );
@ -1730,7 +1744,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; /*lin
* NULL is returned if no matching name is found. INCLUDE_xTaskGetHandle
* must be set to 1 in FreeRTOSConfig.h for pcTaskGetHandle() to be available.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup pcTaskGetHandle pcTaskGetHandle
* @endcond
* \ingroup TaskUtils
@ -1813,7 +1827,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;
#ifdef configUSE_APPLICATION_TASK_TAG
#if configUSE_APPLICATION_TASK_TAG == 1
/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task.h
* @code{c}
* void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction );
@ -1830,7 +1844,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;
TaskHookFunction_t pxHookFunction ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task.h
* @code{c}
* void xTaskGetApplicationTaskTag( TaskHandle_t xTask );
@ -1844,7 +1858,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;
TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task.h
* @code{c}
* void xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask );
@ -1932,7 +1946,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;
#if ( configCHECK_FOR_STACK_OVERFLOW > 0 )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task.h
* @code{c}
* void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );
@ -1952,7 +1966,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;

#if ( configUSE_TICK_HOOK > 0 )
/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task.h
* @code{c}
* void vApplicationTickHook( void );
@ -1967,7 +1981,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )
/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task.h
* @code{c}
* void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer, StackType_t ** ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize )
@ -1986,7 +2000,7 @@ uint8_t* pxTaskGetStackStart( TaskHandle_t xTask) PRIVILEGED_FUNCTION;
#endif

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task.h
* @code{c}
* BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter );
@ -2155,7 +2169,7 @@ UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
* enough to contain the generated report. Approximately 40 bytes per
* task should be sufficient.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskList vTaskList
* @endcond
* \ingroup TaskUtils
@ -2210,7 +2224,7 @@ void vTaskList( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unq
* contain the generated report. Approximately 40 bytes per task should
* be sufficient.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskGetRunTimeStats vTaskGetRunTimeStats
* @endcond
* \ingroup TaskUtils
@ -2218,7 +2232,7 @@ void vTaskList( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unq
void vTaskGetRunTimeStats( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code
* uint32_t ulTaskGetIdleRunTimeCounter( void );
@ -2246,7 +2260,7 @@ void vTaskGetRunTimeStats( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lin
* frequency configured using the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and
* portGET_RUN_TIME_COUNTER_VALUE() macros.
*
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup ulTaskGetIdleRunTimeCounter ulTaskGetIdleRunTimeCounter
* @endcond
* \ingroup TaskUtils
@ -2254,11 +2268,13 @@ void vTaskGetRunTimeStats( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lin
uint32_t ulTaskGetIdleRunTimeCounter( void ) PRIVILEGED_FUNCTION;

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskNotifyIndexed( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction );
* BaseType_t xTaskNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction );
* @endcode
* @endcond
*
* See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
*
@ -2359,7 +2375,9 @@ uint32_t ulTaskGetIdleRunTimeCounter( void ) PRIVILEGED_FUNCTION;
* @return Dependent on the value of eAction. See the description of the
* eAction parameter.
*
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskNotifyIndexed xTaskNotifyIndexed
* @endcond
* \ingroup TaskNotifications
*/
BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
@ -2373,11 +2391,13 @@ BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
xTaskGenericNotify( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), NULL )

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskNotifyAndQueryIndexed( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotifyValue );
* BaseType_t xTaskNotifyAndQuery( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotifyValue );
* @endcode
* @endcond
*
* See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
*
@ -2393,7 +2413,9 @@ BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
* than when the function returns) in the additional pulPreviousNotifyValue
* parameter.
*
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskNotifyAndQueryIndexed xTaskNotifyAndQueryIndexed
* @endcond
* \ingroup TaskNotifications
*/
#define xTaskNotifyAndQuery( xTaskToNotify, ulValue, eAction, pulPreviousNotifyValue ) \
@ -2402,11 +2424,13 @@ BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
xTaskGenericNotify( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotifyValue ) )

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskNotifyIndexedFromISR( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken );
* BaseType_t xTaskNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken );
* @endcode
* @endcond
*
* See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
*
@ -2511,7 +2535,9 @@ BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
* @return Dependent on the value of eAction. See the description of the
* eAction parameter.
*
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskNotifyIndexedFromISR xTaskNotifyIndexedFromISR
* @endcond
* \ingroup TaskNotifications
*/
BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
@ -2526,11 +2552,13 @@ BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), NULL, ( pxHigherPriorityTaskWoken ) )

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskNotifyAndQueryIndexedFromISR( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken );
* BaseType_t xTaskNotifyAndQueryFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken );
* @endcode
* @endcond
*
* See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
*
@ -2546,7 +2574,9 @@ BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
* function is called rather than at the time the function returns) in the
* additional pulPreviousNotifyValue parameter.
*
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskNotifyAndQueryIndexedFromISR xTaskNotifyAndQueryIndexedFromISR
* @endcond
* \ingroup TaskNotifications
*/
#define xTaskNotifyAndQueryIndexedFromISR( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken ) \
@ -2555,12 +2585,14 @@ BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( ulValue ), ( eAction ), ( pulPreviousNotificationValue ), ( pxHigherPriorityTaskWoken ) )

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskNotifyWaitIndexed( UBaseType_t uxIndexToWaitOn, uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait );
*
* BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait );
* @endcode
* @endcond
*
* Waits for a direct to task notification to be pending at a given index within
* an array of direct to task notifications.
@ -2655,7 +2687,9 @@ BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
* already pending when xTaskNotifyWait was called) then pdPASS is
* returned. Otherwise pdFAIL is returned.
*
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskNotifyWaitIndexed xTaskNotifyWaitIndexed
* @endcond
* \ingroup TaskNotifications
*/
BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
@ -2669,11 +2703,13 @@ BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
xTaskGenericNotifyWait( ( uxIndexToWaitOn ), ( ulBitsToClearOnEntry ), ( ulBitsToClearOnExit ), ( pulNotificationValue ), ( xTicksToWait ) )

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskNotifyGiveIndexed( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify );
* BaseType_t xTaskNotifyGive( TaskHandle_t xTaskToNotify );
* @endcode
* @endcond
*
* Sends a direct to task notification to a particular index in the target
* task's notification array in a manner similar to giving a counting semaphore.
@ -2737,7 +2773,9 @@ BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
* @return xTaskNotifyGive() is a macro that calls xTaskNotify() with the
* eAction parameter set to eIncrement - so pdPASS is always returned.
*
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskNotifyGiveIndexed xTaskNotifyGiveIndexed
* @endcond
* \ingroup TaskNotifications
*/
#define xTaskNotifyGive( xTaskToNotify ) \
@ -2746,11 +2784,13 @@ BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
xTaskGenericNotify( ( xTaskToNotify ), ( uxIndexToNotify ), ( 0 ), eIncrement, NULL )

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* void vTaskNotifyGiveIndexedFromISR( TaskHandle_t xTaskHandle, UBaseType_t uxIndexToNotify, BaseType_t *pxHigherPriorityTaskWoken );
* void vTaskNotifyGiveFromISR( TaskHandle_t xTaskHandle, BaseType_t *pxHigherPriorityTaskWoken );
* @endcode
* @endcond
*
* A version of xTaskNotifyGiveIndexed() that can be called from an interrupt
* service routine (ISR).
@ -2821,7 +2861,9 @@ BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn,
* requested from an ISR is dependent on the port - see the documentation page
* for the port in use.
*
* @cond !DOC_SINGLE_GROUP
* \defgroup vTaskNotifyGiveIndexedFromISR vTaskNotifyGiveIndexedFromISR
* @endcond
* \ingroup TaskNotifications
*/
void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
@ -2833,12 +2875,14 @@ void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
vTaskGenericNotifyGiveFromISR( ( xTaskToNotify ), ( uxIndexToNotify ), ( pxHigherPriorityTaskWoken ) );

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* uint32_t ulTaskNotifyTakeIndexed( UBaseType_t uxIndexToWaitOn, BaseType_t xClearCountOnExit, TickType_t xTicksToWait );
*
* uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait );
* @endcode
* @endcond
*
* Waits for a direct to task notification on a particular index in the calling
* task's notification array in a manner similar to taking a counting semaphore.
@ -2927,7 +2971,9 @@ void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
* @return The task's notification count before it is either cleared to zero or
* decremented (see the xClearCountOnExit parameter).
*
* @cond !DOC_SINGLE_GROUP
* \defgroup ulTaskNotifyTakeIndexed ulTaskNotifyTakeIndexed
* @endcond
* \ingroup TaskNotifications
*/
uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
@ -2939,12 +2985,14 @@ uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
ulTaskGenericNotifyTake( ( uxIndexToWaitOn ), ( xClearCountOnExit ), ( xTicksToWait ) )
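Used together, the give/take pair behaves like a lightweight binary semaphore; a sketch of an ISR deferring work to a handler task (names are illustrative, and the handle is assumed to be set before the interrupt is enabled):

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"

static TaskHandle_t xHandlerTask = NULL;

void IRAM_ATTR vExampleISR( void )
{
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* Unblock the handler; faster than giving a semaphore from an ISR. */
    vTaskNotifyGiveFromISR( xHandlerTask, &xHigherPriorityTaskWoken );

    if( xHigherPriorityTaskWoken == pdTRUE )
    {
        portYIELD_FROM_ISR();
    }
}

static void vHandlerTask( void * pvParameters )
{
    for( ;; )
    {
        /* Clear the count on exit so each wakeup handles one burst of events. */
        if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) > 0 )
        {
            /* ... process the interrupt's work here ... */
        }
    }
}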

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* BaseType_t xTaskNotifyStateClearIndexed( TaskHandle_t xTask, UBaseType_t uxIndexToCLear );
*
* BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask );
* @endcode
* @endcond
*
* See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
*
@ -2992,7 +3040,9 @@ uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn,
* @return pdTRUE if the task's notification state was set to
* eNotWaitingNotification, otherwise pdFALSE.
*
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskNotifyStateClearIndexed xTaskNotifyStateClearIndexed
* @endcond
* \ingroup TaskNotifications
*/
BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
@ -3003,12 +3053,14 @@ BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
xTaskGenericNotifyStateClear( ( xTask ), ( uxIndexToClear ) )

/**
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task. h
* @code{c}
* uint32_t ulTaskNotifyValueClearIndexed( TaskHandle_t xTask, UBaseType_t uxIndexToClear, uint32_t ulBitsToClear );
*
* uint32_t ulTaskNotifyValueClear( TaskHandle_t xTask, uint32_t ulBitsToClear );
* @endcode
* @endcond
*
* See https://www.FreeRTOS.org/RTOS-task-notifications.html for details.
*
@ -3057,7 +3109,9 @@ BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
*
* @return The value of the target task's notification value before the bits
* specified by ulBitsToClear were cleared.
* @cond !DOC_SINGLE_GROUP
* \defgroup ulTaskNotifyValueClear ulTaskNotifyValueClear
* @endcond
* \ingroup TaskNotifications
*/
uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
@ -3069,7 +3123,7 @@ uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
ulTaskGenericNotifyValueClear( ( xTask ), ( uxIndexToClear ), ( ulBitsToClear ) )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task.h
* @code{c}
* void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut );
@ -3082,14 +3136,14 @@ uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
* is to be captured. The captured time includes the tick count and the number
* of times the tick count has overflowed since the system first booted.
* \defgroup vTaskSetTimeOutState vTaskSetTimeOutState
* @cond
* @cond !DOC_SINGLE_GROUP
* \ingroup TaskCtrl
* @endcond
*/
void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task.h
* @code
* BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait );
@ -3170,7 +3224,7 @@ void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION;
* return uxReceived;
* }
* @endcode
* @cond
* @cond !DOC_SINGLE_GROUP
* \defgroup xTaskCheckForTimeOut xTaskCheckForTimeOut
* @endcond
* \ingroup TaskCtrl
@ -3179,7 +3233,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
TickType_t * const pxTicksToWait ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task.h
* @code{c}
* BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp );
@ -3204,7 +3258,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
* blocked state and a context switch being performed. Otherwise pdFALSE.
*
* \defgroup xTaskCatchUpTicks xTaskCatchUpTicks
* @cond
* @cond !DOC_SINGLE_GROUP
* \ingroup TaskCtrl
* @endcond
*/
@ -3214,7 +3268,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) PRIVILEGED_FUNCTION;
/*-----------------------------------------------------------
* SCHEDULER INTERNALS AVAILABLE FOR PORTING PURPOSES
*----------------------------------------------------------*/
/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */
/*
* Return the handle of the task running on a certain CPU. Because of
* the nature of SMP processing, there is no guarantee that this
@ -3335,8 +3389,8 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
* making the call, otherwise pdFALSE.
*/
BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION;
BaseType_t xTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
const TickType_t xItemValue ) PRIVILEGED_FUNCTION;
void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
const TickType_t xItemValue ) PRIVILEGED_FUNCTION;

/*
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY
@ -3399,11 +3453,6 @@ void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder,
*/
UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;

/*
* Get the current core affinity of a task
*/
BaseType_t xTaskGetAffinity( TaskHandle_t xTask ) PRIVILEGED_FUNCTION;

/*
* Set the uxTaskNumber of the task referenced by the xTask parameter to
* uxHandle.

@ -450,7 +450,7 @@ void vTimerSetTimerID( TimerHandle_t xTimer,
BaseType_t xTimerIsTimerActive( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* TaskHandle_t xTimerGetTimerDaemonTaskHandle( void );
* @endcond
*
@ -1315,7 +1315,7 @@ TickType_t xTimerGetPeriod( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;
*/
TickType_t xTimerGetExpiryTime( TimerHandle_t xTimer ) PRIVILEGED_FUNCTION;

/** @cond */
/** @cond !DOC_EXCLUDE_HEADER_SECTION */

/*
* Functions beyond this part are not part of the public API and are intended
@ -1339,7 +1339,7 @@ BaseType_t xTimerGenericCommand( TimerHandle_t xTimer,
#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

/**
* @cond
* @cond !DOC_EXCLUDE_HEADER_SECTION
* task.h
* @code{c}
* void vApplicationGetTimerTaskMemory( StaticTask_t ** ppxTimerTaskTCBBuffer, StackType_t ** ppxTimerTaskStackBuffer, uint32_t *pulTimerTaskStackSize )

File diff suppressed because it is too large
@ -1,78 +0,0 @@
/*
FreeRTOS V8.2.0 - Copyright (C) 2015 Real Time Engineers Ltd.
All rights reserved

VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.

***************************************************************************
* *
* FreeRTOS provides completely free yet professionally developed, *
* robust, strictly quality controlled, supported, and cross *
* platform software that has become a de facto standard. *
* *
* Help yourself get started quickly and support the FreeRTOS *
* project by purchasing a FreeRTOS tutorial book, reference *
* manual, or both from: http://www.FreeRTOS.org/Documentation *
* *
* Thank you! *
* *
***************************************************************************

This file is part of the FreeRTOS distribution.

FreeRTOS is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License (version 2) as published by the
Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.

>>! NOTE: The modification to the GPL is included to allow you to !<<
>>! distribute a combined work that includes FreeRTOS without being !<<
>>! obliged to provide the source code for proprietary components !<<
>>! outside of the FreeRTOS kernel. !<<

FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. Full license text is available from the following
link: http://www.freertos.org/a00114.html

1 tab == 4 spaces!

***************************************************************************
* *
* Having a problem? Start by reading the FAQ "My application does *
* not run, what could be wrong?" *
* *
* http://www.FreeRTOS.org/FAQHelp.html *
* *
***************************************************************************

http://www.FreeRTOS.org - Documentation, books, training, latest versions,
license and Real Time Engineers Ltd. contact details.

http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
including FreeRTOS+Trace - an indispensable productivity tool, a DOS
compatible FAT file system, and our tiny thread aware UDP/IP stack.

http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High
Integrity Systems to sell under the OpenRTOS brand. Low cost OpenRTOS
licenses offer ticketed support, indemnification and middleware.

http://www.SafeRTOS.com - High Integrity Systems also provide a safety
engineered and independently SIL3 certified version for use in safety and
mission critical applications that require provable dependability.

1 tab == 4 spaces!
*/


/* This header holds the macros for porting which should only be used inside FreeRTOS */

#pragma once
#include "soc/soc_memory_layout.h"

//xTaskCreateStatic uses these functions to check incoming memory.
#define portVALID_TCB_MEM(ptr) (esp_ptr_internal(ptr) && esp_ptr_byte_accessible(ptr))
#ifdef CONFIG_SPIRAM_ALLOW_STACK_EXTERNAL_MEMORY
#define portVALID_STACK_MEM(ptr) esp_ptr_byte_accessible(ptr)
#else
#define portVALID_STACK_MEM(ptr) (esp_ptr_internal(ptr) && esp_ptr_byte_accessible(ptr))
#endif

@ -1,16 +1,8 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*******************************************************************************
 * NOTICE
@ -176,28 +168,28 @@ void i2s_hal_enable_slave_fd_mode(i2s_hal_context_t *hal);
 *
 * @param hal Context of the HAL layer
 */
#define i2s_hal_start_tx(hal) i2s_ll_tx_start((hal)->dev)
void i2s_hal_start_tx(i2s_hal_context_t *hal);

/**
 * @brief Start I2S rx
 *
 * @param hal Context of the HAL layer
 */
#define i2s_hal_start_rx(hal) i2s_ll_rx_start((hal)->dev)
void i2s_hal_start_rx(i2s_hal_context_t *hal);

/**
 * @brief Stop I2S tx
 *
 * @param hal Context of the HAL layer
 */
#define i2s_hal_stop_tx(hal) i2s_ll_tx_stop((hal)->dev)
void i2s_hal_stop_tx(i2s_hal_context_t *hal);

/**
 * @brief Stop I2S rx
 *
 * @param hal Context of the HAL layer
 */
#define i2s_hal_stop_rx(hal) i2s_ll_rx_stop((hal)->dev)
void i2s_hal_stop_rx(i2s_hal_context_t *hal);

/**
 * @brief Set the received data length to trigger `in_suc_eof` interrupt.
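
The hunk above converts the i2s_hal start/stop helpers from macros that
forwarded straight to the LL layer into real function declarations. The
definitions are not part of this excerpt; a plausible shape, assuming they
keep the old one-line forwarding behavior, would be:

    /* Sketch only: the actual bodies in the commit may do more bookkeeping. */
    void i2s_hal_start_tx(i2s_hal_context_t *hal)
    {
        i2s_ll_tx_start(hal->dev);
    }

    void i2s_hal_stop_tx(i2s_hal_context_t *hal)
    {
        i2s_ll_tx_stop(hal->dev);
    }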

@ -1,22 +1,8 @@
// Copyright 2021 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*******************************************************************************
 * NOTICE
 * The HAL is not a public API; don't use it in application code.
 * See soc/README.md
 ******************************************************************************/
/*
 * SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

@ -1,16 +1,8 @@
// Copyright 2021 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

@ -21,15 +13,12 @@ extern "C" {
/**
 * @brief LCD clock source
 * @note User should select the clock source based on the real requirement:
 * ╔═════════════════════╦══════════════════════════╦════════════════════════════╗
 * ║ LCD clock source    ║ Features                 ║ Power Management           ║
 * ╠═════════════════════╬══════════════════════════╬════════════════════════════╣
 * ║ LCD_CLK_SRC_PLL160M ║ High resolution, fixed   ║ ESP_PM_APB_FREQ_MAX lock   ║
 * ╠═════════════════════╬══════════════════════════╬════════════════════════════╣
 * ║ LCD_CLK_SRC_APLL    ║ Configurable resolution  ║ ESP_PM_NO_LIGHT_SLEEP lock ║
 * ╠═════════════════════╬══════════════════════════╬════════════════════════════╣
 * ║ LCD_CLK_SRC_XTAL    ║ Medium resolution, fixed ║ No PM lock                 ║
 * ╚═════════════════════╩══════════════════════════╩════════════════════════════╝
 *
 * | LCD clock source    | Features                 | Power Management           |
 * |---------------------|--------------------------|----------------------------|
 * | LCD_CLK_SRC_PLL160M | High resolution, fixed   | ESP_PM_APB_FREQ_MAX lock   |
 * | LCD_CLK_SRC_APLL    | Configurable resolution  | ESP_PM_NO_LIGHT_SLEEP lock |
 * | LCD_CLK_SRC_XTAL    | Medium resolution, fixed | No PM lock                 |
 */
typedef enum {
    LCD_CLK_SRC_PLL160M, /*!< Select PLL160M as the source clock */
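
The table boils down to a power-management trade-off: PLL160M holds an
ESP_PM_APB_FREQ_MAX lock, APLL blocks light sleep, and XTAL needs no lock at
all. A hedged selection helper (the lcd_clock_source_t type name and header
location are assumptions; only the enumerators appear in this diff):

    #include <stdbool.h>
    #include "hal/lcd_types.h"   /* assumed home of the enum above */

    /* Pick the source with the smallest PM footprint that still satisfies
     * the resolution requirement, per the table above. */
    static lcd_clock_source_t pick_lcd_clock(bool need_high_res, bool need_tunable_res)
    {
        if (need_tunable_res) {
            return LCD_CLK_SRC_APLL;                /* ESP_PM_NO_LIGHT_SLEEP lock */
        }
        return need_high_res ? LCD_CLK_SRC_PLL160M  /* ESP_PM_APB_FREQ_MAX lock */
                             : LCD_CLK_SRC_XTAL;    /* no PM lock */
    }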

@ -1,16 +1,8 @@
// Copyright 2015-2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

@ -155,12 +147,23 @@ typedef enum {
    TOUCH_PAD_INTR_MASK_INACTIVE = BIT(2),  /*!<Inactive for one of the enabled channels. */
    TOUCH_PAD_INTR_MASK_SCAN_DONE = BIT(3), /*!<Measurement done for all the enabled channels. */
    TOUCH_PAD_INTR_MASK_TIMEOUT = BIT(4),   /*!<Timeout for one of the enabled channels. */
#if SOC_TOUCH_PROXIMITY_MEAS_DONE_SUPPORTED
    TOUCH_PAD_INTR_MASK_PROXI_MEAS_DONE = BIT(5), /*!<For proximity sensor, when the number of measurements reaches the set count of measurements, an interrupt will be generated. */
    TOUCH_PAD_INTR_MASK_MAX
#define TOUCH_PAD_INTR_MASK_ALL (TOUCH_PAD_INTR_MASK_TIMEOUT    \
                                | TOUCH_PAD_INTR_MASK_SCAN_DONE \
                                | TOUCH_PAD_INTR_MASK_INACTIVE  \
                                | TOUCH_PAD_INTR_MASK_ACTIVE    \
                                | TOUCH_PAD_INTR_MASK_DONE      \
                                | TOUCH_PAD_INTR_MASK_PROXI_MEAS_DONE) /*!<All touch interrupt type enable. */
#else
    TOUCH_PAD_INTR_MASK_MAX
#define TOUCH_PAD_INTR_MASK_ALL (TOUCH_PAD_INTR_MASK_TIMEOUT    \
                                | TOUCH_PAD_INTR_MASK_SCAN_DONE \
                                | TOUCH_PAD_INTR_MASK_INACTIVE  \
                                | TOUCH_PAD_INTR_MASK_ACTIVE    \
                                | TOUCH_PAD_INTR_MASK_DONE) /*!<All touch interrupt type enable. */
#endif
} touch_pad_intr_mask_t;
FLAG_ATTR(touch_pad_intr_mask_t)
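
Since FLAG_ATTR makes the mask enum usable as a bit set, the usual pattern is
to OR the masks when enabling interrupts. A sketch, assuming the S2/S3 touch
driver's touch_pad_intr_enable() as the consumer of this type:

    #include "driver/touch_sensor.h"

    void demo_enable_touch_interrupts(void)
    {
        /* Enable activity, inactivity and timeout interrupts in one call. */
        touch_pad_intr_enable(TOUCH_PAD_INTR_MASK_ACTIVE |
                              TOUCH_PAD_INTR_MASK_INACTIVE |
                              TOUCH_PAD_INTR_MASK_TIMEOUT);
    }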

@ -266,6 +266,12 @@

#endif

/**
 * CONFIG_LWIP_DHCP_OPTIONS_LEN: The total length of the outgoing DHCP options field. If you have many
 * options or long option values, configure the length to match your requirements.
 */
#define DHCP_OPTIONS_LEN CONFIG_LWIP_DHCP_OPTIONS_LEN
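
lwip's protocol headers define a 68-byte minimum for the options field
(DHCP_MIN_OPTIONS_LEN) and fall back to it when the configured value is
smaller. A local compile-time guard can make an undersized Kconfig value
visible instead of silently corrected (the assert itself is an illustration,
not part of this diff):

    #include <assert.h>

    /* Fail loudly if CONFIG_LWIP_DHCP_OPTIONS_LEN undercuts lwip's minimum. */
    static_assert(DHCP_OPTIONS_LEN >= 68,
                  "CONFIG_LWIP_DHCP_OPTIONS_LEN must be at least 68 bytes");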

/*
   ------------------------------------
   ---------- AUTOIP options ----------

@ -187,7 +187,10 @@ esp_err_t mdns_instance_name_set(const char * instance_name);
 * @note The value length of txt items will be automatically decided by strlen
 *
 * @param instance_name instance name to set. If NULL,
 *                      global instance name or hostname will be used
 *                      global instance name or hostname will be used.
 *                      Note that MDNS_MULTIPLE_INSTANCE config option
 *                      needs to be enabled for adding multiple instances
 *                      with the same instance type.
 * @param service_type service type (_http, _ftp, etc)
 * @param proto service protocol (_tcp, _udp)
 * @param port service port
@ -209,6 +212,9 @@ esp_err_t mdns_service_add(const char * instance_name, const char * service_type
 *
 * @param instance_name instance name to set. If NULL,
 *                      global instance name or hostname will be used
 *                      Note that MDNS_MULTIPLE_INSTANCE config option
 *                      needs to be enabled for adding multiple instances
 *                      with the same instance type.
 * @param service_type service type (_http, _ftp, etc)
 * @param proto service protocol (_tcp, _udp)
 * @param hostname service hostname. If NULL, local hostname will be used.
@ -238,6 +244,22 @@ esp_err_t mdns_service_add_for_host(const char * instance_name, const char * ser
 */
bool mdns_service_exists(const char * service_type, const char * proto, const char * hostname);

/**
 * @brief Check whether a service has been added.
 *
 * @param instance instance name
 * @param service_type service type (_http, _ftp, etc)
 * @param proto service protocol (_tcp, _udp)
 * @param hostname service hostname. If NULL, checks for the local hostname.
 *
 * @return
 *     - true   Corresponding service has been added.
 *     - false  Service not found.
 */
bool mdns_service_exists_with_instance(const char *instance, const char *service_type, const char *proto,
                                       const char *hostname);
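
A hedged usage sketch pairing the new instance-level lookup with
mdns_service_add (instance and service names are placeholders):

    #include "mdns.h"

    void demo_advertise_http(void)
    {
        /* Add a second "_http._tcp" instance only if it is not registered
         * yet; MDNS_MULTIPLE_INSTANCE must be enabled for same-type
         * instances, per the doc above. */
        if (!mdns_service_exists_with_instance("web-1", "_http", "_tcp", NULL)) {
            mdns_service_add("web-1", "_http", "_tcp", 80, NULL, 0);
        }
    }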

/**
 * @brief Remove service from mDNS server
 *

@ -206,6 +206,7 @@ typedef struct {
    int network_timeout_ms;         /*!< Abort network operation if it is not completed after this value, in milliseconds (defaults to 10s) */
    bool disable_keepalive;         /*!< Set disable_keepalive=true to turn off keep-alive mechanism, false by default (keepalive is active by default). Note: setting the config value `keepalive` to `0` doesn't disable keepalive feature, but uses a default keepalive period */
    const char *path;               /*!< Path in the URI */
    int message_retransmit_timeout; /*!< Timeout for retransmission of failed packets */
} esp_mqtt_client_config_t;

/**
@ -375,6 +376,7 @@ esp_err_t esp_mqtt_client_destroy(esp_mqtt_client_handle_t client);
 * @param config mqtt configuration structure
 *
 * @return ESP_ERR_NO_MEM if failed to allocate
 *         ESP_ERR_INVALID_ARG if conflicts on transport configuration.
 *         ESP_OK on success
 */
esp_err_t esp_mqtt_set_config(esp_mqtt_client_handle_t client, const esp_mqtt_client_config_t *config);
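
A sketch of reconfiguring a live client with the new field (the millisecond
unit is an assumption; the diff does not state it):

    #include "mqtt_client.h"

    void demo_retune_mqtt(esp_mqtt_client_handle_t client)
    {
        esp_mqtt_client_config_t cfg = {
            .uri = "mqtt://broker.example.com",
            .message_retransmit_timeout = 1500, /* assumed to be in ms */
        };
        if (esp_mqtt_set_config(client, &cfg) != ESP_OK) {
            /* ESP_ERR_NO_MEM or ESP_ERR_INVALID_ARG per the doc above */
        }
    }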

@ -1,16 +1,13 @@
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Allow for this warning suppression only in IDF_CI_BUILD
#if !defined(ESP_OPENSSL_SUPPRESS_LEGACY_WARNING) || !defined(IDF_CI_BUILD)
#warning "OpenSSL component will be removed from ESP-IDF in v5.0, please use esp_tls instead"
#endif

#ifndef _SSL_H_
#define _SSL_H_

@ -1,16 +1,8 @@
// Copyright 2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2018-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

@ -86,6 +78,10 @@ typedef struct protocomm_ble_config {
     * Pointer to the Name-UUID lookup table
     */
    protocomm_ble_name_uuid_t *nu_lookup;

    /* BLE bonding */
    unsigned ble_bonding:1;

} protocomm_ble_config_t;
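
A hedged initialization sketch showing the new bit-field; the other members
and their values are placeholders, not mandated by this diff:

    #include "protocomm_ble.h"

    static const protocomm_ble_config_t s_ble_cfg = {
        .device_name  = "PROV_DEMO",
        .service_uuid = { 0x21, 0x43, 0x65, 0x87, 0x09, 0xba, 0xdc, 0xfe,
                          0xef, 0xcd, 0xab, 0x90, 0x78, 0x56, 0x34, 0x12 },
        .ble_bonding  = 1,  /* request bonding during provisioning */
    };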

/**

@ -493,6 +493,7 @@ typedef struct rtc_sleep_config_s {
    uint32_t rtc_slowmem_pd_en : 1;    //!< power down RTC slow memory
    uint32_t rtc_peri_pd_en : 1;       //!< power down RTC peripherals
    uint32_t wifi_pd_en : 1;           //!< power down WiFi
    uint32_t int_8m_pd_en : 1;         //!< Power down Internal 8M oscillator
    uint32_t rom_mem_pd_en : 1;        //!< power down main RAM and ROM
    uint32_t deep_slp : 1;             //!< power down digital domain
    uint32_t wdt_flashboot_mod_en : 1; //!< enable WDT flashboot mode
@ -513,6 +514,7 @@ typedef struct rtc_sleep_config_s {
 *
 * @param RTC_SLEEP_PD_x flags combined using bitwise OR
 */
#define is_dslp(pd_flags) ((pd_flags) & RTC_SLEEP_PD_DIG)
#define RTC_SLEEP_CONFIG_DEFAULT(sleep_flags) { \
    .lslp_mem_inf_fpu = 0, \
    .rtc_mem_inf_fpu = 0, \
@ -521,13 +523,18 @@ typedef struct rtc_sleep_config_s {
    .rtc_slowmem_pd_en = ((sleep_flags) & RTC_SLEEP_PD_RTC_SLOW_MEM) ? 1 : 0, \
    .rtc_peri_pd_en = ((sleep_flags) & RTC_SLEEP_PD_RTC_PERIPH) ? 1 : 0, \
    .wifi_pd_en = 0, \
    .int_8m_pd_en = is_dslp(sleep_flags) ? 1 : ((sleep_flags) & RTC_SLEEP_PD_INT_8M) ? 1 : 0, \
    .rom_mem_pd_en = 0, \
    .deep_slp = ((sleep_flags) & RTC_SLEEP_PD_DIG) ? 1 : 0, \
    .wdt_flashboot_mod_en = 0, \
    .dig_dbias_wak = RTC_CNTL_DBIAS_1V10, \
    .dig_dbias_slp = RTC_CNTL_DBIAS_0V90, \
    .dig_dbias_slp = is_dslp(sleep_flags) ? RTC_CNTL_DBIAS_0V90 \
                     : !((sleep_flags) & RTC_SLEEP_PD_INT_8M) ? RTC_CNTL_DBIAS_1V10 \
                     : RTC_CNTL_DBIAS_0V90, \
    .rtc_dbias_wak = RTC_CNTL_DBIAS_1V10, \
    .rtc_dbias_slp = RTC_CNTL_DBIAS_0V90, \
    .rtc_dbias_slp = is_dslp(sleep_flags) ? RTC_CNTL_DBIAS_0V90 \
                     : !((sleep_flags) & RTC_SLEEP_PD_INT_8M) ? RTC_CNTL_DBIAS_1V10 \
                     : RTC_CNTL_DBIAS_0V90, \
    .lslp_meminf_pd = 1, \
    .vddsdio_pd_en = ((sleep_flags) & RTC_SLEEP_PD_VDDSDIO) ? 1 : 0, \
    .xtal_fpu = ((sleep_flags) & RTC_SLEEP_PD_XTAL) ? 0 : 1 \
@ -540,6 +547,7 @@ typedef struct rtc_sleep_config_s {
#define RTC_SLEEP_PD_RTC_MEM_FOLLOW_CPU BIT(4) //!< RTC FAST and SLOW memories are automatically powered up and down along with the CPU
#define RTC_SLEEP_PD_VDDSDIO            BIT(5) //!< Power down VDDSDIO regulator
#define RTC_SLEEP_PD_XTAL               BIT(6) //!< Power down main XTAL
#define RTC_SLEEP_PD_INT_8M             BIT(7) //!< Power down Internal 8M oscillator

/* Various delays to be programmed into power control state machines */
#define RTC_CNTL_XTL_BUF_WAIT_SLP_US    (500)
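
With is_dslp() in place, RTC_SLEEP_CONFIG_DEFAULT derives both the 8 MHz
oscillator power-down and the sleep-time bias from the flags: deep sleep
always powers the oscillator down and uses the 0.90 V bias, while a light
sleep that keeps the oscillator running stays at 1.10 V. A usage sketch
(flag choice illustrative):

    #include "soc/rtc.h"

    void demo_prepare_light_sleep(void)
    {
        /* No RTC_SLEEP_PD_DIG and no RTC_SLEEP_PD_INT_8M: this expands to
         * int_8m_pd_en = 0 and dig/rtc sleep bias RTC_CNTL_DBIAS_1V10. */
        rtc_sleep_config_t cfg = RTC_SLEEP_CONFIG_DEFAULT(RTC_SLEEP_PD_VDDSDIO);
        rtc_sleep_init(cfg);
    }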

@ -1,16 +1,8 @@
// Copyright 2020 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * Soc capabilities file, describing the following chip attributes:
@ -239,11 +231,11 @@
#define SOC_TIMER_GROUP_TOTAL_TIMERS (SOC_TIMER_GROUPS * SOC_TIMER_GROUP_TIMERS_PER_GROUP)

/*-------------------------- TOUCH SENSOR CAPS -------------------------------*/
#define SOC_TOUCH_VERSION_1             (1)    /*!<Hardware version of touch sensor */
#define SOC_TOUCH_SENSOR_NUM            (10)

#define SOC_TOUCH_PAD_MEASURE_WAIT_MAX  (0xFF) /*!<The timer frequency is 8 MHz; the max value is 0xff */
#define SOC_TOUCH_PAD_THRESHOLD_MAX     (0)    /*!<If the threshold is set to the max value, the touch sensor can never enter the touched state */
#define SOC_TOUCH_PAD_WAKE_SUPPORTED    (1)    /*!<Supports waking up from touch pad trigger */

/*-------------------------- TWAI CAPS ---------------------------------------*/
#define SOC_TWAI_BRP_MIN                2
@ -295,6 +287,7 @@

/*-------------------------- Power Management CAPS ---------------------------*/
#define SOC_PM_SUPPORT_EXT_WAKEUP           (1)
#define SOC_PM_SUPPORT_TOUCH_SENSOR_WAKEUP  (1) /*!<Supports waking up from touch pad trigger */

/* ---------------------------- Compatibility ------------------------------- */
#define SOC_CAN_SUPPORTED SOC_TWAI_SUPPORTED

@ -1,16 +1,8 @@
// Copyright 2021 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
 * SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

@ -181,7 +181,9 @@ esp_partition_iterator_t esp_partition_next(esp_partition_iterator_t iterator);
/**
 * @brief Release partition iterator
 *
 * @param iterator Iterator obtained using esp_partition_find. Must be non-NULL.
 * @param iterator Iterator obtained using esp_partition_find.
 *                 The iterator is allowed to be NULL, so it is not necessary to check its value
 *                 before calling this function.
 *
 */
void esp_partition_iterator_release(esp_partition_iterator_t iterator);
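
The NULL-tolerant release is exactly what the canonical iteration loop needs,
since the iterator variable is NULL once esp_partition_next() runs off the
end of the list:

    #include <stdio.h>
    #include "esp_partition.h"

    void demo_list_data_partitions(void)
    {
        esp_partition_iterator_t it = esp_partition_find(ESP_PARTITION_TYPE_DATA,
                                                         ESP_PARTITION_SUBTYPE_ANY,
                                                         NULL);
        for (; it != NULL; it = esp_partition_next(it)) {
            const esp_partition_t *p = esp_partition_get(it);
            printf("%s @ 0x%08x, size 0x%x\n",
                   p->label, (unsigned) p->address, (unsigned) p->size);
        }
        esp_partition_iterator_release(it); /* it is NULL here; now explicitly OK */
    }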

@ -4,7 +4,9 @@
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * System level MSPI APIs (private)
 */
/**
 * Currently the MSPI timing tuning related APIs are designed to be private.
 * Because:
@ -17,6 +19,10 @@
 */
#pragma once

#include <stdint.h>
#include <stdbool.h>
#include "sdkconfig.h"
#include "esp_err.h"
#if CONFIG_IDF_TARGET_ESP32
#include "esp32/rom/spi_flash.h"
#elif CONFIG_IDF_TARGET_ESP32S2
@ -34,9 +40,9 @@ extern "C" {
#endif

/**
 * @brief Register ROM functions and init flash device registers to make use of octal flash
 * @brief To set up the Flash chip
 */
esp_err_t esp_opiflash_init(void);
esp_err_t spi_flash_init_chip_state(void);

/**
 * @brief Make MSPI work under 20 MHz
@ -88,6 +94,12 @@ void spi_timing_get_flash_timing_param(spi_flash_hal_timing_config_t *out_timing
 */
bool spi_timine_config_flash_is_tuned(void);

/**
 * @brief Set Flash chip specifically required MSPI register settings here
 */
void spi_flash_set_vendor_required_regs(void);

#ifdef __cplusplus
}
#endif

@ -0,0 +1,64 @@
/*
 * SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _ESP_MBO_H
#define _ESP_MBO_H

#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif

/**
 * enum non_pref_chan_reason: Reason for non-preference of a channel
 */
enum non_pref_chan_reason {
    NON_PREF_CHAN_REASON_UNSPECIFIED = 0,
    NON_PREF_CHAN_REASON_RSSI = 1,
    NON_PREF_CHAN_REASON_EXT_INTERFERENCE = 2,
    NON_PREF_CHAN_REASON_INT_INTERFERENCE = 3,
};

/**
 * @brief Channel structure for a non-preferred channel
 *
 * @param reason: enum non_pref_chan_reason
 * @param oper_class: operating class for the channel
 * @param chan: channel number
 * @param preference: channel preference
 */
struct non_pref_chan {
    enum non_pref_chan_reason reason;
    uint8_t oper_class;
    uint8_t chan;
    uint8_t preference;
};

/**
 * @brief Array structure for the non-preferred channel struct
 *
 * @param non_pref_chan_num: channel count
 * @param chan: array of non_pref_chan type
 */
struct non_pref_chan_s {
    size_t non_pref_chan_num;
    struct non_pref_chan chan[];
};

/**
 * @brief Update channel preference for MBO IE
 *
 * @param non_pref_chan: Non-preferred channel list
 *
 * @return
 *     - 0 on success, else failure
 */
int esp_mbo_update_non_pref_chan(struct non_pref_chan_s *non_pref_chan);
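
Because struct non_pref_chan_s ends in a flexible array member, callers
allocate the header and the entries in one block. A sketch (the operating
class and preference values are illustrative):

    #include <stdlib.h>
    #include "esp_mbo.h"   /* filename inferred from the _ESP_MBO_H guard */

    int demo_set_non_pref_chan(void)
    {
        size_t n = 1;
        struct non_pref_chan_s *list =
            calloc(1, sizeof(*list) + n * sizeof(struct non_pref_chan));
        if (list == NULL) {
            return -1;
        }
        list->non_pref_chan_num  = n;
        list->chan[0].reason     = NON_PREF_CHAN_REASON_INT_INTERFERENCE;
        list->chan[0].oper_class = 81; /* 2.4 GHz band, 20 MHz channels */
        list->chan[0].chan       = 11;
        list->chan[0].preference = 0;  /* assumed: lower = less preferred */

        int ret = esp_mbo_update_non_pref_chan(list); /* 0 on success */
        free(list);
        return ret;
    }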

#ifdef __cplusplus
}
#endif
#endif

@ -1,17 +1,7 @@
/**
 * Copyright 2020 Espressif Systems (Shanghai) PTE LTD
/*
 * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef _ESP_WNM_H
@ -29,11 +19,13 @@ enum btm_query_reason {
    REASON_UNSPECIFIED = 0,
    REASON_FRAME_LOSS = 1,
    REASON_DELAY = 2,
    REASON_QOS_CAPACITY = 3,
    REASON_FIRST_ASSOC = 4,
    REASON_LOAD_BALALNCE = 5,
    REASON_BETTER_AP = 6,
    REASON_CURRENT_DEAUTH = 7,
    REASON_BANDWIDTH = 3,
    REASON_LOAD_BALANCE = 4,
    REASON_RSSI = 5,
    REASON_RETRANSMISSIONS = 6,
    REASON_INTERFERENCE = 7,
    REASON_GRAY_ZONE = 8,
    REASON_PREMIUM_AP = 9,
};
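
The remapped reason codes feed BSS transition management queries. A hedged
sketch (the query function's exact signature is an assumption based on this
header's API; it is not shown in the excerpt):

    #include "esp_wnm.h"

    void demo_btm_query_on_weak_link(void)
    {
        /* Ask the AP for roaming candidates because the current RSSI is
         * poor; no candidate list is supplied here. */
        esp_wnm_send_bss_transition_mgmt_query(REASON_RSSI, NULL, 0);
    }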

/**
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -323,7 +323,7 @@ SECTIONS
    *libesp_system.a:ubsan.*(.literal .literal.* .text .text.*)
    *libfreertos.a:(EXCLUDE_FILE(*libfreertos.a:port.* *libfreertos.a:port_common.*) .literal EXCLUDE_FILE(*libfreertos.a:port.* *libfreertos.a:port_common.*) .literal.* EXCLUDE_FILE(*libfreertos.a:port.* *libfreertos.a:port_common.*) .text EXCLUDE_FILE(*libfreertos.a:port.* *libfreertos.a:port_common.*) .text.*)
    *libfreertos.a:port.*(.literal.pxPortInitialiseStack .literal.unlikely.vPortEndScheduler .literal.vApplicationStackOverflowHook .literal.vPortAssertIfInISR .literal.vPortEnterCritical .literal.vPortExitCritical .literal.vPortReleaseTaskMPUSettings .literal.vPortSetStackWatchpoint .literal.vPortYieldOtherCore .literal.xPortInIsrContext .literal.xPortStartScheduler .text .text.pxPortInitialiseStack .text.unlikely.vPortEndScheduler .text.vApplicationStackOverflowHook .text.vPortAssertIfInISR .text.vPortEnterCritical .text.vPortExitCritical .text.vPortReleaseTaskMPUSettings .text.vPortSetStackWatchpoint .text.vPortStoreTaskMPUSettings .text.vPortYieldOtherCore .text.xPortGetTickRateHz .text.xPortInIsrContext .text.xPortStartScheduler)
    *libfreertos.a:port_common.*(.literal.esp_startup_start_app_common .text .text.esp_startup_start_app_common)
    *libfreertos.a:port_common.*(.literal.esp_startup_start_app_common .literal.xPortCheckValidTCBMem .literal.xPortcheckValidStackMem .text .text.esp_startup_start_app_common .text.xPortCheckValidTCBMem .text.xPortcheckValidStackMem)
    *libgcc.a:lib2funcs.*(.literal .literal.* .text .text.*)
    *libgcov.a:(.literal .literal.* .text .text.*)
    *libhal.a:cpu_hal.*(.literal .literal.* .text .text.*)
Binary file not shown.
Binary file not shown.
Binary file not shown.
Some files were not shown because too many files have changed in this diff.