IDF release/v4.4 6a7d83af19

* Update toolchain to 2021r2 (#5828)
* Update C3 libs to support only ECO3 and newer
Author: Me No Dev
Date: 2021-11-04 14:22:34 +02:00
Committed by: GitHub
Parent: caa8d07aaf
Commit: 418ac74be0

319 changed files with 5807 additions and 221 deletions

View File

@@ -173,8 +173,8 @@
#define CONFIG_ESP_TLS_SERVER 1
#define CONFIG_ESP32C3_DEFAULT_CPU_FREQ_160 1
#define CONFIG_ESP32C3_DEFAULT_CPU_FREQ_MHZ 160
-#define CONFIG_ESP32C3_REV_MIN_0 1
-#define CONFIG_ESP32C3_REV_MIN 0
+#define CONFIG_ESP32C3_REV_MIN_3 1
+#define CONFIG_ESP32C3_REV_MIN 3
#define CONFIG_ESP32C3_DEBUG_OCDAWARE 1
#define CONFIG_ESP32C3_BROWNOUT_DET 1
#define CONFIG_ESP32C3_BROWNOUT_DET_LVL_SEL_7 1
@@ -627,5 +627,5 @@
#define CONFIG_TIMER_TASK_STACK_SIZE CONFIG_ESP_TIMER_TASK_STACK_SIZE
#define CONFIG_TOOLPREFIX CONFIG_SDK_TOOLPREFIX
#define CONFIG_UDP_RECVMBOX_SIZE CONFIG_LWIP_UDP_RECVMBOX_SIZE
#define CONFIG_ARDUINO_IDF_COMMIT "2720d45e71"
#define CONFIG_ARDUINO_IDF_COMMIT "6a7d83af19"
#define CONFIG_ARDUINO_IDF_BRANCH "release/v4.4"
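With the C3 libraries now built for ECO3 and newer, CONFIG_ESP32C3_REV_MIN rises from 0 to 3. A downstream project that must not run on earlier silicon can turn this into a hard build error; a minimal sketch (the error message text is illustrative):

#include "sdkconfig.h"

// Refuse to build if the configured minimum chip revision predates ECO3.
#if defined(CONFIG_IDF_TARGET_ESP32C3) && (CONFIG_ESP32C3_REV_MIN < 3)
#error "ESP32-C3 builds require chip revision 3 (ECO3) or newer"
#endif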

View File

@@ -91,9 +91,10 @@ namespace dl
typedef enum
{
-CONSTANT,
-EDGE,
-REFLECT,
-SYMMETRIC,
+PADDING_EMPTY,
+PADDING_CONSTANT,
+PADDING_EDGE,
+PADDING_REFLECT,
+PADDING_SYMMETRIC,
} padding_mode_t;
} // namespace dl
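The enum values gain a PADDING_ prefix (plus a new PADDING_EMPTY mode), so existing callers need a one-line rename; a before/after sketch, assuming a caller that selected constant padding:

// before this commit:
//   dl::padding_mode_t mode = dl::CONSTANT;
// after this commit:
dl::padding_mode_t mode = dl::PADDING_CONSTANT;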

View File

@@ -72,7 +72,7 @@ namespace dl
{
this->output = new Tensor<feature_t>;
}
-this->output->set_exponent(this->output_exponent);
+this->output->set_exponent(input0.exponent);
this->output->set_shape(this->output_shape);
this->output->free_element();
}

View File

@@ -0,0 +1,169 @@
#pragma once
#include "dl_nn_pad.hpp"
#include "dl_layer_base.hpp"
namespace dl
{
namespace layer
{
/**
* @brief Pad.
*
* @tparam feature_t supports int16_t and int8_t,
* - int16_t: operation in int16_t quantization
* - int8_t: operation in int8_t quantization
*/
template <typename feature_t>
class Pad : public Layer
{
private:
std::vector<int> paddings;
std::vector<feature_t> constant_values;
padding_mode_t mode;
Tensor<feature_t> *output; /*<! output ptr of Pad >*/
std::vector<int> output_shape; /*<! output shape of Pad >*/
public:
Pad(std::vector<int> paddings,
std::vector<feature_t> constant_values = {0},
padding_mode_t mode = PADDING_CONSTANT,
const char *name = "Pad") : Layer(name),
paddings(paddings),
constant_values(constant_values),
mode(mode)
{
this->output = new Tensor<feature_t>;
}
/**
* @brief Destroy the Pad object.
*
*/
~Pad()
{
if (this->output != NULL)
{
delete this->output;
}
}
/**
* @brief Normalize the paddings and update the output shape and exponent.
*
* @param input as an input
* @param print_shape whether to print the output shape.
*/
void build(Tensor<feature_t> &input, bool print_shape = false)
{
assert(this->paddings.size() > 0);
int input_dims = input.shape.size();
int padding_dims = input_dims * 2;
if (this->paddings.size() == 1)
{
std::vector<int> _paddings(padding_dims, 0);
for (int i = 0; i < padding_dims; ++i)
{
_paddings[i] = this->paddings[0];
}
this->paddings = _paddings;
}
else if (this->paddings.size() == 2)
{
std::vector<int> _paddings(padding_dims, 0);
for (int i = 0; i < input_dims; ++i)
{
_paddings[2 * i] = this->paddings[0];
_paddings[2 * i + 1] = this->paddings[1];
}
this->paddings = _paddings;
}
else
{
assert(this->paddings.size() == padding_dims);
}
if (this->mode == PADDING_CONSTANT)
{
if (this->constant_values.size() == 1)
{
std::vector<feature_t> _constant_values(padding_dims, 0);
for (int i = 0; i < padding_dims; ++i)
{
_constant_values[i] = this->constant_values[0];
}
this->constant_values = _constant_values;
}
else if (this->constant_values.size() == 2)
{
std::vector<feature_t> _constant_values(padding_dims, 0);
for (int i = 0; i < input_dims; ++i)
{
_constant_values[2 * i] = this->constant_values[0];
_constant_values[2 * i + 1] = this->constant_values[1];
}
this->constant_values = _constant_values;
}
else
{
assert(constant_values.size() == padding_dims);
}
}
this->output_shape = input.shape;
for (int i = 0; i < input_dims; ++i)
{
this->output_shape[i] += (this->paddings[2 * i] + this->paddings[2 * i + 1]);
}
this->output->set_shape(this->output_shape);
this->output->set_exponent(input.exponent);
this->output->free_element();
if (print_shape)
{
std::cout << this->name << " | ";
this->output->print_shape();
}
}
/**
* @brief Get the output
*
* @return Tensor<feature_t>& Pad result
*/
Tensor<feature_t> &get_output()
{
return *this->output;
}
/**
* @brief Call the Pad operation.
*
* @param input as an input.
* @param assign_core not effective yet
* @return Pad result
*/
Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_LAYER_LATENCY_INIT();
DL_LOG_LAYER_LATENCY_START();
if (this->output->shape != this->output_shape)
{
this->output->set_shape(this->output_shape);
}
this->output->malloc_element();
this->output->set_exponent(input.exponent);
DL_LOG_LAYER_LATENCY_END(this->name, "apply");
DL_LOG_LAYER_LATENCY_START();
nn::pad(*this->output, input, this->paddings, this->constant_values, this->mode, assign_core);
DL_LOG_LAYER_LATENCY_END(this->name, "pad");
return *this->output;
}
};
} // namespace layer
} // namespace dl
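A minimal usage sketch of the new layer, assuming an int8_t HWC input tensor that is already allocated (the shape {32, 32, 3} and the layer name are placeholders):

// Pad 1 element on both sides of dims 0 and 1, none on dim 2,
// filling with zeros via PADDING_CONSTANT (the single {0} broadcasts).
dl::Tensor<int8_t> input;                   // assumed: shape {32, 32, 3}, elements allocated
dl::layer::Pad<int8_t> pad({1, 1, 1, 1, 0, 0}, {0}, dl::PADDING_CONSTANT, "pad1");
pad.build(input, /*print_shape=*/true);     // output shape becomes {34, 34, 3}
dl::Tensor<int8_t> &padded = pad.call(input);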

View File

@@ -0,0 +1,120 @@
#pragma once
#include "dl_constant.hpp"
#include "dl_variable.hpp"
#include "dl_nn.hpp"
namespace dl
{
namespace nn
{
/**
* @brief pad(input)
*
* @tparam feature_t
* @param output as an output
* @param input as an input
* @param paddings number of values padded to the edges of each dim
* @param constant_values used in PADDING_CONSTANT, the values to set the padded values for each dim
* @param mode One of the following: PADDING_EMPTY, PADDING_CONSTANT, PADDING_EDGE, PADDING_REFLECT, PADDING_SYMMETRIC
* @param assign_core not effective yet
*/
template <typename feature_t>
void pad(Tensor<feature_t> &output,
Tensor<feature_t> &input,
std::vector<int> paddings,
std::vector<feature_t> constant_values,
padding_mode_t mode,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE);
/**
* @brief pad(input)
*
* @tparam feature_t
* @param input as an input
* @param paddings number of values padded to the edges of each dim
* @param constant_values used in PADDING_CONSTANT, the values to set the padded values for each dim
* @param mode One of the following: PADDING_EMPTY, PADDING_CONSTANT, PADDING_EDGE, PADDING_REFLECT, PADDING_SYMMETRIC
* @param assign_core not effective yet
* @return Tensor<feature_t> the padded Tensor
*/
template <typename feature_t>
Tensor<feature_t> pad(Tensor<feature_t> &input,
std::vector<int> paddings,
std::vector<feature_t> constant_values,
padding_mode_t mode,
const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
{
DL_LOG_NN_LATENCY_INIT();
DL_LOG_NN_LATENCY_START();
assert(paddings.size() > 0);
int input_dims = input.shape.size();
int padding_dims = input_dims * 2;
std::vector<int> _paddings(padding_dims, 0);
if (paddings.size() == 1)
{
for (int i = 0; i < padding_dims; ++i)
{
_paddings[i] = paddings[0];
}
}
else if (paddings.size() == 2)
{
for (int i = 0; i < input_dims; ++i)
{
_paddings[2 * i] = paddings[0];
_paddings[2 * i + 1] = paddings[1];
}
}
else
{
assert(paddings.size() == padding_dims);
_paddings = paddings;
}
std::vector<feature_t> _constant_values(padding_dims, 0);
if (mode == PADDING_CONSTANT)
{
if (constant_values.size() == 1)
{
for (int i = 0; i < padding_dims; ++i)
{
_constant_values[i] = constant_values[0];
}
}
else if (constant_values.size() == 2)
{
for (int i = 0; i < input_dims; ++i)
{
_constant_values[2 * i] = constant_values[0];
_constant_values[2 * i + 1] = constant_values[1];
}
}
else
{
assert(constant_values.size() == padding_dims);
_constant_values = constant_values;
}
}
std::vector<int> output_shape = input.shape;
for (int i = 0; i < input_dims; ++i)
{
output_shape[i] += (_paddings[2 * i] + _paddings[2 * i + 1]);
}
Tensor<feature_t> output;
output.set_exponent(input.exponent).set_shape(output_shape).malloc_element();
DL_LOG_NN_LATENCY_END("apply");
DL_LOG_NN_LATENCY_START();
pad(output, input, _paddings, _constant_values, mode, assign_core);
DL_LOG_NN_LATENCY_END("pad");
return output;
}
} // namespace nn
} // namespace dl
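The shape rule above adds the front and back padding of each dim to the input shape: a {5, 5, 3} input with paddings {1, 1, 2, 2, 0, 0} grows to {7, 9, 3}. A hedged sketch of the functional form:

dl::Tensor<int8_t> input;   // assumed: shape {5, 5, 3}, elements allocated
// dim 0: 5 + 1 + 1 = 7;  dim 1: 5 + 2 + 2 = 9;  dim 2: 3 + 0 + 0 = 3
dl::Tensor<int8_t> padded = dl::nn::pad(input, {1, 1, 2, 2, 0, 0}, {0}, dl::PADDING_CONSTANT);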

View File

@@ -71,7 +71,7 @@ namespace dl
}
/**
- * @brief
+ * @brief Copy the elements of the input Tensor.
*
* @param input an input Tensor
* @param deep one of true or false
@@ -258,6 +258,56 @@
return this->element[index];
}
/**
* @brief Set all elements to value.
*
* @param value target value
* @return Tensor<T>& self
*/
Tensor<T> &set_value(T value);
/**
* @brief Set the elements to value.
*
* @param value target value, it will be broadcast automatically.
* @return Tensor<T>& self
*/
Tensor<T> &set_value(Tensor<T> &value);
/**
* @brief Set the sliced elements to value.
*
* @param axis_index_range range of slices
* @param value target value
* @return Tensor<T>& self
*/
Tensor<T> &set_value(std::vector<int> axis_index_range, T value);
/**
* @brief Set the sliced elements to value.
*
* @param axis_index_range range of slices
* @param value target value, it will be broadcast automatically.
* @return Tensor<T>& self
*/
Tensor<T> &set_value(std::vector<int> axis_index_range, Tensor<T> &value);
/**
* @brief Extracts a slice from the Tensor.
*
* @param axis_index_range range of slices
* @return Tensor<T> output
*/
Tensor<T> slice(std::vector<int> axis_index_range);
/**
* @brief Reverses specific dims of the tensor.
*
* @param axis The dims to be reversed
* @return Tensor<T>&
*/
Tensor<T> &reverse(std::vector<int> axis);
/**
* @brief Get the size of Tensor.
*
@@ -491,5 +541,16 @@
return *this;
}
}
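/**
* @brief Generate a 1D Tensor holding {0, 1, ..., size - 1}, with exponent 0.
*
* @param size number of elements
* @return Tensor<T> the generated Tensor
*/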
static Tensor<T> arange(int size)
{
Tensor<T> output;
output.set_auto_free(true).set_exponent(0).set_shape({size}).malloc_element();
for (int i = 0; i < size; ++i)
{
output.element[i] = i;
}
return output;
}
};
} // namespace dl
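A small sketch exercising the new Tensor helpers together; the {start, end} form of axis_index_range is an assumption inferred from the slice-style API, so treat the exact slicing semantics as illustrative:

dl::Tensor<int8_t> t = dl::Tensor<int8_t>::arange(6); // {0, 1, 2, 3, 4, 5}, shape {6}
t.set_value((int8_t)1);                     // every element becomes 1
t.set_value({0, 3}, (int8_t)7);             // assumed: elements 0..2 become 7
t.reverse({0});                             // reverse along dim 0: {1, 1, 1, 7, 7, 7}
dl::Tensor<int8_t> head = t.slice({0, 2});  // assumed: first two elements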

View File

@@ -85,6 +85,13 @@
void phy_freq_mem_backup(bool backup_en, uint32_t *mem);
#endif
#if CONFIG_ESP_PHY_ENABLE_USB
/**
* @brief Enable or disable USB during PHY init.
*/
void phy_bbpll_en_usb(bool en);
#endif
#ifdef __cplusplus
}
#endif
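A hedged sketch of the intended call site, assuming a port that keeps the USB serial/JTAG console powered while the PHY initializes (the placement is an assumption, not taken from this commit):

#if CONFIG_ESP_PHY_ENABLE_USB
    // Assumed call site: keep the BBPLL feeding USB so the
    // serial/JTAG console survives PHY init.
    phy_bbpll_en_usb(true);
#endif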

View File

@@ -4,8 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
-// Allow for this warning suppression only in IDF_CI_BUILD
-#if !defined(ESP_OPENSSL_SUPPRESS_LEGACY_WARNING) || !defined(IDF_CI_BUILD)
+#if !defined(ESP_OPENSSL_SUPPRESS_LEGACY_WARNING)
#warning "OpenSSL component will be removed from ESP-IDF in v5.0, please use esp_tls instead"
#endif
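Until the component is removed in v5.0, a project deliberately staying on it can silence the warning by defining the macro before the header is pulled in; a minimal sketch (the ssl.h include path is illustrative):

// e.g. in the project's CMakeLists.txt:
//   add_compile_definitions(ESP_OPENSSL_SUPPRESS_LEGACY_WARNING)
// or directly in the source file, before the include:
#define ESP_OPENSSL_SUPPRESS_LEGACY_WARNING
#include "openssl/ssl.h"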