Mirror of https://github.com/0xFEEDC0DE64/arduino-esp32.git (synced 2025-06-28 11:20:59 +02:00)
IDF release/v4.4 a79dc75f0a (#6048)
File diff suppressed because one or more lines are too long
@@ -303,7 +303,7 @@ env.Append(
     "UNITY_INCLUDE_CONFIG_H",
     "WITH_POSIX",
     "_GNU_SOURCE",
-    ("IDF_VER", '\\"v4.4-beta1-183-gf23dcd3555\\"'),
+    ("IDF_VER", '\\"v4.4-beta1-189-ga79dc75f0a\\"'),
     "ESP_PLATFORM",
     "_POSIX_READER_WRITER_LOCKS",
     "ARDUINO_ARCH_ESP32",
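This and the next two hunks are the same one-line bump of the IDF_VER define in the per-target PlatformIO build scripts. As a hedged aside (none of this code is part of the commit), downstream code normally keys off esp-idf's version macros rather than string-comparing IDF_VER:

```cpp
// Illustrative sketch only. ESP_IDF_VERSION / ESP_IDF_VERSION_VAL are real
// esp-idf macros from esp_idf_version.h; the feature flag below is made up.
#include "esp_idf_version.h"

#if ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(4, 4, 0)
#define HAS_V44_SNTP_DEFAULTS 1 // hypothetical gate for v4.4+ builds
#endif

static const char *idf_version = IDF_VER; // e.g. "v4.4-beta1-189-ga79dc75f0a"
```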
@@ -293,7 +293,7 @@ env.Append(
     "UNITY_INCLUDE_CONFIG_H",
     "WITH_POSIX",
     "_GNU_SOURCE",
-    ("IDF_VER", '\\"v4.4-beta1-183-gf23dcd3555\\"'),
+    ("IDF_VER", '\\"v4.4-beta1-189-ga79dc75f0a\\"'),
     "ESP_PLATFORM",
     "_POSIX_READER_WRITER_LOCKS",
     "ARDUINO_ARCH_ESP32",
@@ -290,7 +290,7 @@ env.Append(
     "UNITY_INCLUDE_CONFIG_H",
     "WITH_POSIX",
     "_GNU_SOURCE",
-    ("IDF_VER", '\\"v4.4-beta1-183-gf23dcd3555\\"'),
+    ("IDF_VER", '\\"v4.4-beta1-189-ga79dc75f0a\\"'),
     "ESP_PLATFORM",
     "_POSIX_READER_WRITER_LOCKS",
     "ARDUINO_ARCH_ESP32",
@@ -406,7 +406,9 @@
 #define CONFIG_LWIP_IPV6_ND6_NUM_NEIGHBORS 5
 #define CONFIG_LWIP_ICMP 1
 #define CONFIG_LWIP_MAX_RAW_PCBS 16
-#define CONFIG_LWIP_SNTP_MAX_SERVERS 1
+#define CONFIG_LWIP_SNTP_MAX_SERVERS 3
+#define CONFIG_LWIP_DHCP_GET_NTP_SRV 1
+#define CONFIG_LWIP_DHCP_MAX_NTP_SERVERS 1
 #define CONFIG_LWIP_SNTP_UPDATE_DELAY 3600000
 #define CONFIG_LWIP_ESP_LWIP_ASSERT 1
 #define CONFIG_LWIP_HOOK_TCP_ISN_DEFAULT 1
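The SNTP hunks raise CONFIG_LWIP_SNTP_MAX_SERVERS from 1 to 3 and enable CONFIG_LWIP_DHCP_GET_NTP_SRV (with one DHCP-provided server). A hedged sketch of what the higher limit buys an Arduino sketch; configTime() is the stock Arduino-ESP32 helper, and the server names are placeholders:

```cpp
// Sketch (illustrative only): with CONFIG_LWIP_SNTP_MAX_SERVERS=3 the stock
// configTime() call can now register all three fallback servers.
#include <Arduino.h>
#include <time.h>

void setup() {
    const long gmtOffsetSec = 0;
    const int daylightOffsetSec = 0;
    // Up to CONFIG_LWIP_SNTP_MAX_SERVERS (now 3) servers are kept.
    configTime(gmtOffsetSec, daylightOffsetSec,
               "pool.ntp.org", "time.nist.gov", "time.google.com");
}

void loop() {}
```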
@@ -677,5 +679,5 @@
 #define CONFIG_ULP_COPROC_RESERVE_MEM CONFIG_ESP32_ULP_COPROC_RESERVE_MEM
 #define CONFIG_WARN_WRITE_STRINGS CONFIG_COMPILER_WARN_WRITE_STRINGS
 #define CONFIG_WIFI_LWIP_ALLOCATION_FROM_SPIRAM_FIRST CONFIG_SPIRAM_TRY_ALLOCATE_WIFI_LWIP
-#define CONFIG_ARDUINO_IDF_COMMIT "f23dcd3555"
+#define CONFIG_ARDUINO_IDF_COMMIT "a79dc75f0a"
 #define CONFIG_ARDUINO_IDF_BRANCH "release/v4.4"
@@ -66,19 +66,18 @@ namespace dl
             this->output_exponent = input.exponent;
             if (!this->inplace)
             {
-                if (this->output != NULL)
+                if (this->output == NULL)
                 {
                     this->output = new Tensor<feature_t>;
                 }
                 this->output->set_exponent(this->output_exponent);
-                this->output->set_shape(this->output_shape);
+                this->output->set_shape(input.shape);
                 this->output->expand_dims(this->axis);
                 this->output->free_element();
             }
             else
             {
                 this->output = &input;
-                this->output->set_shape(this->output_shape);
                 this->output->expand_dims(this->axis);
             }
+            this->output_shape = this->output->shape;
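The recurring one-character fix across these esp-dl layer hunks deserves a note: build() lazily allocates the output tensor, but the old guard `if (this->output != NULL)` allocated only when a tensor already existed, so the first call dereferenced a NULL pointer and any later call leaked. A minimal self-contained sketch of the corrected pattern (simplified stand-in types, not the real esp-dl API):

```cpp
#include <cstddef>

// Illustrative stand-in for dl::Tensor; the real esp-dl class is richer.
struct Tensor { int exponent = 0; };

struct Layer {
    Tensor *output = NULL;

    void build() {
        // Allocate exactly once, on the first build(); reuse afterwards.
        // The pre-fix code tested `output != NULL`, so it never allocated on
        // the first call (NULL dereference below) and would overwrite the
        // pointer (leaking) whenever output was already set.
        if (output == NULL) {
            output = new Tensor;
        }
        // ... configure *output (shape, exponent, ...) ...
    }

    ~Layer() { delete output; }
};
```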
@@ -59,7 +59,7 @@ namespace dl
             this->output_shape = {input.get_size()};
             if (!this->inplace)
             {
-                if (this->output != NULL)
+                if (this->output == NULL)
                 {
                     this->output = new Tensor<feature_t>;
                 }
@@ -10,14 +10,14 @@ namespace dl
     namespace layer
     {
         /**
-         * @brief LeakyReLU(input).
+         * @brief LeakyRelu(input).
          *
          * @tparam feature_t supports int16_t and int8_t,
          *         - int16_t: stands for operation in int16_t quantize
          *         - int8_t: stands for operation in int8_t quantize
          */
         template <typename feature_t>
-        class LeakyReLU : public Layer
+        class LeakyRelu : public Layer
         {
         private:
             feature_t activation_alpha; /*<! quantized alpha >*/
@@ -28,7 +28,7 @@ namespace dl
             std::vector<int> output_shape; /*<! output shape of leakyrelu >*/
         public:
             /**
-             * @brief Construct a new LeakyReLU object
+             * @brief Construct a new LeakyRelu object
              *
              * @param activation_alpha quantized alpha
              * @param activation_exponent exponent of quantized alpha
@@ -36,7 +36,7 @@ namespace dl
              * @param inplace true: the output will store to input0
              *                false: the output will store to a separate memory
              */
-            LeakyReLU(const int activation_alpha, const int activation_exponent, const char *name = "LeakyReLU", bool inplace = false) : Layer(name), output(NULL), output_shape({})
+            LeakyRelu(const int activation_alpha, const int activation_exponent, const char *name = "LeakyRelu", bool inplace = false) : Layer(name), output(NULL), output_shape({})
             {
                 this->activation_alpha = activation_alpha;
                 this->activation_exponent = activation_exponent;
@@ -44,10 +44,10 @@ namespace dl
             }

             /**
-             * @brief Destroy the LeakyReLU object
+             * @brief Destroy the LeakyRelu object
              *
              */
-            ~LeakyReLU()
+            ~LeakyRelu()
             {
                 if ((!this->inplace) && (this->output != NULL))
                 {
@@ -66,7 +66,7 @@ namespace dl
                 this->output_shape = input.shape;
                 if (!this->inplace)
                 {
-                    if (this->output != NULL)
+                    if (this->output == NULL)
                     {
                         this->output = new Tensor<feature_t>;
                     }
@@ -90,7 +90,7 @@ namespace dl
             /**
              * @brief Get the output
              *
-             * @return Tensor<feature_t>& LeakyReLU result
+             * @return Tensor<feature_t>& LeakyRelu result
              */
             Tensor<feature_t> &get_output()
             {
@@ -98,11 +98,11 @@ namespace dl
             }

             /**
-             * @brief Call LeakyReLU operation.
+             * @brief Call LeakyRelu operation.
              *
              * @param input as an input
              * @param assign_core not effective yet
-             * @return LeakyReLU result
+             * @return LeakyRelu result
              */
             Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
             {
@@ -130,7 +130,7 @@ namespace dl
                     {
                         this->output->set_shape(this->output_shape);
                     }
-                    nn::leakyrelu<true>(*this->output, input, this->activation_alpha, this->activation_exponent, assign_core);
+                    nn::leakyrelu(*this->output, input, this->activation_alpha, this->activation_exponent, assign_core);
                     DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
                 }

@@ -68,7 +68,7 @@ namespace dl

                 if (!this->inplace)
                 {
-                    if (this->output != NULL)
+                    if (this->output == NULL)
                     {
                         this->output = new Tensor<feature_t>;
                     }
@@ -132,7 +132,7 @@ namespace dl
                     {
                         this->output->set_shape(this->output_shape);
                     }
-                    nn::max2d<true>(*this->output, input0, input1, assign_core);
+                    nn::max2d(*this->output, input0, input1, assign_core);
                     DL_LOG_LAYER_LATENCY_END(this->name, "max2d");
                 }

@@ -68,7 +68,7 @@ namespace dl

                 if (!this->inplace)
                 {
-                    if (this->output != NULL)
+                    if (this->output == NULL)
                     {
                         this->output = new Tensor<feature_t>;
                     }
@@ -132,7 +132,7 @@ namespace dl
                     {
                         this->output->set_shape(this->output_shape);
                     }
-                    nn::min2d<true>(*this->output, input0, input1, assign_core);
+                    nn::min2d(*this->output, input0, input1, assign_core);
                     DL_LOG_LAYER_LATENCY_END(this->name, "min2d");
                 }

@@ -75,7 +75,7 @@ namespace dl

                 if (!this->inplace)
                 {
-                    if (this->output != NULL)
+                    if (this->output == NULL)
                     {
                         this->output = new Tensor<feature_t>;
                     }
@@ -140,7 +140,7 @@ namespace dl
                     {
                         this->output->set_shape(this->output_shape);
                     }
-                    nn::mul2d<true>(*this->output, input0, input1, this->activation, assign_core);
+                    nn::mul2d(*this->output, input0, input1, this->activation, assign_core);
                     DL_LOG_LAYER_LATENCY_END(this->name, "mul2d");
                 }

@@ -10,17 +10,17 @@ namespace dl
     namespace layer
     {
         /**
-         * @brief PReLU(input).
+         * @brief PRelu(input).
          *
         * @tparam feature_t supports int16_t and int8_t,
         *         - int16_t: stands for operation in int16_t quantize
         *         - int8_t: stands for operation in int8_t quantize
         */
         template <typename feature_t>
-        class PReLU : public Layer
+        class PRelu : public Layer
         {
         private:
-            feature_t *activation_element; /*<! quantized alpha elements along channel axis >*/
+            const feature_t *activation_element; /*<! quantized alpha elements along channel axis >*/
             int activation_exponent; /*<! exponent of quantized alpha elements >*/
             Tensor<feature_t> *output; /*<! output ptr of prelu >*/
             bool inplace; /*<! true: the output will store to input0
@@ -28,7 +28,7 @@ namespace dl
             std::vector<int> output_shape; /*<! output shape of prelu >*/
         public:
             /**
-             * @brief Construct a new PReLU object
+             * @brief Construct a new PRelu object
              *
              * @param activation_element quantized alpha elements along channel axis
              * @param activation_exponent exponent of quantized alpha elements
@@ -36,10 +36,10 @@ namespace dl
              * @param inplace true: the output will store to input0
              *                false: the output will store to a separate memory
              */
-            PReLU(const feature_t *activation_element,
+            PRelu(const feature_t *activation_element,
                   const int activation_exponent = 0,
-                  const char *name = NULL,
-                  bool inplace = "PReLU") : Layer(name),
+                  const char *name = "PRelu",
+                  bool inplace = false) : Layer(name),
                                            activation_element(activation_element),
                                            activation_exponent(activation_exponent),
                                            output(NULL),
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Destroy the PReLU object
|
||||
* @brief Destroy the PRelu object
|
||||
*
|
||||
*/
|
||||
~PReLU()
|
||||
~PRelu()
|
||||
{
|
||||
if ((!this->inplace) && (this->output != NULL))
|
||||
{
|
||||
@ -71,7 +71,7 @@ namespace dl
|
||||
this->output_shape = input.shape;
|
||||
if (!this->inplace)
|
||||
{
|
||||
if (this->output != NULL)
|
||||
if (this->output == NULL)
|
||||
{
|
||||
this->output = new Tensor<feature_t>;
|
||||
}
|
||||
@ -94,7 +94,7 @@ namespace dl
|
||||
/**
|
||||
* @brief Get the output
|
||||
*
|
||||
* @return Tensor<feature_t>& PReLU result
|
||||
* @return Tensor<feature_t>& PRelu result
|
||||
*/
|
||||
Tensor<feature_t> &get_output()
|
||||
{
|
||||
@ -102,11 +102,11 @@ namespace dl
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Call PReLU operation.
|
||||
* @brief Call PRelu operation.
|
||||
*
|
||||
* @param input as an input
|
||||
* @param assign_core not effective yet
|
||||
* @return PReLU result
|
||||
* @return PRelu result
|
||||
*/
|
||||
Tensor<feature_t> &call(Tensor<feature_t> &input, const std::vector<int> &assign_core = CONFIG_DEFAULT_ASSIGN_CORE)
|
||||
{
|
||||
@ -125,7 +125,7 @@ namespace dl
|
||||
|
||||
DL_LOG_LAYER_LATENCY_START();
|
||||
nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
|
||||
DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
|
||||
DL_LOG_LAYER_LATENCY_END(this->name, "prelu");
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -135,7 +135,7 @@ namespace dl
|
||||
this->output->set_shape(this->output_shape);
|
||||
}
|
||||
nn::prelu(*this->output, input, this->activation_element, this->activation_exponent, assign_core);
|
||||
DL_LOG_LAYER_LATENCY_END(this->name, "leakyrelu");
|
||||
DL_LOG_LAYER_LATENCY_END(this->name, "prelu");
|
||||
}
|
||||
|
||||
return *this->output;
|
||||
|
@@ -61,7 +61,7 @@ namespace dl
             this->output_shape = input.shape;
             if (!this->inplace)
             {
-                if (this->output != NULL)
+                if (this->output == NULL)
                 {
                     this->output = new Tensor<feature_t>;
                 }
@@ -64,19 +64,21 @@ namespace dl
             this->output_exponent = input.exponent;
             if (!this->inplace)
             {
-                if (this->output != NULL)
+                if (this->output == NULL)
                 {
                     this->output = new Tensor<feature_t>;
                 }
                 this->output->set_exponent(this->output_exponent);
-                this->output->set_shape(this->output_shape);
+                this->output->set_shape(input.shape);
+                this->output->reshape(this->output_shape);
                 this->output->free_element();
             }
             else
             {
                 this->output = &input;
-                this->output->set_shape(this->output_shape);
+                this->output->reshape(this->output_shape);
             }
+            this->output_shape = this->output->shape;

             if (print_shape)
             {
@@ -66,7 +66,7 @@ namespace dl
             this->output_exponent = input.exponent;
             if (!this->inplace)
             {
-                if (this->output != NULL)
+                if (this->output == NULL)
                 {
                     this->output = new Tensor<feature_t>;
                 }
@@ -78,7 +78,6 @@ namespace dl
             else
             {
                 this->output = &input;
-                this->output->set_shape(input.shape);
                 this->output->squeeze(this->axis);
             }
             this->output_shape = this->output->shape;
@@ -71,7 +71,7 @@ namespace dl
             this->output_shape = input0.shape;
             if (!this->inplace)
             {
-                if (this->output != NULL)
+                if (this->output == NULL)
                 {
                     this->output = new Tensor<feature_t>;
                 }
@@ -120,12 +120,12 @@ namespace dl
                     {
                         this->output->set_shape(this->output_shape);
                     }
-                    this->output.malloc_element();
+                    this->output->malloc_element();
                     this->output->set_exponent(input0.exponent);
                     DL_LOG_LAYER_LATENCY_END(this->name, "apply");

                     DL_LOG_LAYER_LATENCY_START();
-                    nn::sub2d(this->output, input0, input1, this->activation, assign_core);
+                    nn::sub2d(*this->output, input0, input1, this->activation, assign_core);
                     DL_LOG_LAYER_LATENCY_END(this->name, "sub2d");
                 }
                 else
@@ -135,7 +135,7 @@ namespace dl
                     {
                         this->output->set_shape(this->output_shape);
                     }
-                    nn::sub2d<true>(this->output, input0, input1, this->activation, assign_core, this->output_exponent);
+                    nn::sub2d(*this->output, input0, input1, this->activation, assign_core, this->output_exponent);
                     DL_LOG_LAYER_LATENCY_END(this->name, "sub2d");
                 }
                 return *this->output;
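Both Sub2D hunks fix the same category of error: output is a Tensor<feature_t>*, so member access needs `->` and a function taking Tensor& needs the pointer dereferenced. A small standalone sketch (the sub2d signature here is hypothetical, only shaped like nn::sub2d) of why the old calls could not compile:

```cpp
#include <vector>

template <typename T>
struct Tensor { std::vector<T> data; void malloc_element() {} };

// Hypothetical signature in the spirit of nn::sub2d: output passed by reference.
template <typename T>
void sub2d(Tensor<T> &out, const Tensor<T> &a, const Tensor<T> &b) { /* ... */ }

int main() {
    Tensor<int> a, b;
    Tensor<int> *output = new Tensor<int>; // mirrors Layer::output being a pointer
    // output.malloc_element();  // error: '.' on a pointer -- the old code
    output->malloc_element();    // fixed: '->' for pointer member access
    // sub2d(output, a, b);      // error: Tensor<int>* does not bind to Tensor<int>&
    sub2d(*output, a, b);        // fixed: dereference to pass by reference
    delete output;
}
```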
@@ -63,13 +63,24 @@ namespace dl
         {
             this->output_exponent = input.exponent;
             this->output_shape = input.shape;
-            for (int i = 0; i < this->perm.size(); i++)
+            int dims = this->output_shape.size();
+            if (this->perm.size() == 0)
+            {
+                for (int i = dims - 1; i >= 0; i--)
+                {
+                    this->perm.push_back(i);
+                }
+            }
+            for (int i = 0; i < dims; ++i)
             {
+                if (this->perm[i] < 0)
+                    this->perm[i] = dims + this->perm[i];
                 this->output_shape[i] = input.shape[this->perm[i]];
             }

             if (!this->inplace)
             {
-                if (this->output != NULL)
+                if (this->output == NULL)
                 {
                     this->output = new Tensor<feature_t>;
                 }
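The Transpose change gives perm NumPy-like semantics: an empty perm now means "reverse all axes", and negative entries count from the back. A hedged standalone sketch of the same normalization logic (free function and names invented for illustration):

```cpp
#include <cstdio>
#include <vector>

// Standalone rendition of the normalization the commit adds to Transpose::build.
std::vector<int> normalize_perm(std::vector<int> perm, const std::vector<int> &shape) {
    const int dims = static_cast<int>(shape.size());
    if (perm.empty()) {                     // empty perm -> reverse all axes,
        for (int i = dims - 1; i >= 0; i--) // matching NumPy's transpose default
            perm.push_back(i);
    }
    for (int i = 0; i < dims; ++i)
        if (perm[i] < 0)                    // e.g. -1 -> last axis
            perm[i] += dims;
    return perm;
}

int main() {
    std::vector<int> shape = {2, 3, 4};
    for (int axis : normalize_perm({}, shape)) std::printf("%d ", axis);        // 2 1 0
    std::printf("\n");
    for (int axis : normalize_perm({0, -1, 1}, shape)) std::printf("%d ", axis); // 0 2 1
    std::printf("\n");
}
```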
@@ -396,9 +396,6 @@ namespace dl
         * @brief print all the element of the Tensor.
         *
         * @param message to print
-        * @param with_padding one of true or false,
-        *        - true: the padding element will also be printed
-        *        - false: the padding element will not be printed
         */
        void print_all(const char *message = "")
        {
@@ -553,4 +550,4 @@ namespace dl
         return output;
     }
 };
-} // namespace dl
\ No newline at end of file
+} // namespace dl
@@ -1,16 +1,8 @@
-// Copyright 2015-2021 Espressif Systems (Shanghai) PTE LTD
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+/*
+ * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */

 /*******************************************************************************
  * NOTICE
@@ -339,17 +331,15 @@ static inline void mcpwm_ll_timer_set_count_mode(mcpwm_dev_t *mcpwm, int timer_i
 static inline mcpwm_timer_count_mode_t mcpwm_ll_timer_get_count_mode(mcpwm_dev_t *mcpwm, int timer_id)
 {
     switch (mcpwm->timer[timer_id].timer_cfg1.timer_mod) {
-    case 0:
-        return MCPWM_TIMER_COUNT_MODE_PAUSE;
     case 1:
         return MCPWM_TIMER_COUNT_MODE_UP;
     case 2:
         return MCPWM_TIMER_COUNT_MODE_DOWN;
     case 3:
         return MCPWM_TIMER_COUNT_MODE_UP_DOWN;
+    case 0:
     default:
-        HAL_ASSERT(false && "unknown count mode");
-        return mcpwm->timer[timer_id].timer_cfg1.timer_mod;
+        return MCPWM_TIMER_COUNT_MODE_PAUSE;
     }
 }

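The count-mode getter previously asserted on unrecognized values and returned the raw timer_mod register field as if it were an enum; the fix folds `case 0:` into `default:` so every possible field value decodes to a valid mcpwm_timer_count_mode_t. A hedged standalone sketch of that decode pattern (names made up, not the HAL's):

```cpp
// Illustrative only: decoding a raw 2-bit register field into an enum with a
// switch whose default collapses onto a safe value -- the pattern the fix adopts.
enum count_mode_t { MODE_PAUSE, MODE_UP, MODE_DOWN, MODE_UP_DOWN };

static count_mode_t decode_count_mode(unsigned raw) {
    switch (raw) {
    case 1:  return MODE_UP;
    case 2:  return MODE_DOWN;
    case 3:  return MODE_UP_DOWN;
    case 0:  // explicit, and also the safe fallback
    default: return MODE_PAUSE;
    }
}
```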
Binary files not shown (12 files).
@@ -1115,8 +1115,9 @@ CONFIG_LWIP_MAX_RAW_PCBS=16
 #
 # SNTP
 #
-CONFIG_LWIP_SNTP_MAX_SERVERS=1
-# CONFIG_LWIP_DHCP_GET_NTP_SRV is not set
+CONFIG_LWIP_SNTP_MAX_SERVERS=3
+CONFIG_LWIP_DHCP_GET_NTP_SRV=y
+CONFIG_LWIP_DHCP_MAX_NTP_SERVERS=1
 CONFIG_LWIP_SNTP_UPDATE_DELAY=3600000
 # end of SNTP

@@ -375,7 +375,9 @@
 #define CONFIG_LWIP_IPV6_ND6_NUM_NEIGHBORS 5
 #define CONFIG_LWIP_ICMP 1
 #define CONFIG_LWIP_MAX_RAW_PCBS 16
-#define CONFIG_LWIP_SNTP_MAX_SERVERS 1
+#define CONFIG_LWIP_SNTP_MAX_SERVERS 3
+#define CONFIG_LWIP_DHCP_GET_NTP_SRV 1
+#define CONFIG_LWIP_DHCP_MAX_NTP_SERVERS 1
 #define CONFIG_LWIP_SNTP_UPDATE_DELAY 3600000
 #define CONFIG_LWIP_ESP_LWIP_ASSERT 1
 #define CONFIG_LWIP_HOOK_TCP_ISN_DEFAULT 1
@@ -630,5 +632,5 @@
 #define CONFIG_TIMER_TASK_STACK_SIZE CONFIG_ESP_TIMER_TASK_STACK_SIZE
 #define CONFIG_TOOLPREFIX CONFIG_SDK_TOOLPREFIX
 #define CONFIG_UDP_RECVMBOX_SIZE CONFIG_LWIP_UDP_RECVMBOX_SIZE
-#define CONFIG_ARDUINO_IDF_COMMIT "f23dcd3555"
+#define CONFIG_ARDUINO_IDF_COMMIT "a79dc75f0a"
 #define CONFIG_ARDUINO_IDF_BRANCH "release/v4.4"
[The esp-dl layer and Tensor hunks shown earlier are repeated here verbatim; the duplicate block is omitted.]
Binary files not shown (10 files).
@@ -1134,8 +1134,9 @@ CONFIG_LWIP_MAX_RAW_PCBS=16
 #
 # SNTP
 #
-CONFIG_LWIP_SNTP_MAX_SERVERS=1
-# CONFIG_LWIP_DHCP_GET_NTP_SRV is not set
+CONFIG_LWIP_SNTP_MAX_SERVERS=3
+CONFIG_LWIP_DHCP_GET_NTP_SRV=y
+CONFIG_LWIP_DHCP_MAX_NTP_SERVERS=1
 CONFIG_LWIP_SNTP_UPDATE_DELAY=3600000
 # end of SNTP

@@ -328,7 +328,9 @@
 #define CONFIG_LWIP_IPV6_ND6_NUM_NEIGHBORS 5
 #define CONFIG_LWIP_ICMP 1
 #define CONFIG_LWIP_MAX_RAW_PCBS 16
-#define CONFIG_LWIP_SNTP_MAX_SERVERS 1
+#define CONFIG_LWIP_SNTP_MAX_SERVERS 3
+#define CONFIG_LWIP_DHCP_GET_NTP_SRV 1
+#define CONFIG_LWIP_DHCP_MAX_NTP_SERVERS 1
 #define CONFIG_LWIP_SNTP_UPDATE_DELAY 3600000
 #define CONFIG_LWIP_ESP_LWIP_ASSERT 1
 #define CONFIG_LWIP_HOOK_TCP_ISN_DEFAULT 1
@@ -577,5 +579,5 @@
 #define CONFIG_USB_MSC_BUFSIZE CONFIG_TINYUSB_MSC_BUFSIZE
 #define CONFIG_USB_MSC_ENABLED CONFIG_TINYUSB_MSC_ENABLED
 #define CONFIG_WARN_WRITE_STRINGS CONFIG_COMPILER_WARN_WRITE_STRINGS
-#define CONFIG_ARDUINO_IDF_COMMIT "f23dcd3555"
+#define CONFIG_ARDUINO_IDF_COMMIT "a79dc75f0a"
 #define CONFIG_ARDUINO_IDF_BRANCH "release/v4.4"
[The esp-dl layer and Tensor hunks are repeated verbatim a third time here; the duplicate block is omitted.]
Binary files not shown (10 files).
@@ -966,8 +966,9 @@ CONFIG_LWIP_MAX_RAW_PCBS=16
 #
 # SNTP
 #
-CONFIG_LWIP_SNTP_MAX_SERVERS=1
-# CONFIG_LWIP_DHCP_GET_NTP_SRV is not set
+CONFIG_LWIP_SNTP_MAX_SERVERS=3
+CONFIG_LWIP_DHCP_GET_NTP_SRV=y
+CONFIG_LWIP_DHCP_MAX_NTP_SERVERS=1
 CONFIG_LWIP_SNTP_UPDATE_DELAY=3600000
 # end of SNTP
