Merge branch 'bugfix/event_loop_library_memory_leak_v3.2' into 'release/v3.2'
Fix event loop library memory leak (backport v3.2)

See merge request idf/esp-idf!4221
@@ -286,7 +286,7 @@ static esp_event_base_instance_t* loop_find_event_base_instance(esp_event_loop_i
 // Functions that operate on post instance
 static esp_err_t post_instance_create(esp_event_base_t event_base, int32_t event_id, void* event_data, int32_t event_data_size, esp_event_post_instance_t* post)
 {
-    void** event_data_copy = NULL;
+    void* event_data_copy = NULL;
 
     // Make persistent copy of event data on heap.
     if (event_data != NULL && event_data_size != 0) {
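The switch from void** to void* matters because event_data_copy holds the heap copy itself, not a pointer to a pointer. A minimal sketch of the copy-on-post pattern this hunk relies on (not the verbatim esp-idf code; allocation style and error handling are assumptions):

    // Sketch: make a persistent heap copy of the caller's event data and hand its
    // ownership to the post instance (layout as in the header hunk further below).
    static esp_err_t post_instance_create(esp_event_base_t event_base, int32_t event_id,
                                          void* event_data, int32_t event_data_size,
                                          esp_event_post_instance_t* post)
    {
        void* event_data_copy = NULL;

        // Make persistent copy of event data on heap.
        if (event_data != NULL && event_data_size != 0) {
            event_data_copy = calloc(1, event_data_size);
            if (event_data_copy == NULL) {
                return ESP_ERR_NO_MEM;
            }
            memcpy(event_data_copy, event_data, event_data_size);
        }

        post->base = event_base;
        post->id = event_id;
        post->data = event_data_copy;   // released later by post_instance_delete()

        return ESP_OK;
    }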
@@ -528,6 +528,8 @@ esp_err_t esp_event_loop_run(esp_event_loop_handle_t event_loop, TickType_t tick
             exec |= true;
         }
 
+        post_instance_delete(&post);
+
         if (ticks_to_run != portMAX_DELAY) {
             end = xTaskGetTickCount();
             remaining_ticks -= end - marker;
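This is the core of the leak fix in the dispatch path: every post instance dequeued by the loop owns a heap copy of its event data, and nothing released that copy once the handlers had run. A hedged sketch of what the added post_instance_delete() call is responsible for (assumed shape, not the verbatim function):

    // Sketch: release the heap copy owned by a post instance after dispatch.
    static void post_instance_delete(esp_event_post_instance_t* post)
    {
        free(post->data);                 // the copy made in post_instance_create()
        memset(post, 0, sizeof(*post));   // leave no dangling pointer behind
    }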
@@ -559,10 +561,14 @@ esp_err_t esp_event_loop_delete(esp_event_loop_handle_t event_loop)
 
     esp_event_loop_instance_t* loop = (esp_event_loop_instance_t*) event_loop;
     SemaphoreHandle_t loop_mutex = loop->mutex;
+#ifdef CONFIG_EVENT_LOOP_PROFILING
+    SemaphoreHandle_t loop_profiling_mutex = loop->profiling_mutex;
+#endif
 
     xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);
 
 #ifdef CONFIG_EVENT_LOOP_PROFILING
+    xSemaphoreTakeRecursive(loop->profiling_mutex, portMAX_DELAY);
     portENTER_CRITICAL(&s_event_loops_spinlock);
     SLIST_REMOVE(&s_event_loops, loop, esp_event_loop_instance, loop_entry);
     portEXIT_CRITICAL(&s_event_loops_spinlock);
@@ -588,6 +594,10 @@ esp_err_t esp_event_loop_delete(esp_event_loop_handle_t event_loop)
     free(loop);
     // Free loop mutex before deleting
     xSemaphoreGiveRecursive(loop_mutex);
+#ifdef CONFIG_EVENT_LOOP_PROFILING
+    xSemaphoreGiveRecursive(loop_profiling_mutex);
+    vSemaphoreDelete(loop_profiling_mutex);
+#endif
     vSemaphoreDelete(loop_mutex);
 
     ESP_LOGD(TAG, "deleted loop %p", (void*) event_loop);
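The profiling-mutex handling mirrors the existing loop-mutex pattern: both handles are cached in locals while the loop structure is still valid, and are only given back and deleted after free(loop). A condensed sketch of the resulting order with CONFIG_EVENT_LOOP_PROFILING enabled, reconstructed from the two hunks above (intermediate cleanup elided):

    SemaphoreHandle_t loop_mutex = loop->mutex;                      // cache handles while 'loop' is valid
    SemaphoreHandle_t loop_profiling_mutex = loop->profiling_mutex;

    xSemaphoreTakeRecursive(loop->mutex, portMAX_DELAY);
    xSemaphoreTakeRecursive(loop->profiling_mutex, portMAX_DELAY);

    // ... unregister handlers, remove the loop from s_event_loops, etc. ...
    free(loop);                                                      // 'loop' must not be touched past this point

    xSemaphoreGiveRecursive(loop_mutex);
    xSemaphoreGiveRecursive(loop_profiling_mutex);
    vSemaphoreDelete(loop_profiling_mutex);
    vSemaphoreDelete(loop_mutex);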
@@ -90,7 +90,7 @@ typedef struct esp_event_loop_instance {
 typedef struct esp_event_post_instance {
     esp_event_base_t base;      /**< the event base */
     int32_t id;                 /**< the event id */
-    void** data;                /**< data associated with the event */
+    void* data;                 /**< data associated with the event */
 } esp_event_post_instance_t;
 
 #ifdef __cplusplus
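With data stored as a plain void*, the dispatcher can hand the heap copy straight to subscribers as the event_data argument of esp_event_handler_t. A hypothetical handler, only to show where that pointer ends up (names and payload type are illustrative):

    // Hypothetical subscriber: 'event_data' points at the copy made by
    // post_instance_create(), not at the poster's original buffer.
    static void on_example_event(void* handler_arg, esp_event_base_t base,
                                 int32_t id, void* event_data)
    {
        int value = *(int*) event_data;   // assumes an int payload was posted
        ESP_LOGI("example", "received value %d", value);
    }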
@@ -27,14 +27,19 @@ static const char* TAG = "test_event";
 
 #define TEST_CONFIG_WAIT_MULTIPLIER 5
 
+// The initial logging "initializing test" is to ensure mutex allocation is not counted against memory not being freed
+// during teardown.
 #define TEST_SETUP() \
+        ESP_LOGI(TAG, "initializing test"); \
+        size_t free_mem_before = heap_caps_get_free_size(MALLOC_CAP_DEFAULT); \
         test_setup(); \
         s_test_core_id = xPortGetCoreID(); \
-        s_test_priority = uxTaskPriorityGet(NULL); \
+        s_test_priority = uxTaskPriorityGet(NULL);
 
 #define TEST_TEARDOWN() \
         test_teardown(); \
-        vTaskDelay(pdMS_TO_TICKS(CONFIG_INT_WDT_TIMEOUT_MS * TEST_CONFIG_WAIT_MULTIPLIER));
+        vTaskDelay(pdMS_TO_TICKS(CONFIG_INT_WDT_TIMEOUT_MS * TEST_CONFIG_WAIT_MULTIPLIER)); \
+        TEST_ASSERT_EQUAL(free_mem_before, heap_caps_get_free_size(MALLOC_CAP_DEFAULT));
 
 typedef struct {
     void* data;
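The updated macros turn every test into a heap-leak check: TEST_SETUP() snapshots the free heap into free_mem_before, and TEST_TEARDOWN() asserts the snapshot is matched once the test is done. A hypothetical test built only from public esp_event APIs, to show how the pair brackets a test body (event base, queue size, and timing values are illustrative, not from the commit):

    ESP_EVENT_DEFINE_BASE(TEST_LEAK_EVENT);   // hypothetical event base for this sketch

    TEST_CASE("posted event data is freed after dispatch", "[event]")
    {
        TEST_SETUP();                         // logs "initializing test", records free_mem_before

        esp_event_loop_args_t loop_args = {
            .queue_size = 16,
            .task_name = NULL                 // no dedicated task; we run the loop ourselves
        };
        esp_event_loop_handle_t loop;
        TEST_ASSERT_EQUAL(ESP_OK, esp_event_loop_create(&loop_args, &loop));

        int payload = 42;                     // copied to the heap by the post call
        TEST_ASSERT_EQUAL(ESP_OK, esp_event_post_to(loop, TEST_LEAK_EVENT, 0,
                                                    &payload, sizeof(payload), portMAX_DELAY));
        TEST_ASSERT_EQUAL(ESP_OK, esp_event_loop_run(loop, pdMS_TO_TICKS(10)));

        TEST_ASSERT_EQUAL(ESP_OK, esp_event_loop_delete(loop));
        TEST_TEARDOWN();                      // asserts free heap equals free_mem_before
    }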
@@ -679,6 +684,10 @@ static void loop_run_task(void* args)
 
 static void performance_test(bool dedicated_task)
 {
+    // rand() seems to do a one-time allocation. Call it here so that the memory it allocates
+    // is not counted as a leak.
+    unsigned int _rand __attribute__((unused)) = rand();
+
     TEST_SETUP();
 
     const char test_base[] = "qwertyuiopasdfghjklzxvbnmmnbvcxzqwertyuiopasdfghjklzxvbnmmnbvcxz";
@@ -775,6 +784,14 @@ static void performance_test(bool dedicated_task)
 
     int average = (int) (running_sum / (running_count));
 
+    if (!dedicated_task) {
+        ((esp_event_loop_instance_t*) loop)->task = mtask;
+    }
+
+    TEST_ASSERT_EQUAL(ESP_OK, esp_event_loop_delete(loop));
+
+    TEST_TEARDOWN();
+
 #ifdef CONFIG_EVENT_LOOP_PROFILING
     ESP_LOGI(TAG, "events dispatched/second with profiling enabled: %d", average);
     // Enabling profiling will slow down event dispatch, so the set threshold
@@ -786,14 +803,6 @@ static void performance_test(bool dedicated_task)
     TEST_PERFORMANCE_GREATER_THAN(EVENT_DISPATCH_PSRAM, "%d", average);
 #endif // CONFIG_SPIRAM_SUPPORT
 #endif // CONFIG_EVENT_LOOP_PROFILING
 
-    if (!dedicated_task) {
-        ((esp_event_loop_instance_t*) loop)->task = mtask;
-    }
-
-    TEST_ASSERT_EQUAL(ESP_OK, esp_event_loop_delete(loop));
-
-    TEST_TEARDOWN();
-
 }
 
 TEST_CASE("performance test - dedicated task", "[event]")
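Taken together, the last two hunks relocate the cleanup rather than add new logic: the loop deletion and TEST_TEARDOWN() (and with it the new heap-leak assertion) now run before the profiling log and the performance threshold checks instead of at the very end of performance_test(). The tail of the function after the commit, reconstructed from the hunks above:

    int average = (int) (running_sum / (running_count));

    if (!dedicated_task) {
        ((esp_event_loop_instance_t*) loop)->task = mtask;
    }

    TEST_ASSERT_EQUAL(ESP_OK, esp_event_loop_delete(loop));

    TEST_TEARDOWN();                      // heap-leak assertion runs here

#ifdef CONFIG_EVENT_LOOP_PROFILING
    ESP_LOGI(TAG, "events dispatched/second with profiling enabled: %d", average);
    // ... threshold checks elided ...
#endif // CONFIG_EVENT_LOOP_PROFILING
}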