feat(heap): Add feature to get peak heap usage

This feature keeps track of the per-task peak memory usage.

- Update the heap_task_tracking example to make use of the new feature
- Clean up the implementation:
  - multi_heap_get_free_size() is never used, remove it.
  - Minor updates in the heap_caps_update_per_task_info_xx() functions.
  - Update the setting of the block owner in heap_caps.c to work with the
    peak usage feature.

- Update heap_caps_update_per_task_info_free() to detect when it
is called to free the memory allocated for a task's TCB. Mark
the corresponding task in the statistics list as deleted.

- Add a Kconfig option, dependent on HEAP_TASK_TRACKING being enabled,
that keeps the statistics related to deleted tasks when set to true
(they are removed on task deletion otherwise).

- In the task tracking feature, add current and peak memory usage
fields to the statistics structures to keep track of the current and
peak memory usage of a given task, per heap and across all heaps.

- Fix missing block owner when allocating memory for heaps_array
in heap_caps_init.

- Keep the original implementation of the task tracking
for backward compatibility reasons.
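
A minimal usage sketch of the new API (illustrative only: the function and type names
come from the headers added in this commit, the surrounding application code is hypothetical):

    #include <stdio.h>
    #include "esp_heap_task_info.h"

    static void dump_per_task_heap_usage(void)
    {
        heap_all_tasks_stat_t stats;
        // size and allocate the arrays that receive the per-task/heap/alloc statistics
        if (heap_caps_alloc_all_task_stat_arrays(&stats) == ESP_OK) {
            if (heap_caps_get_all_task_stat(&stats) == ESP_OK) {
                for (size_t i = 0; i < stats.task_count; i++) {
                    task_stat_t *t = &stats.stat_arr[i];
                    printf("%s: current %u bytes, peak %u bytes\n",
                           t->name,
                           (unsigned)t->overall_current_usage,
                           (unsigned)t->overall_peak_usage);
                }
            }
            heap_caps_free_all_task_stat_arrays(&stats);
        }
        // alternatively, heap_caps_print_all_task_stat_overview() prints a summary table
    }
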
Guillaume Souchere
2025-02-14 08:47:29 +01:00
parent 47df2ed524
commit daf8f9edb6
16 changed files with 1368 additions and 75 deletions

View File

@@ -22,7 +22,8 @@ set(includes "include"
# inside the heap component and are therefore added
# to the list of the private includes from the heap
# component perspective
set(priv_includes "tlsf/include")
set(priv_includes "private_include"
"tlsf/include")
if(NOT CONFIG_HEAP_TLSF_USE_ROM_IMPL)
list(APPEND srcs "tlsf/tlsf.c")
@@ -50,7 +51,6 @@ if(NOT BOOTLOADER_BUILD)
list(APPEND srcs "port/${target}/memory_layout.c")
endif()
idf_component_register(SRCS "${srcs}"
INCLUDE_DIRS ${includes}
PRIV_INCLUDE_DIRS ${priv_includes}

View File

@@ -96,8 +96,21 @@ menu "Heap memory debugging"
help
Enables tracking the task responsible for each heap allocation.
This function depends on heap poisoning being enabled and adds four more bytes of overhead for each block
allocated.
Note: Allocating or freeing memory or using the task tracking API will lead to a crash when the
scheduler is not running (e.g., after calling vTaskSuspendAll).
config HEAP_TRACK_DELETED_TASKS
bool "Keep information about the memory usage of deleted tasks"
depends on HEAP_TASK_TRACKING
default n
help
When enabled, this configuration allows the user to keep track of the memory usage
of a task that has been deleted.
This allows the user to verify that no memory allocated within a task remains unfreed
before terminating the task.
Note that this feature cannot keep track of a task deletion if the task is allocated statically.
config HEAP_ABORT_WHEN_ALLOCATION_FAILS
bool "Abort if memory allocation fails"

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -12,6 +12,11 @@
#include "multi_heap.h"
#include "esp_log.h"
#include "heap_private.h"
#if CONFIG_HEAP_TASK_TRACKING
#include "esp_heap_task_info.h"
#include "esp_heap_task_info_internal.h"
#include "multi_heap_internal.h"
#endif
#ifdef CONFIG_HEAP_USE_HOOKS
#define CALL_HOOK(hook, ...) { \
@@ -72,6 +77,11 @@ HEAP_IRAM_ATTR void heap_caps_free( void *ptr)
void *block_owner_ptr = MULTI_HEAP_REMOVE_BLOCK_OWNER_OFFSET(ptr);
heap_t *heap = find_containing_heap(block_owner_ptr);
assert(heap != NULL && "free() target pointer is outside heap areas");
#if CONFIG_HEAP_TASK_TRACKING
heap_caps_update_per_task_info_free(heap, ptr);
#endif
multi_heap_free(heap->heap, block_owner_ptr);
CALL_HOOK(esp_heap_trace_free_hook, ptr);
@@ -145,6 +155,13 @@ HEAP_IRAM_ATTR NOINLINE_ATTR void *heap_caps_aligned_alloc_base(size_t alignment
ret = aligned_or_unaligned_alloc(heap->heap, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size) + 4,
alignment, MULTI_HEAP_BLOCK_OWNER_SIZE()); // int overflow checked above
if (ret != NULL) {
#if CONFIG_HEAP_TASK_TRACKING
heap_caps_update_per_task_info_alloc(heap,
MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(ret),
multi_heap_get_full_block_size(heap->heap, ret),
get_all_caps(heap));
#endif
MULTI_HEAP_SET_BLOCK_OWNER(ret);
ret = MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(ret);
uint32_t *iptr = dram_alloc_to_iram_addr(ret, size + 4); // int overflow checked above
@@ -156,6 +173,13 @@ HEAP_IRAM_ATTR NOINLINE_ATTR void *heap_caps_aligned_alloc_base(size_t alignment
ret = aligned_or_unaligned_alloc(heap->heap, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size),
alignment, MULTI_HEAP_BLOCK_OWNER_SIZE());
if (ret != NULL) {
#if CONFIG_HEAP_TASK_TRACKING
heap_caps_update_per_task_info_alloc(heap,
MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(ret),
multi_heap_get_full_block_size(heap->heap, ret),
get_all_caps(heap));
#endif
MULTI_HEAP_SET_BLOCK_OWNER(ret);
ret = MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(ret);
CALL_HOOK(esp_heap_trace_alloc_hook, ret, size, caps);
@@ -240,9 +264,25 @@ HEAP_IRAM_ATTR NOINLINE_ATTR void *heap_caps_realloc_base( void *ptr, size_t siz
if (compatible_caps && !ptr_in_diram_case && alignment<=UNALIGNED_MEM_ALIGNMENT_BYTES) {
// try to reallocate this memory within the same heap
// (which will resize the block if it can)
#if CONFIG_HEAP_TASK_TRACKING
size_t old_size = multi_heap_get_full_block_size(heap->heap, ptr);
TaskHandle_t old_task = MULTI_HEAP_GET_BLOCK_OWNER(ptr);
#endif
void *r = multi_heap_realloc(heap->heap, ptr, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(size));
if (r != NULL) {
MULTI_HEAP_SET_BLOCK_OWNER(r);
#if CONFIG_HEAP_TASK_TRACKING
heap_caps_update_per_task_info_realloc(heap,
MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(ptr),
MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(r),
old_size, old_task,
multi_heap_get_full_block_size(heap->heap, r),
get_all_caps(heap));
#endif
r = MULTI_HEAP_ADD_BLOCK_OWNER_OFFSET(r);
CALL_HOOK(esp_heap_trace_alloc_hook, r, size, caps);
return r;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -12,6 +12,7 @@
#include "multi_heap.h"
#include "multi_heap_platform.h"
#include "esp_heap_caps_init.h"
#include "esp_heap_task_info_internal.h"
#include "heap_memory_layout.h"
#include "esp_private/startup_internal.h"
@@ -144,6 +145,10 @@ void heap_caps_init(void)
heap_idx++;
assert(heap_idx <= num_heaps);
// add the name of the newly created heap to match the region name in which it will be created
#if CONFIG_HEAP_TASK_TRACKING
heap->name = type->name;
#endif // CONFIG_HEAP_TASK_TRACKING
memcpy(heap->caps, type->caps, sizeof(heap->caps));
heap->start = region->start;
heap->end = region->start + region->size;
@@ -168,13 +173,15 @@ void heap_caps_init(void)
assert(SLIST_EMPTY(&registered_heaps));
heap_t *heaps_array = NULL;
heap_t *used_heap = NULL;
for (size_t i = 0; i < num_heaps; i++) {
if (heap_caps_match(&temp_heaps[i], MALLOC_CAP_8BIT|MALLOC_CAP_INTERNAL)) {
used_heap = temp_heaps + i;
if (heap_caps_match(used_heap, MALLOC_CAP_8BIT|MALLOC_CAP_INTERNAL)) {
/* use the first DRAM heap which can fit the data.
* the allocated block won't include the block owner bytes since this operation
* is done by the top level API heap_caps_malloc(). So we need to add it manually
* after successful allocation. Allocate extra 4 bytes for that purpose. */
heaps_array = multi_heap_malloc(temp_heaps[i].heap, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(sizeof(heap_t) * num_heaps));
heaps_array = multi_heap_malloc(used_heap->heap, MULTI_HEAP_ADD_BLOCK_OWNER_SIZE(sizeof(heap_t) * num_heaps));
if (heaps_array != NULL) {
break;
}
@@ -199,6 +206,13 @@ void heap_caps_init(void)
* until the smaller heaps are full. */
sorted_add_to_registered_heaps(&heaps_array[i]);
}
#if CONFIG_HEAP_TASK_TRACKING
heap_caps_update_per_task_info_alloc(used_heap,
MULTI_HEAP_REMOVE_BLOCK_OWNER_OFFSET(heaps_array),
multi_heap_get_full_block_size(used_heap->heap, MULTI_HEAP_REMOVE_BLOCK_OWNER_OFFSET(heaps_array)),
get_all_caps(used_heap));
#endif
}
esp_err_t heap_caps_add_region(intptr_t start, intptr_t end)
@@ -279,6 +293,15 @@ esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start,
err = ESP_ERR_NO_MEM;
goto done;
}
#if CONFIG_HEAP_TASK_TRACKING
// add the name of the newly created heap to match the region name in which it will be created
for(size_t i = 0; i < soc_memory_type_count; i++) {
if (get_ored_caps(caps) == get_ored_caps(soc_memory_types[i].caps)) {
p_new->name = soc_memory_types[i].name;
break;
}
}
#endif // CONFIG_HEAP_TASK_TRACKING
memcpy(p_new->caps, caps, sizeof(p_new->caps));
p_new->start = start;
p_new->end = end;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -25,6 +25,9 @@ extern "C" {
/* Type for describing each registered heap */
typedef struct heap_t_ {
#if CONFIG_HEAP_TASK_TRACKING
const char *name;
#endif // CONFIG_HEAP_TASK_TRACKING
uint32_t caps[SOC_MEMORY_TYPE_NO_PRIOS]; ///< Capabilities for the type of memory in this heap (as a prioritised set). Copied from soc_memory_types so it's in RAM not flash.
intptr_t start;
intptr_t end;
@@ -43,17 +46,22 @@ extern SLIST_HEAD(registered_heap_ll, heap_t_) registered_heaps;
bool heap_caps_match(const heap_t *heap, uint32_t caps);
FORCE_INLINE_ATTR uint32_t get_ored_caps(const uint32_t caps[SOC_MEMORY_TYPE_NO_PRIOS])
{
uint32_t all_caps = 0;
for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
all_caps |= caps[prio];
}
return all_caps;
}
/* return all possible capabilities (across all priorities) for a given heap */
FORCE_INLINE_ATTR uint32_t get_all_caps(const heap_t *heap)
{
if (heap->heap == NULL) {
return 0;
}
uint32_t all_caps = 0;
for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
all_caps |= heap->caps[prio];
}
return all_caps;
return get_ored_caps(heap->caps);
}
/* Find the heap which belongs to ptr, or return NULL if it's

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2018-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -7,12 +7,878 @@
#include <freertos/FreeRTOS.h>
#include <freertos/task.h>
#include <multi_heap.h>
#include <string.h>
#include "multi_heap_internal.h"
#include "heap_private.h"
#include "esp_heap_task_info.h"
#include "esp_heap_task_info_internal.h"
#include "heap_memory_layout.h"
#include "esp_log.h"
#ifdef CONFIG_HEAP_TASK_TRACKING
const static char *TAG = "heap_task_tracking";
static SemaphoreHandle_t s_task_tracking_mutex = NULL;
static StaticSemaphore_t s_task_tracking_mutex_buf;
typedef struct alloc_stats {
heap_task_block_t alloc_stat;
STAILQ_ENTRY(alloc_stats) next_alloc_stat;
} alloc_stats_t;
/**
* @brief Internal singly linked list used to gather information about each heap used
* by a given task.
*/
typedef struct heap_stats {
multi_heap_handle_t heap;
heap_stat_t heap_stat;
STAILQ_HEAD(alloc_stats_ll, alloc_stats) allocs_stats;
STAILQ_ENTRY(heap_stats) next_heap_stat;
} heap_stats_t;
/** @brief Internal singly linked list used to gather information on all created
* tasks since startup.
*/
typedef struct task_stats {
task_stat_t task_stat;
STAILQ_HEAD(heap_stats_ll, heap_stats) heaps_stats;
SLIST_ENTRY(task_stats) next_task_info;
} task_info_t;
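/*
 * Head of the list of per-task statistics. Overall layout (summary comment added for clarity):
 *   task_stats              SLIST of task_info_t, sorted by decreasing task handle address
 *     └── heaps_stats       STAILQ of heap_stats_t, one entry per heap the task allocated from
 *           └── allocs_stats  STAILQ of alloc_stats_t, one entry per tracked allocation
 */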
static SLIST_HEAD(task_stats_ll, task_stats) task_stats = SLIST_HEAD_INITIALIZER(task_stats);
FORCE_INLINE_ATTR heap_t* find_biggest_heap(void)
{
heap_t *heap = NULL;
heap_t *biggest_heap = NULL;
SLIST_FOREACH(heap, &registered_heaps, next) {
if (biggest_heap == NULL) {
biggest_heap = heap;
} else if ((biggest_heap->end - biggest_heap->start) < (heap->end - heap->start)) {
biggest_heap = heap;
} else {
// nothing to do here
}
}
return biggest_heap;
}
/**
* @brief Create a new alloc stats entry object
*
* @param heap_stats The heap statistics of the heap used for the allocation
* @param alloc_stats An existing alloc stats entry to reuse, or NULL to allocate a new one
* @param task_handle The task handler of the task which performed the allocation
* @param ptr The address of the allocation
* @param size The size of the allocation
*/
static HEAP_IRAM_ATTR void create_new_alloc_stats_entry(heap_stats_t *heap_stats, alloc_stats_t *alloc_stats, TaskHandle_t task_handle, void *ptr, size_t size)
{
// init the list of allocs with a new entry in heap_stats->allocs_stats. No need
// to memset the memory since all fields will be set later in the function.
if (!alloc_stats) {
// find the heap with the most available free memory to store the statistics
heap_t *heap_used_for_alloc = find_biggest_heap();
alloc_stats = multi_heap_malloc(heap_used_for_alloc->heap, sizeof(alloc_stats_t));
if (!alloc_stats) {
ESP_LOGE(TAG, "Could not allocate memory to add new task statistics");
return;
}
}
alloc_stats->alloc_stat.task = task_handle;
alloc_stats->alloc_stat.address = ptr;
alloc_stats->alloc_stat.size = size;
STAILQ_INSERT_TAIL(&heap_stats->allocs_stats, alloc_stats, next_alloc_stat);
}
/**
* @brief Create a new heap stats entry object
*
* @param task_stats The task statistics of the task that triggered the allocation
* @param used_heap Information about the heap used for the allocation
* @param ptr The address of the allocation
* @param caps The caps of the heap used for the allocation
* @param size The size of the allocation
*/
static HEAP_IRAM_ATTR void create_new_heap_stats_entry(task_info_t *task_stats, heap_t *used_heap, void *ptr, uint32_t caps, size_t size)
{
// find the heap with the most available free memory to store the statistics
heap_t *heap_used_for_alloc = find_biggest_heap();
// init the list of heaps with a new entry in task_stats->heaps_stats. No need
// to memset the memory since all fields will be set later in the function.
heap_stats_t *heap_stats = multi_heap_malloc(heap_used_for_alloc->heap, sizeof(heap_stats_t));
if (!heap_stats) {
ESP_LOGE(TAG, "Could not allocate memory to add new task statistics");
return;
}
// create the alloc stats for the new heap entry
STAILQ_INIT(&heap_stats->allocs_stats);
task_stats->task_stat.heap_count += 1;
heap_stats->heap = used_heap->heap;
heap_stats->heap_stat.name = used_heap->name;
heap_stats->heap_stat.size = used_heap->end - used_heap->start;
heap_stats->heap_stat.caps = caps;
heap_stats->heap_stat.current_usage = size;
heap_stats->heap_stat.peak_usage = size;
heap_stats->heap_stat.alloc_count = 1;
heap_stats->heap_stat.alloc_stat = NULL; // this will be used to point at the user defined array of alloc_stat
STAILQ_INSERT_TAIL(&task_stats->heaps_stats, heap_stats, next_heap_stat);
create_new_alloc_stats_entry(heap_stats, NULL, task_stats->task_stat.handle, ptr, size);
}
/**
* @brief Create a new task info entry in task_stats if the task allocating memory is not in task_stats already.
*
* @param used_heap The heap used by the task to allocate memory
* @param task_handle The task handle of the task allocating memory
* @param task_info The task entry in task_stats. If NULL, the task allocating memory is allocating for the first time
* @param ptr The address of the allocation
* @param size The size of the allocation
* @param caps The ORED caps of the heap used for the allocation
*/
static HEAP_IRAM_ATTR void create_new_task_stats_entry(heap_t *used_heap, TaskHandle_t task_handle, task_info_t *task_info, void *ptr, size_t size, uint32_t caps)
{
// If the task_info passed as parameter is NULL, it means that this task is doing
// its first allocation. Add the task entry to task_info and add heap_stats
// to this new task_info entry.
// If task_info is not NULL, it means that the task already allocated memory
// but now it is allocating in a new heap for the first time. Don't add a new
// task entry to task_info but add a new heap_stats to the task_info
if (!task_info) {
// find the heap with the most available free memory to store the statistics
heap_t *heap_used_for_alloc = find_biggest_heap();
// create the task_stats entry. No need to memset since all fields are set later
task_info = multi_heap_malloc(heap_used_for_alloc->heap, sizeof(task_info_t));
if (!task_info) {
ESP_LOGE(TAG, "Could not allocate memory to add new task statistics");
return;
}
// create the heap stats for the new task entry
STAILQ_INIT(&task_info->heaps_stats);
task_info->task_stat.handle = task_handle;
task_info->task_stat.is_alive = true;
task_info->task_stat.overall_peak_usage = size;
task_info->task_stat.overall_current_usage = size;
task_info->task_stat.heap_count = 0;
task_info->task_stat.heap_stat = NULL; // this will be used to point at the user defined array of heap_stat
if (task_handle == 0x00) {
char task_name[] = "Pre-scheduler";
strcpy(task_info->task_stat.name, task_name);
} else {
strcpy(task_info->task_stat.name, pcTaskGetName(task_handle));
}
// Add the new / first task_info in the list (sorted by decreasing address).
// The decreasing order is chosen because the task_handle 0x00000000 is used for pre-scheduler
// operations and therefore needs to appear last so it is not parsed when trying to find a suitable
// task to update the stats from.
if (SLIST_EMPTY(&task_stats) || task_info->task_stat.handle >= SLIST_FIRST(&task_stats)->task_stat.handle) {
// the list is empty, or the new task handle is at a higher address than the one from the first item
SLIST_INSERT_HEAD(&task_stats, task_info, next_task_info);
} else {
// the new task handle is at a lower address than the first item in the list, go through the list to
// properly insert the new item
task_info_t *cur_task_info = NULL;
task_info_t *prev_task_info = NULL;
SLIST_FOREACH(cur_task_info, &task_stats, next_task_info) {
if (cur_task_info->task_stat.handle < task_info->task_stat.handle) {
SLIST_INSERT_AFTER(prev_task_info, task_info, next_task_info);
break;
} else {
prev_task_info = cur_task_info;
}
}
// here there should be a last case handled: the new task info has a task handle address smaller than all existing
// items in the list. But this case is impossible given that the pre-scheduler allocations always
// happen first and the task handle defaults to 0x00000000 for the pre-scheduler so it will always be
// last in the list.
}
}
create_new_heap_stats_entry(task_info, used_heap, ptr, caps, size);
}
#if !CONFIG_HEAP_TRACK_DELETED_TASKS
/**
* @brief Delete an entry from the list of task statistics
*
* @param task_info The task statistics to delete from the list of task statistics
*/
static HEAP_IRAM_ATTR void delete_task_info_entry(task_info_t *task_info)
{
if (task_info == NULL) {
return;
}
heap_stats_t *current_heap_stat = STAILQ_FIRST(&task_info->heaps_stats);
heap_stats_t *prev_heap_stat = NULL;
// pointer used to free the memory of the statistics
heap_t *containing_heap = NULL;
// remove all entries from task_info->heaps_stats and free the memory
while(current_heap_stat != NULL) {
prev_heap_stat = current_heap_stat;
current_heap_stat = STAILQ_NEXT(current_heap_stat, next_heap_stat);
/* remove all entries from heap_stats->allocs_stats */
alloc_stats_t *alloc_stat = NULL;
while ((alloc_stat = STAILQ_FIRST( &prev_heap_stat->allocs_stats)) != NULL) {
STAILQ_REMOVE(&prev_heap_stat->allocs_stats, alloc_stat, alloc_stats, next_alloc_stat);
containing_heap = find_containing_heap(alloc_stat);
// alloc_stat must be allocated somewhere
if (containing_heap != NULL) {
multi_heap_free(containing_heap->heap, alloc_stat);
}
}
if (STAILQ_EMPTY(&prev_heap_stat->allocs_stats)) {
STAILQ_REMOVE(&task_info->heaps_stats, prev_heap_stat, heap_stats, next_heap_stat);
containing_heap = find_containing_heap(prev_heap_stat);
// prev_heap_stat must be allocated somewhere
if (containing_heap != NULL) {
multi_heap_free(containing_heap->heap, prev_heap_stat);
}
}
}
if (STAILQ_EMPTY(&task_info->heaps_stats)) {
// remove task_info from task_stats (and free the memory)
SLIST_REMOVE(&task_stats, task_info, task_stats, next_task_info);
containing_heap = find_containing_heap(task_info);
if (containing_heap != NULL) {
multi_heap_free(containing_heap->heap, task_info);
}
}
}
#endif // !CONFIG_HEAP_TRACK_DELETED_TASKS
HEAP_IRAM_ATTR void heap_caps_update_per_task_info_alloc(heap_t *heap, void *ptr, size_t size, uint32_t caps)
{
if (s_task_tracking_mutex == NULL) {
s_task_tracking_mutex = xSemaphoreCreateMutexStatic(&s_task_tracking_mutex_buf);
assert(s_task_tracking_mutex);
}
TaskHandle_t task_handle = xTaskGetCurrentTaskHandle();
task_info_t *task_info = NULL;
xSemaphoreTake(s_task_tracking_mutex, portMAX_DELAY);
/* find the task in the list and update the overall stats */
SLIST_FOREACH(task_info, &task_stats, next_task_info) {
if (task_info->task_stat.handle == task_handle && task_info->task_stat.is_alive) {
task_info->task_stat.overall_current_usage += size;
if (task_info->task_stat.overall_current_usage > task_info->task_stat.overall_peak_usage) {
task_info->task_stat.overall_peak_usage = task_info->task_stat.overall_current_usage;
}
heap_stats_t *heap_stats = NULL;
/* find the heap in the list and update the overall stats */
STAILQ_FOREACH(heap_stats, &task_info->heaps_stats, next_heap_stat) {
if (heap_stats->heap == heap->heap) {
heap_stats->heap_stat.current_usage += size;
heap_stats->heap_stat.alloc_count++;
if (heap_stats->heap_stat.current_usage > heap_stats->heap_stat.peak_usage) {
heap_stats->heap_stat.peak_usage = heap_stats->heap_stat.current_usage;
}
/* add the alloc info to the list */
create_new_alloc_stats_entry(heap_stats, NULL, task_handle, ptr, size);
xSemaphoreGive(s_task_tracking_mutex);
return;
}
}
break;
}
// since the list of task info is sorted by decreasing task handle address, if the current task info
// has a smaller task handle address than the one we are checking against, we can be sure
// the task handle will not be found in the list, and we can break the loop.
if (task_info->task_stat.handle < task_handle) {
task_info = NULL;
break;
}
}
// No task entry was found OR no heap in the task entry was found.
// Add the info to the list (either new task stats or new heap stat if task_info not NULL)
create_new_task_stats_entry(heap, task_handle, task_info, ptr, size, caps);
xSemaphoreGive(s_task_tracking_mutex);
}
HEAP_IRAM_ATTR void heap_caps_update_per_task_info_realloc(heap_t *heap, void *old_ptr, void *new_ptr,
size_t old_size, TaskHandle_t old_task,
size_t new_size, uint32_t caps)
{
TaskHandle_t task_handle = xTaskGetCurrentTaskHandle();
bool task_in_list = false;
task_info_t *task_info = NULL;
alloc_stats_t *alloc_stat = NULL;
xSemaphoreTake(s_task_tracking_mutex, portMAX_DELAY);
SLIST_FOREACH(task_info, &task_stats, next_task_info) {
if (task_info->task_stat.handle == old_task) {
heap_stats_t *heap_stats = NULL;
task_info->task_stat.overall_current_usage -= old_size;
STAILQ_FOREACH(heap_stats, &task_info->heaps_stats, next_heap_stat) {
if (heap_stats->heap == heap->heap) {
heap_stats->heap_stat.current_usage -= old_size;
heap_stats->heap_stat.alloc_count--;
/* remove the alloc from the list. The updated alloc stats are added later
* in the function */
STAILQ_FOREACH(alloc_stat, &heap_stats->allocs_stats, next_alloc_stat) {
if (alloc_stat->alloc_stat.address == old_ptr) {
STAILQ_REMOVE(&heap_stats->allocs_stats, alloc_stat, alloc_stats, next_alloc_stat);
/* keep the memory used to store alloc_stat since we will fill it with new alloc
* info later in the function */
break;
}
}
break;
}
}
}
if (task_info->task_stat.handle == task_handle && task_info->task_stat.is_alive) {
heap_stats_t *heap_stats = NULL;
task_info->task_stat.overall_current_usage += new_size;
STAILQ_FOREACH(heap_stats, &task_info->heaps_stats, next_heap_stat) {
if (heap_stats->heap == heap->heap) {
heap_stats->heap_stat.current_usage += new_size;
heap_stats->heap_stat.alloc_count++;
if (heap_stats->heap_stat.current_usage > heap_stats->heap_stat.peak_usage) {
heap_stats->heap_stat.peak_usage = heap_stats->heap_stat.current_usage;
}
create_new_alloc_stats_entry(heap_stats, alloc_stat, task_handle, new_ptr, new_size);
break;
}
}
task_in_list = true;
}
if (task_info->task_stat.overall_current_usage > task_info->task_stat.overall_peak_usage) {
task_info->task_stat.overall_peak_usage = task_info->task_stat.overall_current_usage;
}
}
if (!task_in_list) {
// No task entry was found OR no heap in the task entry was found.
// Add the info to the list (either new task stats or new heap stat if task_info not NULL)
create_new_task_stats_entry(heap, task_handle, task_info, new_ptr, new_size, caps);
}
xSemaphoreGive(s_task_tracking_mutex);
}
HEAP_IRAM_ATTR void heap_caps_update_per_task_info_free(heap_t *heap, void *ptr)
{
void *block_owner_ptr = MULTI_HEAP_REMOVE_BLOCK_OWNER_OFFSET(ptr);
TaskHandle_t task_handle = MULTI_HEAP_GET_BLOCK_OWNER(block_owner_ptr);
if (!task_handle) {
return;
}
task_info_t *task_info = NULL;
#if !CONFIG_HEAP_TRACK_DELETED_TASKS
task_info_t *task_info_to_delete = NULL;
#endif // !CONFIG_HEAP_TRACK_DELETED_TASKS
xSemaphoreTake(s_task_tracking_mutex, portMAX_DELAY);
/* find the matching task */
SLIST_FOREACH(task_info, &task_stats, next_task_info) {
/* check all tasks (alive and deleted) since the free can come from any tasks,
* not necessarily the one which allocated the memory. */
if (task_info->task_stat.handle == task_handle) {
heap_stats_t *heap_stats = NULL;
alloc_stats_t *alloc_stat = NULL;
/* find the matching heap */
STAILQ_FOREACH(heap_stats, &task_info->heaps_stats, next_heap_stat) {
if(heap_stats->heap == heap->heap) {
/* find the matching allocation and remove it from the list*/
STAILQ_FOREACH(alloc_stat, &heap_stats->allocs_stats, next_alloc_stat) {
if (alloc_stat->alloc_stat.address == ptr) {
STAILQ_REMOVE(&heap_stats->allocs_stats, alloc_stat, alloc_stats, next_alloc_stat);
/* the memory used to store alloc_stat is freed later in the function,
* once the usage counters have been updated */
break;
}
}
if (alloc_stat != NULL) {
heap_stats->heap_stat.alloc_count--;
heap_stats->heap_stat.current_usage -= alloc_stat->alloc_stat.size;
task_info->task_stat.overall_current_usage -= alloc_stat->alloc_stat.size;
}
}
}
/* free the memory used to store alloc_stat */
heap_t *containing_heap = find_containing_heap(alloc_stat);
// alloc_stat must be allocated somewhere
if (containing_heap != NULL) {
multi_heap_free(containing_heap->heap, alloc_stat);
}
}
// when a task is deleted, heap_caps_free is called from vTaskDelete to free the TCB of the task.
// Try to make a TaskHandle out of ptr and compare it to the list of tasks in task_stats.
// If one task_info contains the newly made TaskHandle from ptr it means that heap_caps_free
// was indeed called from vTaskDelete. We can then update the task_stats by marking the corresponding
// task as deleted.
if (task_info->task_stat.handle == ptr) {
// we found the task info from the task that is being deleted.
task_info->task_stat.is_alive = false;
#if !CONFIG_HEAP_TRACK_DELETED_TASKS
task_info_to_delete = task_info;
#endif // !CONFIG_HEAP_TRACK_DELETED_TASKS
}
}
#if !CONFIG_HEAP_TRACK_DELETED_TASKS
// remove the entry related to the task that was just deleted.
if (task_info_to_delete != NULL) {
delete_task_info_entry(task_info_to_delete);
}
#endif // !CONFIG_HEAP_TRACK_DELETED_TASKS
xSemaphoreGive(s_task_tracking_mutex);
}
esp_err_t heap_caps_get_all_task_stat(heap_all_tasks_stat_t *tasks_stat)
{
if (tasks_stat == NULL ||
(tasks_stat->stat_arr == NULL && tasks_stat->task_count != 0) ||
(tasks_stat->heap_stat_start == NULL && tasks_stat->heap_count != 0) ||
(tasks_stat->alloc_stat_start == NULL && tasks_stat->alloc_count != 0)) {
return ESP_ERR_INVALID_ARG;
}
size_t task_index = 0;
size_t heap_index = 0;
size_t alloc_index = 0;
task_info_t *task_info = NULL;
xSemaphoreTake(s_task_tracking_mutex, portMAX_DELAY);
SLIST_FOREACH(task_info, &task_stats, next_task_info) {
// If there are no more task stat entries available in tasks_stat->stat_arr,
// break the loop and return from the function.
if (task_index >= tasks_stat->task_count) {
break;
}
memcpy(tasks_stat->stat_arr + task_index, &task_info->task_stat, sizeof(task_stat_t));
task_stat_t *current_task_stat = tasks_stat->stat_arr + task_index;
task_index++;
// If no more heap stat entries in the array are available, just proceed
// with filling task stats but skip filling info on heap stat and alloc stat.
if (heap_index + task_info->task_stat.heap_count > tasks_stat->heap_count) {
current_task_stat->heap_stat = NULL;
continue;
}
// set the pointer where the heap info for the given task will
// be in the user array
current_task_stat->heap_stat = tasks_stat->heap_stat_start + heap_index;
heap_index += task_info->task_stat.heap_count;
// copy the stats of the different heaps the task has used and the different allocs
// allocated in those heaps. If the number of entries remaining for alloc stats is
// smaller than the number of allocs allocated on the current heap, no alloc stat will
// be copied at all.
size_t h_index = 0;
heap_stats_t *heap_info = STAILQ_FIRST(&task_info->heaps_stats);
while (h_index < task_info->task_stat.heap_count && heap_info != NULL) {
// increase alloc_index before filling the alloc info of the given heap
// to avoid running out of alloc stat entry while doing it.
if (alloc_index + heap_info->heap_stat.alloc_count > tasks_stat->alloc_count) {
heap_info->heap_stat.alloc_stat = NULL;
} else {
// set the pointer where the alloc info for the given heap will
// be in the user array
heap_info->heap_stat.alloc_stat = tasks_stat->alloc_stat_start + alloc_index;
// fill the alloc array in heap_info by running through all blocks of a given heap
// and storing info about the blocks allocated by the given task
alloc_stats_t *alloc_stats = NULL;
size_t a_index = 0;
STAILQ_FOREACH(alloc_stats, &heap_info->allocs_stats, next_alloc_stat) {
heap_info->heap_stat.alloc_stat[a_index] = alloc_stats->alloc_stat;
a_index++;
}
alloc_index += heap_info->heap_stat.alloc_count;
}
memcpy(current_task_stat->heap_stat + h_index, &heap_info->heap_stat, sizeof(heap_stat_t));
h_index++;
heap_info = STAILQ_NEXT(heap_info, next_heap_stat);
}
}
xSemaphoreGive(s_task_tracking_mutex);
tasks_stat->task_count = task_index;
tasks_stat->heap_count = heap_index;
tasks_stat->alloc_count = alloc_index;
return ESP_OK;
}
esp_err_t heap_caps_get_single_task_stat(heap_single_task_stat_t *task_stat, TaskHandle_t task_handle)
{
if (task_stat == NULL ||
(task_stat->heap_stat_start == NULL && task_stat->heap_count != 0) ||
(task_stat->alloc_stat_start == NULL && task_stat->alloc_count != 0)) {
return ESP_ERR_INVALID_ARG;
}
if (task_handle == NULL) {
task_handle = xTaskGetCurrentTaskHandle();
}
task_info_t *task_info = NULL;
xSemaphoreTake(s_task_tracking_mutex, portMAX_DELAY);
SLIST_FOREACH(task_info, &task_stats, next_task_info) {
if(task_info->task_stat.handle == task_handle) {
// copy the task_stat of the task itself
memcpy(&task_stat->stat, &task_info->task_stat, sizeof(task_stat_t));
break;
}
}
xSemaphoreGive(s_task_tracking_mutex);
if (task_info == NULL) {
return ESP_FAIL;
}
task_stat->stat.heap_stat = task_stat->heap_stat_start;
// copy the stats of the different heaps the task has used and the different blocks
// allocated in those heaps. If the number of entries remaining for block stats is
// smaller than the number of blocks allocated on the current heap, no block stat will
// be copied at all.
size_t heap_index = 0;
size_t alloc_index = 0;
xSemaphoreTake(s_task_tracking_mutex, portMAX_DELAY);
heap_stats_t *heap_info = STAILQ_FIRST(&task_info->heaps_stats);
while (heap_index < task_info->task_stat.heap_count && heap_info != NULL) {
// check that there is enough heap_stat entry left to add another one to the user defined
// array of heap_stat
if (heap_index >= task_stat->heap_count) {
break;
}
// increase alloc_index before filling the block info of the given heap
// to avoid running out of block stat entry while doing it.
if (alloc_index + heap_info->heap_stat.alloc_count > task_stat->alloc_count) {
heap_info->heap_stat.alloc_stat = NULL;
} else {
// set the pointer where the block info for the given heap will
// be in the user array
heap_info->heap_stat.alloc_stat = task_stat->alloc_stat_start + alloc_index;
// fill the alloc array in heap_info by running through all blocks of a given heap
// and storing info about the blocks allocated by the given task
alloc_stats_t *alloc_stats = NULL;
size_t a_index = 0;
STAILQ_FOREACH(alloc_stats, &heap_info->allocs_stats, next_alloc_stat) {
heap_info->heap_stat.alloc_stat[a_index] = alloc_stats->alloc_stat;
a_index++;
}
alloc_index += heap_info->heap_stat.alloc_count;
}
memcpy(task_stat->stat.heap_stat + heap_index, &heap_info->heap_stat, sizeof(heap_stat_t));
heap_index++;
heap_info = STAILQ_NEXT(heap_info, next_heap_stat);
}
xSemaphoreGive(s_task_tracking_mutex);
task_stat->heap_count = heap_index;
task_stat->alloc_count = alloc_index;
return ESP_OK;
}
static void heap_caps_print_task_info(task_info_t *task_info, bool is_last_task_info)
{
const char *task_info_visual = is_last_task_info ? " " : "│";
const char *task_info_visual_start = is_last_task_info ? "└" : "├";
esp_rom_printf("%s %s: %s, CURRENT MEMORY USAGE %d, PEAK MEMORY USAGE %d, TOTAL HEAP USED %d:\n", task_info_visual_start,
task_info->task_stat.is_alive ? "ALIVE" : "DELETED",
task_info->task_stat.name,
task_info->task_stat.overall_current_usage,
task_info->task_stat.overall_peak_usage,
task_info->task_stat.heap_count);
heap_stats_t *heap_info = NULL;
STAILQ_FOREACH(heap_info, &task_info->heaps_stats, next_heap_stat) {
char *next_heap_visual = !STAILQ_NEXT(heap_info, next_heap_stat) ? " " : "│";
char *next_heap_visual_start = !STAILQ_NEXT(heap_info, next_heap_stat) ? "└" : "├";
esp_rom_printf("%s %s HEAP: %s, CAPS: 0x%08lx, SIZE: %d, USAGE: CURRENT %d (%d%%), PEAK %d (%d%%), ALLOC COUNT: %d\n",
task_info_visual,
next_heap_visual_start,
heap_info->heap_stat.name,
heap_info->heap_stat.caps,
heap_info->heap_stat.size,
heap_info->heap_stat.current_usage,
(heap_info->heap_stat.current_usage * 100) / heap_info->heap_stat.size,
heap_info->heap_stat.peak_usage,
(heap_info->heap_stat.peak_usage * 100) / heap_info->heap_stat.size,
heap_info->heap_stat.alloc_count);
alloc_stats_t *alloc_stats = NULL;
STAILQ_FOREACH(alloc_stats, &heap_info->allocs_stats, next_alloc_stat) {
esp_rom_printf("%s %s ├ ALLOC %p, SIZE %d\n", task_info_visual,
next_heap_visual,
alloc_stats->alloc_stat.address,
alloc_stats->alloc_stat.size);
}
}
}
static void heap_caps_print_task_overview(task_info_t *task_info, bool is_first_task_info, bool is_last_task_info)
{
if (is_first_task_info) {
esp_rom_printf("┌────────────────────┬─────────┬──────────────────────┬───────────────────┬─────────────────┐\n");
esp_rom_printf("│ TASK │ STATUS │ CURRENT MEMORY USAGE │ PEAK MEMORY USAGE │ TOTAL HEAP USED │\n");
esp_rom_printf("├────────────────────┼─────────┼──────────────────────┼───────────────────┼─────────────────┤\n");
}
task_stat_t task_stat = task_info->task_stat;
esp_rom_printf("│ %18s │ %7s │ %20d │ %17d │ %15d │\n",
task_stat.name,
task_stat.is_alive ? "ALIVE " : "DELETED",
task_stat.overall_current_usage,
task_stat.overall_peak_usage,
task_stat.heap_count);
if (is_last_task_info) {
esp_rom_printf("└────────────────────┴─────────┴──────────────────────┴───────────────────┴─────────────────┘\n");
}
}
void heap_caps_print_single_task_stat(TaskHandle_t task_handle)
{
if (task_handle == NULL) {
task_handle = xTaskGetCurrentTaskHandle();
}
task_info_t *task_info = NULL;
xSemaphoreTake(s_task_tracking_mutex, portMAX_DELAY);
SLIST_FOREACH(task_info, &task_stats, next_task_info) {
if (task_info->task_stat.handle == task_handle) {
heap_caps_print_task_info(task_info, true);
xSemaphoreGive(s_task_tracking_mutex);
return;
}
}
xSemaphoreGive(s_task_tracking_mutex);
}
void heap_caps_print_all_task_stat(void)
{
task_info_t *task_info = NULL;
xSemaphoreTake(s_task_tracking_mutex, portMAX_DELAY);
SLIST_FOREACH(task_info, &task_stats, next_task_info) {
const bool last_task_info = (SLIST_NEXT(task_info, next_task_info) == NULL);
heap_caps_print_task_info(task_info, last_task_info);
}
xSemaphoreGive(s_task_tracking_mutex);
}
void heap_caps_print_single_task_stat_overview(TaskHandle_t task_handle)
{
if (task_handle == NULL) {
task_handle = xTaskGetCurrentTaskHandle();
}
task_info_t *task_info = NULL;
xSemaphoreTake(s_task_tracking_mutex, portMAX_DELAY);
SLIST_FOREACH(task_info, &task_stats, next_task_info) {
if (task_info->task_stat.handle == task_handle) {
heap_caps_print_task_overview(task_info, true, true);
xSemaphoreGive(s_task_tracking_mutex);
return;
}
}
xSemaphoreGive(s_task_tracking_mutex);
}
void heap_caps_print_all_task_stat_overview(void)
{
task_info_t *task_info = NULL;
bool is_first_task_info = true;
xSemaphoreTake(s_task_tracking_mutex, portMAX_DELAY);
SLIST_FOREACH(task_info, &task_stats, next_task_info) {
const bool last_task_info = (SLIST_NEXT(task_info, next_task_info) == NULL);
heap_caps_print_task_overview(task_info, is_first_task_info, last_task_info);
is_first_task_info = false;
}
xSemaphoreGive(s_task_tracking_mutex);
}
esp_err_t heap_caps_alloc_single_task_stat_arrays(heap_single_task_stat_t *task_stat, TaskHandle_t task_handle)
{
if (task_handle == NULL) {
task_handle = xTaskGetCurrentTaskHandle();
}
task_stat->heap_stat_start = NULL;
task_stat->alloc_stat_start = NULL;
task_stat->heap_count = 0;
task_stat->alloc_count = 0;
task_info_t *task_info = NULL;
xSemaphoreTake(s_task_tracking_mutex, portMAX_DELAY);
SLIST_FOREACH(task_info, &task_stats, next_task_info) {
if(task_info->task_stat.handle == task_handle && task_info->task_stat.is_alive) {
task_stat->heap_count = task_info->task_stat.heap_count;
heap_stats_t *heap_info = NULL;
STAILQ_FOREACH(heap_info, &task_info->heaps_stats, next_heap_stat) {
task_stat->alloc_count += heap_info->heap_stat.alloc_count;
}
break;
}
}
xSemaphoreGive(s_task_tracking_mutex);
// allocate the memory used to store the statistics of allocs, heaps
if (task_stat->heap_count != 0) {
heap_t *heap_used_for_alloc = find_biggest_heap();
task_stat->heap_stat_start = multi_heap_malloc(heap_used_for_alloc->heap, task_stat->heap_count * sizeof(heap_stat_t));
if (task_stat->heap_stat_start == NULL) {
return ESP_FAIL;
}
}
if (task_stat->alloc_count != 0) {
heap_t *heap_used_for_alloc = find_biggest_heap();
task_stat->alloc_stat_start = multi_heap_malloc(heap_used_for_alloc->heap, task_stat->alloc_count * sizeof(heap_task_block_t));
if (task_stat->alloc_stat_start == NULL) {
return ESP_FAIL;
}
}
return ESP_OK;
}
void heap_caps_free_single_task_stat_arrays(heap_single_task_stat_t *task_stat)
{
if (task_stat->heap_stat_start != NULL) {
heap_t *heap_used_for_alloc = find_containing_heap(task_stat->heap_stat_start);
assert(heap_used_for_alloc != NULL);
multi_heap_free(heap_used_for_alloc->heap, task_stat->heap_stat_start);
task_stat->heap_stat_start = NULL;
task_stat->heap_count = 0;
}
if (task_stat->alloc_stat_start != NULL) {
heap_t *heap_used_for_alloc = find_containing_heap(task_stat->alloc_stat_start);
assert(heap_used_for_alloc != NULL);
multi_heap_free(heap_used_for_alloc->heap, task_stat->alloc_stat_start);
task_stat->alloc_stat_start = NULL;
task_stat->alloc_count = 0;
}
}
esp_err_t heap_caps_alloc_all_task_stat_arrays(heap_all_tasks_stat_t *tasks_stat)
{
tasks_stat->stat_arr = NULL;
tasks_stat->heap_stat_start = NULL;
tasks_stat->alloc_stat_start = NULL;
tasks_stat->task_count = 0;
tasks_stat->heap_count = 0;
tasks_stat->alloc_count = 0;
task_info_t *task_info = NULL;
xSemaphoreTake(s_task_tracking_mutex, portMAX_DELAY);
SLIST_FOREACH(task_info, &task_stats, next_task_info) {
tasks_stat->task_count += 1;
tasks_stat->heap_count += task_info->task_stat.heap_count;
heap_stats_t *heap_info = NULL;
STAILQ_FOREACH(heap_info, &task_info->heaps_stats, next_heap_stat) {
tasks_stat->alloc_count += heap_info->heap_stat.alloc_count;
}
}
xSemaphoreGive(s_task_tracking_mutex);
// allocate the memory used to store the statistics of allocs, heaps and tasks
if (tasks_stat->task_count != 0) {
heap_t *heap_used_for_alloc = find_biggest_heap();
tasks_stat->stat_arr = multi_heap_malloc(heap_used_for_alloc->heap, tasks_stat->task_count * sizeof(task_stat_t));
if (tasks_stat->stat_arr == NULL) {
return ESP_FAIL;
}
}
if (tasks_stat->heap_count != 0) {
heap_t *heap_used_for_alloc = find_biggest_heap();
tasks_stat->heap_stat_start = multi_heap_malloc(heap_used_for_alloc->heap, tasks_stat->heap_count * sizeof(heap_stat_t));
if (tasks_stat->heap_stat_start == NULL) {
return ESP_FAIL;
}
}
if (tasks_stat->alloc_count != 0) {
heap_t *heap_used_for_alloc = find_biggest_heap();
tasks_stat->alloc_stat_start = multi_heap_malloc(heap_used_for_alloc->heap, tasks_stat->alloc_count * sizeof(heap_task_block_t));
if (tasks_stat->alloc_stat_start == NULL) {
return ESP_FAIL;
}
}
return ESP_OK;
}
void heap_caps_free_all_task_stat_arrays(heap_all_tasks_stat_t *tasks_stat)
{
if (tasks_stat->stat_arr != NULL) {
heap_t *heap_used_for_alloc = find_containing_heap(tasks_stat->stat_arr);
assert(heap_used_for_alloc != NULL);
multi_heap_free(heap_used_for_alloc->heap, tasks_stat->stat_arr);
tasks_stat->stat_arr = NULL;
tasks_stat->task_count = 0;
}
if (tasks_stat->heap_stat_start != NULL) {
heap_t *heap_used_for_alloc = find_containing_heap(tasks_stat->heap_stat_start);
assert(heap_used_for_alloc != NULL);
multi_heap_free(heap_used_for_alloc->heap, tasks_stat->heap_stat_start);
tasks_stat->heap_stat_start = NULL;
tasks_stat->heap_count = 0;
}
if (tasks_stat->alloc_stat_start != NULL) {
heap_t *heap_used_for_alloc = find_containing_heap(tasks_stat->alloc_stat_start);
assert(heap_used_for_alloc != NULL);
multi_heap_free(heap_used_for_alloc->heap, tasks_stat->alloc_stat_start);
tasks_stat->alloc_stat_start = NULL;
tasks_stat->alloc_count = 0;
}
}
/*
* Return per-task heap allocation totals and lists of blocks.
*
@@ -80,8 +946,7 @@ size_t heap_caps_get_per_task_info(heap_task_info_params_t *params)
if (i < count) {
params->totals[i].size[type] += bsize;
params->totals[i].count[type] += 1;
}
else {
} else {
if (count < params->max_totals) {
params->totals[count].task = btask;
params->totals[count].size[type] = bsize;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2018-2022 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2018-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -74,6 +74,53 @@ typedef struct {
size_t max_blocks; ///< Capacity of array of task block info structs
} heap_task_info_params_t;
/** @brief Structure providing details about memory usage of a given task on a heap. */
typedef struct {
const char *name; ///< Pointer to the name of the heap defined in soc_memory_types[]
uint32_t caps; ///< All caps supported by the heap (ORED)
size_t size; ///< The total size of the heap
size_t current_usage; ///< The current usage of a given task on the heap
size_t peak_usage; ///< The peak usage since startup of a given task on the heap
size_t alloc_count; ///< The current number of allocations by a given task on the heap
heap_task_block_t *alloc_stat; ///< Pointer to an array of allocation stats for a given task on the heap
} heap_stat_t;
/** @brief Structure providing details about a task. */
typedef struct {
char name[configMAX_TASK_NAME_LEN]; ///< Name of the task
TaskHandle_t handle; ///< Pointer to the task handle.
bool is_alive; ///< Information whether the task is alive (true) or deleted (false)
size_t overall_peak_usage; ///< Information about the memory peak usage across all heaps of a given task
size_t overall_current_usage; ///< Information about the memory current usage across all heaps of a given task
size_t heap_count; ///< Number of different heaps the task has used since its creation
heap_stat_t *heap_stat; ///< Pointer to an array containing statistics of the heaps used by the task
} task_stat_t;
/**
* @brief User interface containing the statistics of a given task
* and the associated memory usage of the task on each heap.
*/
typedef struct {
task_stat_t stat; ///< Statistics of the task
size_t heap_count; ///< Size of the user defined heap_stat array
heap_stat_t *heap_stat_start; ///< Pointer to the start of the user defined heap_stat array
size_t alloc_count; ///< Size of the user defined alloc_stat array
heap_task_block_t *alloc_stat_start; ///< Pointer to the start of the user defined alloc_stat array
} heap_single_task_stat_t;
/**
* @brief User interface containing the statistics of all tasks and the associated
* memory usage of those tasks on each heap they use.
*/
typedef struct {
size_t task_count; ///< Size of the user defined task_stat_t array
task_stat_t *stat_arr; ///< Pointer to the user defined array of task_stat_t
size_t heap_count; ///< Size of the user defined heap_stat array
heap_stat_t *heap_stat_start; ///< Pointer to the start of the user defined heap_stat array
size_t alloc_count; ///< Size of the user defined alloc_stat array
heap_task_block_t *alloc_stat_start; ///< Pointer to the start of the user defined alloc_stat array
} heap_all_tasks_stat_t;
/**
* @brief Return per-task heap allocation totals and lists of blocks.
*
@@ -89,6 +136,115 @@ typedef struct {
*/
extern size_t heap_caps_get_per_task_info(heap_task_info_params_t *params);
/**
* @brief Return per-task heap memory usage and associated allocation information on each heap
* for all tasks.
*
* For each task that has allocated memory from the heap, return information of memory usage and
* allocation information of the task on each heap the task has used.
*
* @param tasks_stat Structure to hold the memory usage statistics of all tasks
* (@see heap_all_tasks_stat_t).
* @return ESP_OK if the information was gathered successfully.
* ESP_ERR_INVALID_ARG if the user defined fields in heap_all_tasks_stat_t are not set properly
*/
esp_err_t heap_caps_get_all_task_stat(heap_all_tasks_stat_t *tasks_stat);
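/*
 * Illustrative sketch (not part of the API above): calling heap_caps_get_all_task_stat()
 * with caller-provided arrays. The array sizes below are arbitrary; on return, the *_count
 * fields are updated with the number of entries actually written.
 *
 *     task_stat_t task_arr[10];
 *     heap_stat_t heap_arr[20];
 *     heap_task_block_t alloc_arr[60];
 *     heap_all_tasks_stat_t stats = {
 *         .task_count = 10, .stat_arr = task_arr,
 *         .heap_count = 20, .heap_stat_start = heap_arr,
 *         .alloc_count = 60, .alloc_stat_start = alloc_arr,
 *     };
 *     if (heap_caps_get_all_task_stat(&stats) == ESP_OK) {
 *         // stats.task_count now holds the number of tasks reported
 *     }
 */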
/**
* @brief Return heap memory usage and associated allocation information on each heap for a given task.
*
* @param[in] task_handle handle of the task. If NULL, the function will get the current task
* handle and return the statistics of this task.
* @param[out] task_stat Structure to hold the memory usage statistics of the task defined by task_handle
* @return ESP_OK if the information was gathered successfully.
* ESP_ERR_INVALID_ARG if the user defined fields in heap_single_task_stat_t are not set properly
*/
esp_err_t heap_caps_get_single_task_stat(heap_single_task_stat_t *task_stat, TaskHandle_t task_handle);
/**
* @brief Print heap memory usage and associated allocation information on each heap for all created tasks
* since startup (running and deleted ones when CONFIG_HEAP_TRACK_DELETED_TASKS is enabled).
*
* @note This function is an alternative to heap_caps_get_all_task_stat if the goal is just to print information
* and not manipulate them.
*/
void heap_caps_print_all_task_stat(void);
/**
* @brief Print summary information of all tasks
*
* @note The information printed by this function is an array formatted log of task_stat_t content for each running
* task (and deleted ones if HEAP_TRACK_DELETED_TASKS is enabled)
*/
void heap_caps_print_all_task_stat_overview(void);
/**
* @brief Print heap memory usage and associated allocation information on each heap for a given task.
*
* @note This function is an alternative to heap_caps_get_single_task_stat if the goal is just to print information
* and not manipulate them.
*
* @param task_handle The task handle of the task to get memory usage and associated allocation information from.
*/
void heap_caps_print_single_task_stat(TaskHandle_t task_handle);
/**
* @brief Print summary information of a given task
*
* @note The information printed by this function is an array formatted log of task_stat_t content for the given
* task. This function will not print the task summary information if the given task is deleted and
* HEAP_TRACK_DELETED_TASKS is disabled.
*
* @param task_handle The task handle of the task to get memory usage and associated allocation information from.
*/
void heap_caps_print_single_task_stat_overview(TaskHandle_t task_handle);
/**
* @brief Allocate the memory used to store the heap and alloc statistics and fill task_stat
* with the pointer to those allocations and the number of heaps and allocs statistics available
* for the given task.
*
* @note If NULL is passed as parameter for the task_handle, the information on the currently running
* task will be returned. This function should be called prior to heap_caps_get_single_task_stat() if the user
* wishes to use dynamic allocation to store statistics.
*
* @param task_handle The task from which to get the information. If NULL,
* this function will return the number of heaps used by the calling task.
* @param task_stat Structure containing information filled by this function.
* @return ESP_OK if the memory necessary to gather the statistics was allocated successfully.
* ESP_FAIL if not enough memory space is available to store all statistics.
*/
esp_err_t heap_caps_alloc_single_task_stat_arrays(heap_single_task_stat_t *task_stat, TaskHandle_t task_handle);
/**
* @brief Free the memory allocated to store heap and alloc statistics by calling
* heap_caps_alloc_single_task_stat_arrays.
*
* @param task_stat Structure from which to free the allocated memory used to store statistics
*/
void heap_caps_free_single_task_stat_arrays(heap_single_task_stat_t *task_stat);
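/*
 * Illustrative sketch (hypothetical caller code): dynamically sized statistics for the
 * calling task. heap_caps_alloc_single_task_stat_arrays() sizes and allocates the arrays,
 * heap_caps_get_single_task_stat() fills them, and the memory is released with
 * heap_caps_free_single_task_stat_arrays(). Passing NULL as the task handle selects the
 * current task.
 *
 *     heap_single_task_stat_t task_stat;
 *     if (heap_caps_alloc_single_task_stat_arrays(&task_stat, NULL) == ESP_OK) {
 *         if (heap_caps_get_single_task_stat(&task_stat, NULL) == ESP_OK) {
 *             // task_stat.stat.overall_peak_usage is the task's peak usage across all heaps
 *         }
 *         heap_caps_free_single_task_stat_arrays(&task_stat);
 *     }
 */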
/**
* @brief Allocate the memory used to store the tasks, heaps and allocs statistics and fill tasks_stat
* with the pointer to those allocations and the number of tasks, heaps and allocs statistics available.
*
* @note This function should be called prior to heap_caps_get_all_task_stat() if the user
* wishes to use dynamic allocation to store statistics.
*
* @param tasks_stat Structure containing information filled by this function.
* @return ESP_OK if the memory necessary to gather the statistics was allocated successfully.
* ESP_FAIL if not enough memory space is available to store all statistics.
*/
esp_err_t heap_caps_alloc_all_task_stat_arrays(heap_all_tasks_stat_t *tasks_stat);
/**
* @brief Free the memory allocated to store task, heap and alloc statistics
* by calling heap_caps_alloc_all_task_stat_arrays.
*
* @param tasks_stat Structure from which to free the allocated memory used to store statistics
*/
void heap_caps_free_all_task_stat_arrays(heap_all_tasks_stat_t *tasks_stat);
#ifdef __cplusplus
}
#endif

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -9,7 +9,7 @@
#include <stdbool.h>
/* multi_heap is a heap implementation for handling multiple
heterogenous heaps in a single program.
heterogeneous heaps in a single program.
Any contiguous block of memory can be registered as a heap.
*/
@@ -230,6 +230,15 @@ typedef bool (*multi_heap_walker_cb_t)(void *block_ptr, size_t block_size, int b
*/
void multi_heap_walk(multi_heap_handle_t heap, multi_heap_walker_cb_t walker_func, void *user_data);
/*
* @brief Get the size of the block (including any metadata added by the heap component) located at p
*
* @param heap The heap in which the pointer p is located
* @param p The pointer to the data block to retrieve the size from
* @return size_t The size of the data block in bytes.
*/
size_t multi_heap_get_full_block_size(multi_heap_handle_t heap, void *p);
#ifdef __cplusplus
}
#endif

View File

@@ -46,8 +46,10 @@ entries:
multi_heap_poisoning:multi_heap_internal_check_block_poisoning (noflash)
multi_heap_poisoning:multi_heap_internal_poison_fill_region (noflash)
multi_heap_poisoning:multi_heap_aligned_alloc_offs (noflash)
multi_heap_poisoning:multi_heap_get_full_block_size (noflash)
else:
multi_heap:multi_heap_aligned_alloc_offs (noflash)
multi_heap:multi_heap_get_full_block_size (noflash)
if HEAP_POISONING_COMPREHENSIVE = y:
multi_heap_poisoning:verify_fill_pattern (noflash)

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -32,7 +32,12 @@ void *multi_heap_aligned_alloc_offs(multi_heap_handle_t heap, size_t size, size_
return multi_heap_aligned_alloc_impl_offs(heap, size, alignment, offset);
}
#if (!defined CONFIG_HEAP_TLSF_USE_ROM_IMPL)
size_t multi_heap_get_full_block_size(multi_heap_handle_t heap, void *p)
{
return multi_heap_get_allocated_size_impl(heap, p);
}
#if(!defined CONFIG_HEAP_TLSF_USE_ROM_IMPL)
/* if no heap poisoning, public API aliases directly to these implementations */
void *multi_heap_malloc(multi_heap_handle_t heap, size_t size)
__attribute__((alias("multi_heap_malloc_impl")));
@@ -74,7 +79,6 @@ void *multi_heap_get_block_address(multi_heap_block_handle_t block)
#define ALIGN_UP(X) ALIGN((X)+sizeof(void *)-1)
#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1))
typedef struct multi_heap_info {
void *lock;
size_t free_bytes;

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2015-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -32,7 +32,7 @@
#ifdef MULTI_HEAP_POISONING
/* Alias MULTI_HEAP_POISONING_SLOW to SLOW for better readabilty */
/* Alias MULTI_HEAP_POISONING_SLOW to SLOW for better readability */
#ifdef SLOW
#error "external header has defined SLOW"
#endif
@@ -354,6 +354,13 @@ void *multi_heap_get_block_address(multi_heap_block_handle_t block)
return head + sizeof(poison_head_t);
}
size_t multi_heap_get_full_block_size(multi_heap_handle_t heap, void *p)
{
poison_head_t *head = verify_allocated_region(p, true);
assert(head != NULL);
return multi_heap_get_allocated_size_impl(heap, head);
}
multi_heap_handle_t multi_heap_register(void *start, size_t size)
{
#ifdef SLOW

View File

@@ -0,0 +1,22 @@
/*
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#ifdef CONFIG_HEAP_TASK_TRACKING
#ifdef __cplusplus
extern "C" {
#endif
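/*
 * Summary (comment added for clarity, based on the call sites in heap_caps.c and heap_caps_init.c):
 * these internal hooks are called from the allocator entry points to create, update or remove
 * the per-task statistics entry matching the block at 'ptr' in the given heap.
 */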
void heap_caps_update_per_task_info_alloc(heap_t *heap, void *ptr, size_t size, uint32_t caps);
void heap_caps_update_per_task_info_free(heap_t *heap, void *ptr);
void heap_caps_update_per_task_info_realloc(heap_t *heap, void *old_ptr, void *new_ptr, size_t old_size, TaskHandle_t old_task, size_t new_size, uint32_t caps);
#ifdef __cplusplus
}
#endif
#endif // CONFIG_HEAP_TASK_TRACKING

View File

@@ -18,7 +18,7 @@
#include <stdlib.h>
#include <sys/param.h>
#if !(CONFIG_ESP_SYSTEM_MEMPROT_FEATURE || CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT)
#if !(CONFIG_ESP_SYSTEM_MEMPROT_FEATURE || CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT) && !(CONFIG_HEAP_TASK_TRACKING)
TEST_CASE("Capabilities allocator test", "[heap]")
{
char *m1, *m2[10];
@@ -108,7 +108,7 @@ TEST_CASE("Capabilities allocator test", "[heap]")
free(m1);
printf("Done.\n");
}
#endif // !(CONFIG_ESP_SYSTEM_MEMPROT_FEATURE || CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT)
#endif // !(CONFIG_ESP_SYSTEM_MEMPROT_FEATURE || CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT) && !(CONFIG_HEAP_TASK_TRACKING)
#ifdef CONFIG_ESP32_IRAM_AS_8BIT_ACCESSIBLE_MEMORY
TEST_CASE("IRAM_8BIT capability test", "[heap]")
@@ -230,7 +230,7 @@ TEST_CASE("heap caps minimum free bytes fault cases", "[heap]")
/* Small function runs from IRAM to check that malloc/free/realloc
all work OK when cache is disabled...
*/
#if !(CONFIG_ESP_SYSTEM_MEMPROT_FEATURE || CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT) && !CONFIG_HEAP_PLACE_FUNCTION_INTO_FLASH
#if !(CONFIG_ESP_SYSTEM_MEMPROT_FEATURE || CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT) && !CONFIG_HEAP_PLACE_FUNCTION_INTO_FLASH && !CONFIG_HEAP_TASK_TRACKING
static IRAM_ATTR __attribute__((noinline)) bool iram_malloc_test(void)
{
spi_flash_guard_get()->start(); // Disables flash cache
@@ -252,7 +252,7 @@ TEST_CASE("heap_caps_xxx functions work with flash cache disabled", "[heap]")
{
TEST_ASSERT( iram_malloc_test() );
}
#endif // !(CONFIG_ESP_SYSTEM_MEMPROT_FEATURE || CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT) && !CONFIG_HEAP_PLACE_FUNCTION_INTO_FLASH
#endif // !(CONFIG_ESP_SYSTEM_MEMPROT_FEATURE || CONFIG_ESP_SYSTEM_PMP_IDRAM_SPLIT) && !CONFIG_HEAP_PLACE_FUNCTION_INTO_FLASH && !CONFIG_HEAP_TASK_TRACKING
#ifdef CONFIG_HEAP_ABORT_WHEN_ALLOCATION_FAILS
TEST_CASE("When enabled, allocation operation failure generates an abort", "[heap][reset=abort,SW_CPU_RESET]")
@@ -272,6 +272,7 @@ void heap_caps_alloc_failed_hook(size_t requested_size, uint32_t caps, const cha
called_user_failed_hook = true;
}
TEST_CASE("user provided alloc failed hook must be called when allocation fails", "[heap]")
{
TEST_ASSERT(heap_caps_register_failed_alloc_callback(heap_caps_alloc_failed_hook) == ESP_OK);

View File

@@ -1,10 +1,11 @@
/*
* SPDX-FileCopyrightText: 2022-2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2022-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Unlicense OR CC0-1.0
*/
#include "unity.h"
#include "stdio.h"
#include <string.h>
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
@@ -12,52 +13,34 @@
#include "esp_heap_task_info.h"
// This test only applies when task tracking is enabled
#if defined(CONFIG_HEAP_TASK_TRACKING)
#if defined(CONFIG_HEAP_TASK_TRACKING) && defined(CONFIG_HEAP_TRACK_DELETED_TASKS)
#define MAX_TASK_NUM 10 // Max number of per tasks info that it can store
#define MAX_BLOCK_NUM 10 // Max number of per block info that it can store
#define ALLOC_BYTES 36
static void check_heap_task_info(TaskHandle_t taskHdl)
static void check_heap_task_info(const char *task_name, const bool task_active)
{
size_t num_totals = 0;
heap_task_totals_t s_totals_arr[MAX_TASK_NUM];
heap_task_block_t s_block_arr[MAX_BLOCK_NUM];
heap_all_tasks_stat_t heap_tasks_stat;
heap_task_info_params_t heap_info = {0};
heap_info.caps[0] = MALLOC_CAP_32BIT; // Gets heap info with CAP_32BIT capabilities
heap_info.mask[0] = MALLOC_CAP_32BIT;
heap_info.tasks = NULL; // Passing NULL captures heap info for all tasks
heap_info.num_tasks = 0;
heap_info.totals = s_totals_arr; // Gets task wise allocation details
heap_info.num_totals = &num_totals;
heap_info.max_totals = MAX_TASK_NUM; // Maximum length of "s_totals_arr"
heap_info.blocks = s_block_arr; // Gets block wise allocation details. For each block, gets owner task, address and size
heap_info.max_blocks = MAX_BLOCK_NUM; // Maximum length of "s_block_arr"
heap_tasks_stat.task_count = 10;
heap_tasks_stat.heap_count = 20;
heap_tasks_stat.alloc_count = 60;
task_stat_t arr_task_stat[heap_tasks_stat.task_count];
heap_stat_t arr_heap_stat[heap_tasks_stat.heap_count];
heap_task_block_t arr_alloc_stat[heap_tasks_stat.alloc_count];
heap_tasks_stat.stat_arr = arr_task_stat;
heap_tasks_stat.heap_stat_start = arr_heap_stat;
heap_tasks_stat.alloc_stat_start = arr_alloc_stat;
heap_caps_get_per_task_info(&heap_info);
heap_caps_get_all_task_stat(&heap_tasks_stat);
bool task_found = false;
for (int i = 0 ; i < *heap_info.num_totals; i++) {
for (size_t task_index = 0; task_index < heap_tasks_stat.task_count; task_index++) {
// the prescheduler allocs and free are stored as a
// task with a handle set to 0, avoid calling pcTaskGetName
// in that case.
if (heap_info.totals[i].task != 0 && (uint32_t*)(heap_info.totals[i].task) == (uint32_t*)taskHdl) {
task_stat_t task_stat = heap_tasks_stat.stat_arr[task_index];
if (0 == strcmp(task_stat.name, task_name) && task_stat.is_alive == task_active) {
task_found = true;
// check the number of byte allocated according to the task tracking feature
// and make sure it matches the expected value. The size returned by the
// heap_caps_get_per_task_info includes the size of the block owner (4 bytes)
TEST_ASSERT(heap_info.totals[i].size[0] == ALLOC_BYTES + 4);
}
// test that if not 0, the task handle corresponds to an actual task.
// this test is to make sure no rubbish is stored as a task handle.
if (heap_info.totals[i].task != 0) {
// feeding the task name returned by pcTaskGetName() to xTaskGetHandle().
// xTaskGetHandle would return the task handler used as parameter in
// pcTaskGetName if the task handle is valid. Otherwise, it will return
// NULL or just crash if the pointer to the task name is complete nonsense.
TEST_ASSERT_EQUAL(heap_info.totals[i].task, xTaskGetHandle(pcTaskGetName(heap_info.totals[i].task)));
}
}
TEST_ASSERT_TRUE(task_found);
@@ -70,36 +53,196 @@ static void test_task(void *args)
abort();
}
// unlock main too check task tracking feature
// unlock main to check task tracking feature
xTaskNotifyGive((TaskHandle_t)args);
// wait for main to delete this task
// wait for main to hand control back so this task can free the pointer
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
heap_caps_free(ptr);
// unlock main to delete the task
xTaskNotifyGive((TaskHandle_t)args);
// wait for main to delete the task
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
}
static void test_task_a(void *args)
{
test_task(args);
}
static void test_task_b(void *args)
{
test_task(args);
}
/* This test will create a task, wait for the task to allocate / free memory
* so it is added to the task tracking info in the heap component and then
* call heap_caps_get_per_task_info() and make sure a task with the name test_task
* call heap_caps_get_all_task_stat() and make sure a task with the name test_task
* is in the list, and that the right ALLOC_BYTES are shown.
*
* Note: The memory allocated in the task is not freed for the sake of the test
* so it is normal that a memory leak is reported by the test environment. It
* shouldn't be more than the bytes allocated by the task + associated metadata
*/
TEST_CASE("heap task tracking reports created task", "[heap]")
TEST_CASE("heap task tracking reports created / deleted task", "[heap]")
{
TaskHandle_t test_task_handle;
xTaskCreate(&test_task, "test_task", 3072, (void *)xTaskGetCurrentTaskHandle(), 5, &test_task_handle);
const char *task_name = "test_task_a";
xTaskCreate(&test_task_a, task_name, 3072, (void *)xTaskGetCurrentTaskHandle(), 5, &test_task_handle);
// wait for task to allocate memory and give the hand back to the test
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
// check that the task is referenced in the list of tasks
// by the task tracking feature. Check the number of bytes
// the task has allocated and make sure it is matching the
// expected value.
check_heap_task_info(test_task_handle);
// by the task tracking feature. Check that the task name
// matches and that the task is running.
check_heap_task_info(task_name, true);
// unlock main to check task tracking feature
xTaskNotifyGive(test_task_handle);
// wait for the task to free the memory
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
// delete the task.
vTaskDelete(test_task_handle);
// check that the task is referenced in the list of tasks
// by the task tracking feature. Check that the task name
// matches and that the task is marked as deleted.
check_heap_task_info(task_name, false);
}
/* The test case calls heap_caps_alloc_all_task_stat_arrays and heap_caps_get_all_task_stat
* after creating new tasks and allocating in new heaps to check that the number of tasks, heaps and
* allocation statistics provided by heap_caps_get_all_task_stat is updated accordingly.
*/
TEST_CASE("heap task tracking check alloc array and get all tasks info", "[heap]")
{
// call heap_caps_alloc_all_task_stat_arrays and save the number of task, heap and alloc
// statistics available when the test starts
heap_all_tasks_stat_t tasks_stat;
esp_err_t ret_val = heap_caps_alloc_all_task_stat_arrays(&tasks_stat);
TEST_ASSERT_EQUAL(ret_val, ESP_OK);
ret_val = heap_caps_get_all_task_stat(&tasks_stat);
TEST_ASSERT_EQUAL(ret_val, ESP_OK);
const size_t nb_of_tasks_stat = tasks_stat.task_count;
const size_t nb_of_heaps_stat = tasks_stat.heap_count;
const size_t nb_of_allocs_stat = tasks_stat.alloc_count;
heap_caps_free_all_task_stat_arrays(&tasks_stat);
// Create a task that will allocate memory
TaskHandle_t test_task_handle;
const char *task_name = "test_task_b";
xTaskCreate(&test_task_b, task_name, 3072, (void *)xTaskGetCurrentTaskHandle(), 5, &test_task_handle);
// wait for the task to hand control back to the test and call heap_caps_alloc_all_task_stat_arrays.
// Compare the number of task, heap and alloc statistics available to make sure they contain the stats
// related to the newly created task.
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
ret_val = heap_caps_alloc_all_task_stat_arrays(&tasks_stat);
TEST_ASSERT_EQUAL(ret_val, ESP_OK);
ret_val = heap_caps_get_all_task_stat(&tasks_stat);
TEST_ASSERT_EQUAL(ret_val, ESP_OK);
TEST_ASSERT(nb_of_tasks_stat < tasks_stat.task_count);
TEST_ASSERT(nb_of_heaps_stat < tasks_stat.heap_count);
TEST_ASSERT(nb_of_allocs_stat < tasks_stat.alloc_count);
// free the arrays of stat in tasks_stat and reset the counters
heap_caps_free_all_task_stat_arrays(&tasks_stat);
// unlock task to delete allocated memory
xTaskNotifyGive(test_task_handle);
// wait for the task to free the memory
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
// delete the task.
vTaskDelete(test_task_handle);
}
static void task_self_check(void *args)
{
const size_t alloc_size = 100;
const uint32_t caps = MALLOC_CAP_32BIT | MALLOC_CAP_DMA;
// call heap_caps_alloc_single_task_stat_arrays on the current task. Since no alloc was made, the
// function should return ESP_OK but heap_count and alloc_count should be 0 and the pointers to the
// allocated arrays should be NULL.
heap_single_task_stat_t task_stat;
esp_err_t ret_val = heap_caps_alloc_single_task_stat_arrays(&task_stat, NULL);
TEST_ASSERT_EQUAL(ret_val, ESP_OK);
TEST_ASSERT_EQUAL(task_stat.heap_count, 0);
TEST_ASSERT_EQUAL(task_stat.alloc_count, 0);
TEST_ASSERT_NULL(task_stat.heap_stat_start);
TEST_ASSERT_NULL(task_stat.alloc_stat_start);
// allocate memory
void *ptr = heap_caps_malloc(alloc_size, caps);
// allocate arrays for the statistics of the task. This time, it should succeed as we just
// allocated memory. This information should be stored in the task info list.
ret_val = heap_caps_alloc_single_task_stat_arrays(&task_stat, NULL);
TEST_ASSERT_EQUAL(ret_val, ESP_OK);
// The number of heap info should be one and the number of alloc should be one too
TEST_ASSERT_EQUAL(1, task_stat.heap_count);
TEST_ASSERT_EQUAL(1, task_stat.alloc_count);
ret_val = heap_caps_get_single_task_stat(&task_stat, NULL);
TEST_ASSERT_EQUAL(ret_val, ESP_OK);
// the caps of the heap info should contain the caps used to allocate the memory
TEST_ASSERT((task_stat.stat.heap_stat[0].caps & caps) == caps);
// The size of the alloc found in the stat should be non-zero and its address
// should match the pointer returned by heap_caps_malloc
TEST_ASSERT(task_stat.stat.heap_stat[0].alloc_stat[0].size > 0);
TEST_ASSERT(task_stat.stat.heap_stat[0].alloc_stat[0].address == ptr);
// free the memory and get the updated statistics on the task
heap_caps_free(ptr);
heap_caps_free_single_task_stat_arrays(&task_stat);
ret_val = heap_caps_alloc_single_task_stat_arrays(&task_stat, NULL);
TEST_ASSERT_EQUAL(ret_val, ESP_OK);
// The number of heap info should be one and the number of alloc should be zero
// since the allocated memory was just freed
TEST_ASSERT_EQUAL(1, task_stat.heap_count);
TEST_ASSERT_EQUAL(0, task_stat.alloc_count);
ret_val = heap_caps_get_single_task_stat(&task_stat, NULL);
TEST_ASSERT_EQUAL(ret_val, ESP_OK);
TEST_ASSERT((task_stat.stat.heap_stat[0].caps & caps) == caps);
TEST_ASSERT(task_stat.stat.heap_stat[0].alloc_stat == NULL);
// unlock main now that the self checks are done
xTaskNotifyGive((TaskHandle_t)args);
// block here until main deletes this task
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
}
/* The test case calls heap_caps_alloc_single_task_stat_arrays and heap_caps_get_single_task_stat
* after creating new task and allocating in new heaps to check that the number of heaps and
* allocation statistics provided by heap_caps_get_single_task_stat is updated accordingly.
*/
TEST_CASE("heap task tracking check alloc arrays and get info on specific task", "[heap]")
{
TaskHandle_t test_task_handle;
const char *task_name = "task_self_check";
xTaskCreate(&task_self_check, task_name, 3072, (void *)xTaskGetCurrentTaskHandle(), 5, &test_task_handle);
// wait for the task to finish its self checks
ulTaskNotifyTake(pdTRUE, portMAX_DELAY);
// delete the task.
vTaskDelete(test_task_handle);
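
For reference, a minimal application-side sketch of the flow these tests exercise: allocate the stat arrays, take a snapshot, walk the per-task entries and release the arrays. The function and field names (heap_caps_alloc_all_task_stat_arrays, task_count, stat_arr, name, is_alive, ...) are the ones used in the tests above; the helper name and the printf formatting are illustrative only.

// Sketch only: dump the name and alive/deleted state of every tracked task.
#include <stdio.h>
#include "esp_heap_task_info.h"

static void example_dump_task_stats(void)
{
    heap_all_tasks_stat_t tasks_stat;

    // size and allocate the task / heap / alloc stat arrays internally
    if (heap_caps_alloc_all_task_stat_arrays(&tasks_stat) != ESP_OK) {
        return;
    }

    if (heap_caps_get_all_task_stat(&tasks_stat) == ESP_OK) {
        for (size_t i = 0; i < tasks_stat.task_count; i++) {
            task_stat_t *stat = &tasks_stat.stat_arr[i];
            printf("%-16s %s\n", stat->name, stat->is_alive ? "alive" : "deleted");
        }
    }

    heap_caps_free_all_task_stat_arrays(&tasks_stat);
}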


@@ -3,3 +3,4 @@ CONFIG_HEAP_POISONING_LIGHT=n
CONFIG_HEAP_POISONING_COMPREHENSIVE=n
CONFIG_HEAP_TASK_TRACKING=y # to make sure the config doesn't induce unexpected behavior
CONFIG_HEAP_TRACK_DELETED_TASKS=y # to make sure the config doesn't induce unexpected behavior


@@ -941,7 +941,6 @@ examples/system/gcov/components/sample/some_funcs.c
examples/system/gcov/main/gcov_example_func.c
examples/system/gcov/main/gcov_example_main.c
examples/system/gdbstub/main/gdbstub_main.c
examples/system/heap_task_tracking/main/heap_task_tracking_main.c
examples/system/himem/main/himem_example_main.c
examples/system/ota/advanced_https_ota/main/advanced_https_ota_example.c
examples/system/ota/native_ota_example/main/native_ota_example.c