Merge branch 'feature/tlsf-dynamic-control-size_v4.3' into 'release/v4.3'

heap: Update tlsf to use dynamic metadata size and lower RAM usage of heap component (backport v4.3)

See merge request espressif/esp-idf!21783
Jiang Jiang Jian
2023-02-26 15:51:45 +08:00
17 changed files with 374 additions and 218 deletions

View File

@ -31,10 +31,9 @@ __esp_stack_guard_setup (void)
__stack_chk_guard = (void *)esp_random();
}
void __stack_chk_fail (void)
IRAM_ATTR void __stack_chk_fail (void)
{
esp_rom_printf("\r\nStack smashing protect failure!\r\n\r\n");
abort();
esp_system_abort(DRAM_STR("Stack smashing protect failure!"));
}
#endif
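The rewritten handler must stay runnable while the flash cache is disabled, so both the code and its message have to live in RAM. A minimal sketch of the moving parts, assuming the usual shape of the DRAM_STR helper from esp_attr.h:

/* IRAM_ATTR places the function body in instruction RAM, and DRAM_STR
 * places the string literal in data RAM rather than flash, so the
 * handler stays usable even when the flash cache is disabled.
 * Sketch of the esp_attr.h helper (see the header for the exact form):
 */
#define DRAM_STR(str) (__extension__({static const DRAM_ATTR char __c[] = (str); (const char *)&__c;}))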

View File

@ -23,7 +23,6 @@
#include "heap_private.h"
#include "esp_system.h"
// forward declaration
IRAM_ATTR static void *heap_caps_realloc_base( void *ptr, size_t size, uint32_t caps);
@ -61,7 +60,7 @@ IRAM_ATTR static void *dram_alloc_to_iram_addr(void *addr, size_t len)
}
static void heap_caps_alloc_failed(size_t requested_size, uint32_t caps, const char *function_name)
IRAM_ATTR NOINLINE_ATTR static void heap_caps_alloc_failed(size_t requested_size, uint32_t caps, const char *function_name)
{
if (alloc_failed_callback) {
alloc_failed_callback(requested_size, caps, function_name);
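The callback fired above is installed by the application. A minimal registration sketch, assuming the public heap_caps_register_failed_alloc_callback() API from esp_heap_caps.h (hook and function names here are illustrative):

#include <stdio.h>
#include "esp_heap_caps.h"

/* Invoked in the context of the failing allocation call. */
static void on_alloc_failed(size_t size, uint32_t caps, const char *function_name)
{
    printf("%s failed to allocate %u bytes (caps 0x%x)\n",
           function_name, (unsigned)size, (unsigned)caps);
}

/* Illustrative registration, e.g. from app_main(). */
static void install_alloc_failed_hook(void)
{
    if (heap_caps_register_failed_alloc_callback(on_alloc_failed) != ESP_OK) {
        printf("failed to register allocation-failure hook\n");
    }
}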

View File

@ -51,7 +51,7 @@ extern SLIST_HEAD(registered_heap_ll, heap_t_) registered_heaps;
bool heap_caps_match(const heap_t *heap, uint32_t caps);
/* return all possible capabilities (across all priorities) for a given heap */
inline static IRAM_ATTR uint32_t get_all_caps(const heap_t *heap)
inline static __attribute__((always_inline)) uint32_t get_all_caps(const heap_t *heap)
{
if (heap->heap == NULL) {
return 0;

View File

@ -90,12 +90,6 @@ tlsf_static_assert(sizeof(int) * CHAR_BIT == 32);
tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32);
tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64);
/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT);
/* Ensure we've properly tuned our sizes. */
tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT);
static inline __attribute__((__always_inline__)) size_t align_up(size_t x, size_t align)
{
tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two");
@ -120,7 +114,7 @@ static inline __attribute__((__always_inline__)) void* align_ptr(const void* ptr
** Adjust an allocation size to be aligned to word size, and no smaller
** than internal minimum.
*/
static inline __attribute__((__always_inline__)) size_t adjust_request_size(size_t size, size_t align)
static inline __attribute__((__always_inline__)) size_t adjust_request_size(tlsf_t tlsf, size_t size, size_t align)
{
size_t adjust = 0;
if (size)
@ -128,7 +122,7 @@ static inline __attribute__((__always_inline__)) size_t adjust_request_size(size
const size_t aligned = align_up(size, align);
/* aligned size must not exceed block_size_max or we'll go out of bounds on sl_bitmap */
if (aligned < block_size_max)
if (aligned < tlsf_block_size_max(tlsf))
{
adjust = tlsf_max(aligned, block_size_min);
}
@ -141,34 +135,34 @@ static inline __attribute__((__always_inline__)) size_t adjust_request_size(size
** the documentation found in the white paper.
*/
static inline __attribute__((__always_inline__)) void mapping_insert(size_t size, int* fli, int* sli)
static inline __attribute__((__always_inline__)) void mapping_insert(control_t *control, size_t size, int* fli, int* sli)
{
int fl, sl;
if (size < SMALL_BLOCK_SIZE)
if (size < control->small_block_size)
{
/* Store small blocks in first list. */
fl = 0;
sl = tlsf_cast(int, size) >> 2;
sl = tlsf_cast(int, size) / (control->small_block_size / control->sl_index_count);
}
else
{
fl = tlsf_fls(size);
sl = tlsf_cast(int, size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2);
fl -= (FL_INDEX_SHIFT - 1);
sl = tlsf_cast(int, size >> (fl - control->sl_index_count_log2)) ^ (1 << control->sl_index_count_log2);
fl -= (control->fl_index_shift - 1);
}
*fli = fl;
*sli = sl;
}
/* This version rounds up to the next block size (for allocations) */
static inline __attribute__((__always_inline__)) void mapping_search(size_t size, int* fli, int* sli)
static inline __attribute__((__always_inline__)) void mapping_search(control_t *control, size_t size, int* fli, int* sli)
{
if (size >= SMALL_BLOCK_SIZE)
if (size >= control->small_block_size)
{
const size_t round = (1 << (tlsf_fls(size) - SL_INDEX_COUNT_LOG2)) - 1;
const size_t round = (1 << (tlsf_fls(size) - control->sl_index_count_log2)) - 1;
size += round;
}
mapping_insert(size, fli, sli);
mapping_insert(control, size, fli, sli);
}
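To make the reworked two-level mapping concrete, here is a worked example; the parameter values are illustrative (control_construct() derives the real ones from the pool size):

/* Assume sl_index_count_log2 = 5 (sl_index_count = 32),
 * fl_index_shift = 7, small_block_size = 128.
 *
 * mapping_insert(control, 1000, &fl, &sl):
 *   1000 >= 128, so the large-block path runs:
 *     fl = tlsf_fls(1000) = 9              (2^9 = 512 <= 1000 < 1024)
 *     sl = (1000 >> (9 - 5)) ^ (1 << 5) = 62 ^ 32 = 30
 *     fl = 9 - (7 - 1) = 3
 *   => free list [fl=3][sl=30], holding blocks of 992..1007 bytes.
 *
 * mapping_search(control, 1000, &fl, &sl) first rounds the size up:
 *     round = (1 << (tlsf_fls(1000) - 5)) - 1 = 15, so 1000 becomes 1015
 *   => list [fl=3][sl=31] (1008..1023 bytes), so any block found there
 *      is guaranteed to satisfy the original 1000-byte request. */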
static inline __attribute__((__always_inline__)) block_header_t* search_suitable_block(control_t* control, int* fli, int* sli)
@ -200,7 +194,7 @@ static inline __attribute__((__always_inline__)) block_header_t* search_suitable
*sli = sl;
/* Return the first block in the free list. */
return control->blocks[fl][sl];
return control->blocks[fl*control->sl_index_count + sl];
}
/* Remove a free block from the free list.*/
@ -214,9 +208,9 @@ static inline __attribute__((__always_inline__)) void remove_free_block(control_
prev->next_free = next;
/* If this block is the head of the free list, set new head. */
if (control->blocks[fl][sl] == block)
if (control->blocks[fl*control->sl_index_count + sl] == block)
{
control->blocks[fl][sl] = next;
control->blocks[fl*control->sl_index_count + sl] = next;
/* If the new head is null, clear the bitmap. */
if (next == &control->block_null)
@ -235,7 +229,7 @@ static inline __attribute__((__always_inline__)) void remove_free_block(control_
/* Insert a free block into the free block list. */
static inline __attribute__((__always_inline__)) void insert_free_block(control_t* control, block_header_t* block, int fl, int sl)
{
block_header_t* current = control->blocks[fl][sl];
block_header_t* current = control->blocks[fl*control->sl_index_count + sl];
tlsf_assert(current && "free list cannot have a null entry");
tlsf_assert(block && "cannot insert a null entry into the free list");
block->next_free = current;
@ -248,7 +242,7 @@ static inline __attribute__((__always_inline__)) void insert_free_block(control_
** Insert the new block at the head of the list, and mark the first-
** and second-level bitmaps appropriately.
*/
control->blocks[fl][sl] = block;
control->blocks[fl*control->sl_index_count + sl] = block;
control->fl_bitmap |= (1 << fl);
control->sl_bitmap[fl] |= (1 << sl);
}
@ -257,7 +251,7 @@ static inline __attribute__((__always_inline__)) void insert_free_block(control_
static inline __attribute__((__always_inline__)) void block_remove(control_t* control, block_header_t* block)
{
int fl, sl;
mapping_insert(block_size(block), &fl, &sl);
mapping_insert(control, block_size(block), &fl, &sl);
remove_free_block(control, block, fl, sl);
}
@ -265,7 +259,7 @@ static inline __attribute__((__always_inline__)) void block_remove(control_t* co
static inline __attribute__((__always_inline__)) void block_insert(control_t* control, block_header_t* block)
{
int fl, sl;
mapping_insert(block_size(block), &fl, &sl);
mapping_insert(control, block_size(block), &fl, &sl);
insert_free_block(control, block, fl, sl);
}
@ -428,7 +422,7 @@ static inline __attribute__((__always_inline__)) block_header_t* block_locate_f
if (size)
{
mapping_search(size, &fl, &sl);
mapping_search(control, size, &fl, &sl);
/*
** mapping_search can futz with the size, so for excessively large sizes it can sometimes wind up
@ -436,7 +430,7 @@ static inline __attribute__((__always_inline__)) block_header_t* block_locate_f
** So, we protect against that here, since this is the only callsite of mapping_search.
** Note that we don't need to check sl, since it comes from a modulo operation that guarantees it's always in range.
*/
if (fl < FL_INDEX_COUNT)
if (fl < control->fl_index_count)
{
block = search_suitable_block(control, &fl, &sl);
}
@ -465,22 +459,66 @@ static inline __attribute__((__always_inline__)) void* block_prepare_used(contro
}
/* Clear structure and point all empty lists at the null block. */
static void control_construct(control_t* control)
static control_t* control_construct(control_t* control, size_t bytes)
{
int i, j;
// check that the requested size can at least hold the control_t. This allows us
// to fill in the fields of control_t necessary to determine the final size of
// the metadata overhead, and then to check that the requested size can hold
// this data plus at least one block of minimum size
if (bytes < sizeof(control_t))
{
return NULL;
}
/* Find the closest power of two for first layer */
control->fl_index_max = 32 - __builtin_clz(bytes);
/* adapt second layer to the pool */
if (bytes <= 16 * 1024) control->sl_index_count_log2 = 3;
else if (bytes <= 256 * 1024) control->sl_index_count_log2 = 4;
else control->sl_index_count_log2 = 5;
control->fl_index_shift = (control->sl_index_count_log2 + ALIGN_SIZE_LOG2);
control->sl_index_count = 1 << control->sl_index_count_log2;
control->fl_index_count = control->fl_index_max - control->fl_index_shift + 1;
control->small_block_size = 1 << control->fl_index_shift;
// the total size of the metadata overhead is the size of the control_t
// plus the size of the sl_bitmap array and the size of the blocks array
control->size = sizeof(control_t) + (sizeof(*control->sl_bitmap) * control->fl_index_count) +
(sizeof(*control->blocks) * (control->fl_index_count * control->sl_index_count));
// check that the requested size can hold the whole control structure
// plus at least one block of minimum size
if (bytes < control->size + block_size_min)
{
return NULL;
}
control->block_null.next_free = &control->block_null;
control->block_null.prev_free = &control->block_null;
control->fl_bitmap = 0;
for (i = 0; i < FL_INDEX_COUNT; ++i)
control->sl_bitmap = align_ptr(control + 1, sizeof(*control->sl_bitmap));
control->blocks = align_ptr(control->sl_bitmap + control->fl_index_count, sizeof(*control->blocks));
/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
tlsf_assert(sizeof(unsigned int) * CHAR_BIT >= control->sl_index_count && "CHAR_BIT less than sl_index_count");
/* Ensure we've properly tuned our sizes. */
tlsf_assert(ALIGN_SIZE == control->small_block_size / control->sl_index_count && "ALIGN_SIZE does not match");
for (int i = 0; i < control->fl_index_count; ++i)
{
control->sl_bitmap[i] = 0;
for (j = 0; j < SL_INDEX_COUNT; ++j)
for (int j = 0; j < control->sl_index_count; ++j)
{
control->blocks[i][j] = &control->block_null;
control->blocks[i*control->sl_index_count + j] = &control->block_null;
}
}
return control;
}
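A worked pass through the sizing logic above, assuming a 32-bit target (4-byte pointers and ints) and an illustrative pool of 300 KB:

/* bytes = 300 * 1024 = 307200:
 *   fl_index_max        = 32 - __builtin_clz(307200) = 19
 *   sl_index_count_log2 = 5                   (307200 > 256 KB)
 *   fl_index_shift      = 5 + ALIGN_SIZE_LOG2 = 7
 *   sl_index_count      = 1 << 5 = 32
 *   fl_index_count      = 19 - 7 + 1 = 13
 *   small_block_size    = 1 << 7 = 128
 *   control->size       = sizeof(control_t)
 *                         + 4 * 13            (sl_bitmap)
 *                         + 4 * (13 * 32)     (blocks)
 *                       = sizeof(control_t) + 1716 bytes
 * Previously these arrays were sized at compile time for the worst-case
 * pool, which is the RAM overhead this change removes. */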
/*
@ -520,14 +558,14 @@ int tlsf_check(tlsf_t tlsf)
int status = 0;
/* Check that the free lists and bitmaps are accurate. */
for (i = 0; i < FL_INDEX_COUNT; ++i)
for (i = 0; i < control->fl_index_count; ++i)
{
for (j = 0; j < SL_INDEX_COUNT; ++j)
for (j = 0; j < control->sl_index_count; ++j)
{
const int fl_map = control->fl_bitmap & (1 << i);
const int sl_list = control->sl_bitmap[i];
const int sl_map = sl_list & (1 << j);
const block_header_t* block = control->blocks[i][j];
const block_header_t* block = control->blocks[i*control->sl_index_count + j];
/* Check that first- and second-level lists agree. */
if (!fl_map)
@ -554,7 +592,7 @@ int tlsf_check(tlsf_t tlsf)
tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free");
tlsf_insist(block_size(block) >= block_size_min && "block not minimum size");
mapping_insert(block_size(block), &fli, &sli);
mapping_insert(control, block_size(block), &fli, &sli);
tlsf_insist(fli == i && sli == j && "block size indexed in wrong list");
block = block->next_free;
}
@ -609,13 +647,33 @@ int tlsf_check_pool(pool_t pool)
return integ.status;
}
size_t tlsf_fit_size(tlsf_t tlsf, size_t size)
{
/* Because TLSF is good-fit, the allocatable size is rounded down to the lower second-level slot boundary */
if (size && tlsf != NULL)
{
size_t sl_interval;
control_t* control = tlsf_cast(control_t*, tlsf);
sl_interval = (1 << (32 - __builtin_clz(size) - 1)) / control->sl_index_count;
return size & ~(sl_interval - 1);
}
return 0;
}
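A worked example of the rounding above, again with an illustrative sl_index_count of 32:

/* tlsf_fit_size(tlsf, 1000):
 *   sl_interval = (1 << (32 - __builtin_clz(1000) - 1)) / 32
 *               = 512 / 32 = 16
 *   return 1000 & ~(16 - 1) = 992
 * A free block reported as 1000 bytes can therefore only satisfy
 * requests up to 992 bytes: good-fit searching rounds requests up to
 * the next second-level slot boundary (see mapping_search). */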
/*
** Size of the TLSF structures in a given memory block passed to
** tlsf_create; no longer a compile-time constant, since the control
** structure now embeds dynamically sized bitmap and block arrays
*/
size_t tlsf_size(void)
size_t tlsf_size(tlsf_t tlsf)
{
return sizeof(control_t);
if (tlsf == NULL)
{
return 0;
}
control_t* control = tlsf_cast(control_t*, tlsf);
return control->size;
}
size_t tlsf_align_size(void)
@ -628,9 +686,14 @@ size_t tlsf_block_size_min(void)
return block_size_min;
}
size_t tlsf_block_size_max(void)
size_t tlsf_block_size_max(tlsf_t tlsf)
{
return block_size_max;
if (tlsf == NULL)
{
return 0;
}
control_t* control = tlsf_cast(control_t*, tlsf);
return tlsf_cast(size_t, 1) << control->fl_index_max;
}
/*
@ -663,16 +726,16 @@ pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes)
return 0;
}
if (pool_bytes < block_size_min || pool_bytes > block_size_max)
if (pool_bytes < block_size_min || pool_bytes > tlsf_block_size_max(tlsf))
{
#if defined (TLSF_64BIT)
printf("tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n",
(unsigned int)(pool_overhead + block_size_min),
(unsigned int)((pool_overhead + block_size_max) / 256));
(unsigned int)((pool_overhead + tlsf_block_size_max(tlsf)) / 256));
#else
printf("tlsf_add_pool: Memory size must be between %u and %u bytes.\n",
(unsigned int)(pool_overhead + block_size_min),
(unsigned int)(pool_overhead + block_size_max));
(unsigned int)(pool_overhead + tlsf_block_size_max(tlsf)));
#endif
return 0;
}
@ -708,7 +771,7 @@ void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
tlsf_assert(!block_is_free(block_next(block)) && "next block should not be free");
tlsf_assert(block_size(block_next(block)) == 0 && "next block size should be zero");
mapping_insert(block_size(block), &fl, &sl);
mapping_insert(control, block_size(block), &fl, &sl);
remove_free_block(control, block, fl, sl);
}
@ -717,43 +780,50 @@ void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
*/
tlsf_t tlsf_create(void* mem)
tlsf_t tlsf_create(void* mem, size_t max_bytes)
{
#if _DEBUG
if (test_ffs_fls())
{
return 0;
return NULL;
}
#endif
if (mem == NULL)
{
return NULL;
}
if (((tlsfptr_t)mem % ALIGN_SIZE) != 0)
{
printf("tlsf_create: Memory must be aligned to %u bytes.\n",
(unsigned int)ALIGN_SIZE);
return 0;
return NULL;
}
control_construct(tlsf_cast(control_t*, mem));
return tlsf_cast(tlsf_t, mem);
control_t* control_ptr = control_construct(tlsf_cast(control_t*, mem), max_bytes);
return tlsf_cast(tlsf_t, control_ptr);
}
pool_t tlsf_get_pool(tlsf_t tlsf)
{
return tlsf_cast(pool_t, (char*)tlsf + tlsf_size());
return tlsf_cast(pool_t, (char*)tlsf + tlsf_size(tlsf));
}
tlsf_t tlsf_create_with_pool(void* mem, size_t bytes)
tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes)
{
tlsf_t tlsf = tlsf_create(mem);
tlsf_add_pool(tlsf, (char*)mem + tlsf_size(), bytes - tlsf_size());
tlsf_t tlsf = tlsf_create(mem, max_bytes ? max_bytes : pool_bytes);
if (tlsf != NULL)
{
tlsf_add_pool(tlsf, (char*)mem + tlsf_size(tlsf), pool_bytes - tlsf_size(tlsf));
}
return tlsf;
}
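A minimal usage sketch of the new three-argument signature (buffer and sizes are illustrative, assuming the component-private heap_tlsf.h header):

#include <stdint.h>
#include "heap_tlsf.h"

/* TLSF requires the memory to be ALIGN_SIZE (4-byte) aligned. */
static uint8_t pool[8 * 1024] __attribute__((aligned(4)));

void example(void)
{
    /* max_bytes == 0: metadata is sized from pool_bytes itself. */
    tlsf_t tlsf = tlsf_create_with_pool(pool, sizeof(pool), 0);
    if (tlsf != NULL)
    {
        void *p = tlsf_malloc(tlsf, 100);
        tlsf_free(tlsf, p);
    }
}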
void* tlsf_malloc(tlsf_t tlsf, size_t size)
{
control_t* control = tlsf_cast(control_t*, tlsf);
size_t adjust = adjust_request_size(size, ALIGN_SIZE);
size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
block_header_t* block = block_locate_free(control, adjust);
return block_prepare_used(control, block, adjust);
}
@ -784,7 +854,7 @@ void* tlsf_malloc(tlsf_t tlsf, size_t size)
void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_offset)
{
control_t* control = tlsf_cast(control_t*, tlsf);
const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
const size_t off_adjust = align_up(data_offset, ALIGN_SIZE);
/*
@ -799,7 +869,7 @@ void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_off
/* The offset is included in both `adjust` and `gap_minimum`, so we
** need to subtract it once.
*/
const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum - off_adjust, align);
const size_t size_with_gap = adjust_request_size(tlsf, adjust + align + gap_minimum - off_adjust, align);
/*
** If alignment is less than or equal to base alignment, we're done, because
@ -912,7 +982,13 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
const size_t cursize = block_size(block);
const size_t combined = cursize + block_size(next) + block_header_overhead;
const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
// if adjust is equal to 0, the requested size is too big
if (adjust == 0)
{
return p;
}
tlsf_assert(!block_is_free(block) && "block already marked as free");

View File

@ -78,12 +78,33 @@ typedef struct control_t
/* Empty lists point at this block to indicate they are free. */
block_header_t block_null;
/* Local parameters for the pool. Given the maximum
* value of each field, all the following parameters
* fit in 4 bytes when using bitfields
*/
unsigned int fl_index_count : 5; // 5 cumulated bits
unsigned int fl_index_shift : 3; // 8 cumulated bits
unsigned int fl_index_max : 6; // 14 cumulated bits
unsigned int sl_index_count : 6; // 20 cumulated bits
/* log2 of number of linear subdivisions of block sizes. Larger
** values require more memory in the control structure. Values of
** 4 or 5 are typical.
*/
unsigned int sl_index_count_log2 : 3; // 23 cumulated bits
unsigned int small_block_size : 8; // 31 cumulated bits
/* size of the metadata (size of the control block,
* sl_bitmap and blocks)
*/
size_t size;
/* Bitmaps for free lists. */
unsigned int fl_bitmap;
unsigned int sl_bitmap[FL_INDEX_COUNT];
unsigned int *sl_bitmap;
/* Head of free lists. */
block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT];
block_header_t** blocks;
} control_t;
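For scale, a footprint sketch on a 32-bit target; the numbers correspond to the old host configuration (FL_INDEX_COUNT = 24, SL_INDEX_COUNT = 32) and the 300 KB example worked earlier:

/* Old control_t (statically sized, worst case baked in at compile time):
 *   unsigned int    sl_bitmap[24];       24 * 4      =   96 bytes
 *   block_header_t* blocks[24][32];      24 * 32 * 4 = 3072 bytes
 * New control_t: two pointers, one bitfield word and a size_t, with both
 * arrays laid out right after the struct and sized by control_construct()
 * for the actual pool, e.g. 13 * 4 + 13 * 32 * 4 = 1716 bytes for ~300 KB. */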
#include "heap_tlsf_block_functions.h"
@ -94,8 +115,8 @@ typedef void* tlsf_t;
typedef void* pool_t;
/* Create/destroy a memory pool. */
tlsf_t tlsf_create(void* mem);
tlsf_t tlsf_create_with_pool(void* mem, size_t bytes);
tlsf_t tlsf_create(void* mem, size_t max_bytes);
tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes);
pool_t tlsf_get_pool(tlsf_t tlsf);
/* Add/remove memory pools. */
@ -113,13 +134,23 @@ void tlsf_free(tlsf_t tlsf, void* ptr);
size_t tlsf_block_size(void* ptr);
/* Overheads/limits of internal structures. */
size_t tlsf_size(void);
size_t tlsf_size(tlsf_t tlsf);
size_t tlsf_align_size(void);
size_t tlsf_block_size_min(void);
size_t tlsf_block_size_max(void);
size_t tlsf_block_size_max(tlsf_t tlsf);
size_t tlsf_pool_overhead(void);
size_t tlsf_alloc_overhead(void);
/**
* @brief Return the allocable size based on the size passed
* as parameter
*
* @param tlsf Pointer to the tlsf structure
* @param size The allocation size
* @return size_t The updated allocation size
*/
size_t tlsf_fit_size(tlsf_t tlsf, size_t size);
/* Debugging. */
typedef void (*tlsf_walker)(void* ptr, size_t size, int used, void* user);
void tlsf_walk_pool(pool_t pool, tlsf_walker walker, void* user);

View File

@ -65,7 +65,6 @@
** bits for FL_INDEX.
*/
#define block_size_min (sizeof(block_header_t) - sizeof(block_header_t*))
#define block_size_max (tlsf_cast(size_t, 1) << FL_INDEX_MAX)
/*
** block_header_t member functions.

View File

@ -37,90 +37,9 @@
#pragma once
#ifdef ESP_PLATFORM
#include "soc/soc.h"
#if !CONFIG_SPIRAM
#define TLSF_MAX_POOL_SIZE (SOC_DIRAM_DRAM_HIGH - SOC_DIRAM_DRAM_LOW)
#else
#define TLSF_MAX_POOL_SIZE SOC_EXTRAM_DATA_SIZE
#endif
enum tlsf_config
{
/* log2 of number of linear subdivisions of block sizes. Larger
** values require more memory in the control structure. Values of
** 4 or 5 are typical.
*/
SL_INDEX_COUNT_LOG2 = 5,
/* All allocation sizes and addresses are aligned to 4 bytes. */
ALIGN_SIZE_LOG2 = 2,
ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),
/*
** We support allocations of sizes up to (1 << FL_INDEX_MAX) bytes.
** However, because we linearly subdivide the second-level lists, and
** our minimum size granularity is 4 bytes, it doesn't make sense to
** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4,
** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be
** trying to split size ranges into more slots than we have available.
** Instead, we calculate the minimum threshold size, and place all
** blocks below that size into the 0th first-level list.
*/
/* By tuning the first level, we can reduce the TLSF pool overhead
* in exchange for managing a pool smaller than 4 GB
*/
#if (TLSF_MAX_POOL_SIZE <= (256 * 1024))
FL_INDEX_MAX = 18, //Each pool can have up to 256 KB
#elif (TLSF_MAX_POOL_SIZE <= (512 * 1024))
FL_INDEX_MAX = 19, //Each pool can have up to 512 KB
#elif (TLSF_MAX_POOL_SIZE <= (1 * 1024 * 1024))
FL_INDEX_MAX = 20, //Each pool can have up to 1 MB
#elif (TLSF_MAX_POOL_SIZE <= (2 * 1024 * 1024))
FL_INDEX_MAX = 21, //Each pool can have up to 2 MB
#elif (TLSF_MAX_POOL_SIZE <= (4 * 1024 * 1024))
FL_INDEX_MAX = 22, //Each pool can have up to 4 MB
#elif (TLSF_MAX_POOL_SIZE <= (8 * 1024 * 1024))
FL_INDEX_MAX = 23, //Each pool can have up to 8 MB
#elif (TLSF_MAX_POOL_SIZE <= (16 * 1024 * 1024))
FL_INDEX_MAX = 24, //Each pool can have up to 16 MB
#else
#error "Higher TLSF pool sizes should be added for this new config"
#endif
SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),
SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
};
#else
enum tlsf_config
{
//Specific configuration for host test.
/* log2 of number of linear subdivisions of block sizes. Larger
** values require more memory in the control structure. Values of
** 4 or 5 are typical.
*/
SL_INDEX_COUNT_LOG2 = 5,
/* All allocation sizes and addresses are aligned to 4 bytes. */
ALIGN_SIZE_LOG2 = 2,
ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2),
/* By tuning the first level, we can reduce the TLSF pool overhead
* in exchange for managing a pool smaller than 4 GB
*/
*/
FL_INDEX_MAX = 30,
SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2),
FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2),
FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1),
SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT),
};
#endif

View File

@ -0,0 +1,9 @@
# Function placement in IRAM section
The heap component is compiled and linked in a way that minimizes the utilization of the IRAM section of memory without impacting the performance of its core functionalities. For this reason, the heap component API provided through [esp_heap_caps.h](./include/esp_heap_caps.h) and [esp_heap_caps_init.h](./include/esp_heap_caps_init.h) can be sorted into two sets of functions.
1. The performance-critical functions, placed in IRAM by using the `IRAM_ATTR` attribute defined in [esp_attr.h](./../../components/esp_common/include/esp_attr.h) (e.g., `heap_caps_malloc`, `heap_caps_free`, `heap_caps_realloc`, etc.)
2. The functions that do not require maximum performance, placed in flash (e.g., `heap_caps_print_heap_info`, `heap_caps_dump`, `heap_caps_dump_all`, etc.)
With that in mind, all the functions defined in [multi_heap.c](./multi_heap.c), [multi_heap_poisoning.c](./multi_heap_poisoning.c) and [tlsf.c](./tlsf/tlsf.c) that are directly or indirectly called from one of the heap component API functions placed in IRAM have to also be placed in IRAM. Symmetrically, the functions directly or indirectly called from one of the heap component API functions placed in flash will also be placed in flash.

View File

@ -1,7 +1,47 @@
[mapping:heap]
archive: libheap.a
entries:
heap_tlsf (noflash)
multi_heap (noflash)
heap_tlsf:tlsf_block_size (noflash)
heap_tlsf:tlsf_size (noflash)
heap_tlsf:tlsf_align_size (noflash)
heap_tlsf:tlsf_block_size_min (noflash)
heap_tlsf:tlsf_block_size_max (noflash)
heap_tlsf:tlsf_alloc_overhead (noflash)
heap_tlsf:tlsf_get_pool (noflash)
heap_tlsf:tlsf_malloc (noflash)
heap_tlsf:tlsf_memalign_offs (noflash)
heap_tlsf:tlsf_memalign (noflash)
heap_tlsf:tlsf_free (noflash)
heap_tlsf:tlsf_realloc (noflash)
multi_heap:multi_heap_get_block_address_impl (noflash)
multi_heap:multi_heap_get_allocated_size_impl (noflash)
multi_heap:multi_heap_set_lock (noflash)
multi_heap:multi_heap_get_first_block (noflash)
multi_heap:multi_heap_get_next_block (noflash)
multi_heap:multi_heap_is_free (noflash)
multi_heap:multi_heap_malloc_impl (noflash)
multi_heap:multi_heap_free_impl (noflash)
multi_heap:multi_heap_realloc_impl (noflash)
multi_heap:multi_heap_aligned_alloc_impl_offs (noflash)
multi_heap:multi_heap_aligned_alloc_impl (noflash)
multi_heap:multi_heap_internal_lock (noflash)
multi_heap:multi_heap_internal_unlock (noflash)
multi_heap:assert_valid_block (noflash)
if HEAP_POISONING_DISABLED = n:
multi_heap_poisoning (noflash)
multi_heap_poisoning:poison_allocated_region (noflash)
multi_heap_poisoning:verify_allocated_region (noflash)
multi_heap_poisoning:multi_heap_aligned_alloc (noflash)
multi_heap_poisoning:multi_heap_malloc (noflash)
multi_heap_poisoning:multi_heap_free (noflash)
multi_heap_poisoning:multi_heap_aligned_free (noflash)
multi_heap_poisoning:multi_heap_realloc (noflash)
multi_heap_poisoning:multi_heap_get_block_address (noflash)
multi_heap_poisoning:multi_heap_get_block_owner (noflash)
multi_heap_poisoning:multi_heap_get_allocated_size (noflash)
multi_heap_poisoning:multi_heap_internal_check_block_poisoning (noflash)
multi_heap_poisoning:multi_heap_internal_poison_fill_region (noflash)
if HEAP_POISONING_COMPREHENSIVE = y:
multi_heap_poisoning:verify_fill_pattern (noflash)

View File

@ -85,20 +85,8 @@ typedef struct multi_heap_info {
tlsf_t heap_data;
} heap_t;
/* Return true if this block is free. */
static inline bool is_free(const block_header_t *block)
{
return ((block->size & 0x01) != 0);
}
/* Data size of the block (excludes this block's header) */
static inline size_t block_data_size(const block_header_t *block)
{
return (block->size & ~0x03);
}
/* Check a block is valid for this heap. Used to verify parameters. */
static void assert_valid_block(const heap_t *heap, const block_header_t *block)
__attribute__((noinline)) NOCLONE_ATTR static void assert_valid_block(const heap_t *heap, const block_header_t *block)
{
pool_t pool = tlsf_get_pool(heap->heap_data);
void *ptr = block_to_ptr(block);
@ -110,8 +98,7 @@ static void assert_valid_block(const heap_t *heap, const block_header_t *block)
void *multi_heap_get_block_address_impl(multi_heap_block_handle_t block)
{
void *ptr = block_to_ptr(block);
return (ptr);
return block_to_ptr(block);
}
size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p)
@ -122,7 +109,7 @@ size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p)
multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
{
assert(start_ptr);
if(size < (tlsf_size() + tlsf_block_size_min() + sizeof(heap_t))) {
if(size < (sizeof(heap_t))) {
//Region too small to be a heap.
return NULL;
}
@ -130,13 +117,16 @@ multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
heap_t *result = (heap_t *)start_ptr;
size -= sizeof(heap_t);
result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size);
/* Do not specify any maximum size for the allocations so that the default configuration is used: TLSF sizes its metadata from the pool itself */
const size_t max_bytes = 0;
result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size, max_bytes);
if(!result->heap_data) {
return NULL;
}
result->lock = NULL;
result->free_bytes = size - tlsf_size();
result->free_bytes = size - tlsf_size(result->heap_data);
result->pool_size = size;
result->minimum_free_bytes = result->free_bytes;
return result;
@ -147,12 +137,12 @@ void multi_heap_set_lock(multi_heap_handle_t heap, void *lock)
heap->lock = lock;
}
void inline multi_heap_internal_lock(multi_heap_handle_t heap)
void multi_heap_internal_lock(multi_heap_handle_t heap)
{
MULTI_HEAP_LOCK(heap->lock);
}
void inline multi_heap_internal_unlock(multi_heap_handle_t heap)
void multi_heap_internal_unlock(multi_heap_handle_t heap)
{
MULTI_HEAP_UNLOCK(heap->lock);
}
@ -172,7 +162,7 @@ multi_heap_block_handle_t multi_heap_get_next_block(multi_heap_handle_t heap, mu
assert_valid_block(heap, block);
block_header_t* next = block_next(block);
if(block_data_size(next) == 0) {
if(block_size(next) == 0) {
//Last block:
return NULL;
} else {
@ -183,7 +173,7 @@ multi_heap_block_handle_t multi_heap_get_next_block(multi_heap_handle_t heap, mu
bool multi_heap_is_free(multi_heap_block_handle_t block)
{
return is_free(block);
return block_is_free(block);
}
void *multi_heap_malloc_impl(multi_heap_handle_t heap, size_t size)
@ -309,7 +299,7 @@ bool multi_heap_check(multi_heap_handle_t heap, bool print_errors)
return valid;
}
static void multi_heap_dump_tlsf(void* ptr, size_t size, int used, void* user)
__attribute__((noinline)) static void multi_heap_dump_tlsf(void* ptr, size_t size, int used, void* user)
{
(void)user;
MULTI_HEAP_STDERR_PRINTF("Block %p data, size: %d bytes, Free: %s \n",
@ -346,7 +336,7 @@ size_t multi_heap_minimum_free_size_impl(multi_heap_handle_t heap)
return heap->minimum_free_bytes;
}
static void multi_heap_get_info_tlsf(void* ptr, size_t size, int used, void* user)
__attribute__((noinline)) static void multi_heap_get_info_tlsf(void* ptr, size_t size, int used, void* user)
{
multi_heap_info_t *info = user;
@ -365,9 +355,7 @@ static void multi_heap_get_info_tlsf(void* ptr, size_t size, int used, void* use
void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
{
uint32_t sl_interval;
uint32_t overhead;
memset(info, 0, sizeof(multi_heap_info_t));
if (heap == NULL) {
@ -379,12 +367,9 @@ void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
/* TLSF has an overhead per block. Calculate the total amount of overhead, it shall not be
* part of the allocated bytes */
overhead = info->allocated_blocks * tlsf_alloc_overhead();
info->total_allocated_bytes = (heap->pool_size - tlsf_size()) - heap->free_bytes - overhead;
info->total_allocated_bytes = (heap->pool_size - tlsf_size(heap->heap_data)) - heap->free_bytes - overhead;
info->minimum_free_bytes = heap->minimum_free_bytes;
info->total_free_bytes = heap->free_bytes;
if (info->largest_free_block) {
sl_interval = (1 << (31 - __builtin_clz(info->largest_free_block))) / SL_INDEX_COUNT;
info->largest_free_block = info->largest_free_block & ~(sl_interval - 1);
}
info->largest_free_block = tlsf_fit_size(heap->heap_data, info->largest_free_block);
multi_heap_internal_unlock(heap);
}

View File

@ -13,6 +13,14 @@
// limitations under the License.
#pragma once
/* Define a noclone attribute when compiled with GCC as certain functions
* in the heap component should not be cloned by the compiler */
#if defined __has_attribute && __has_attribute(noclone)
#define NOCLONE_ATTR __attribute__((noclone))
#else
#define NOCLONE_ATTR
#endif
/* Opaque handle to a heap block */
typedef const struct block_header_t *multi_heap_block_handle_t;

View File

@ -65,7 +65,7 @@ typedef struct {
Returns the pointer to the actual usable data buffer (ie after 'head')
*/
static uint8_t *poison_allocated_region(poison_head_t *head, size_t alloc_size)
__attribute__((noinline)) static uint8_t *poison_allocated_region(poison_head_t *head, size_t alloc_size)
{
uint8_t *data = (uint8_t *)(&head[1]); /* start of data ie 'real' allocated buffer */
poison_tail_t *tail = (poison_tail_t *)(data + alloc_size);
@ -89,7 +89,7 @@ static uint8_t *poison_allocated_region(poison_head_t *head, size_t alloc_size)
Returns a pointer to the poison header structure, or NULL if the poison structures are corrupt.
*/
static poison_head_t *verify_allocated_region(void *data, bool print_errors)
__attribute__((noinline)) static poison_head_t *verify_allocated_region(void *data, bool print_errors)
{
poison_head_t *head = (poison_head_t *)((intptr_t)data - sizeof(poison_head_t));
poison_tail_t *tail = (poison_tail_t *)((intptr_t)data + head->alloc_size);
@ -131,8 +131,12 @@ static poison_head_t *verify_allocated_region(void *data, bool print_errors)
if swap_pattern is true, swap patterns in the buffer (ie replace MALLOC_FILL_PATTERN with FREE_FILL_PATTERN, and vice versa.)
Returns true if verification checks out.
This function has the attribute noclone to prevent the compiler from creating a clone in flash where expect_free is removed (as this
function is called only with expect_free == true throughout the component).
*/
static bool verify_fill_pattern(void *data, size_t size, bool print_errors, bool expect_free, bool swap_pattern)
__attribute__((noinline)) NOCLONE_ATTR
static bool verify_fill_pattern(void *data, size_t size, const bool print_errors, const bool expect_free, bool swap_pattern)
{
const uint32_t FREE_FILL_WORD = (FREE_FILL_PATTERN << 24) | (FREE_FILL_PATTERN << 16) | (FREE_FILL_PATTERN << 8) | FREE_FILL_PATTERN;
const uint32_t MALLOC_FILL_WORD = (MALLOC_FILL_PATTERN << 24) | (MALLOC_FILL_PATTERN << 16) | (MALLOC_FILL_PATTERN << 8) | MALLOC_FILL_PATTERN;
@ -242,7 +246,9 @@ void *multi_heap_malloc(multi_heap_handle_t heap, size_t size)
return data;
}
void multi_heap_free(multi_heap_handle_t heap, void *p)
/* This function has the noclone attribute to prevent the compiler from optimizing out the
* check for p == NULL and creating a clone placed in flash. */
NOCLONE_ATTR void multi_heap_free(multi_heap_handle_t heap, void *p)
{
if (p == NULL) {
return;
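For orientation, the block layout this poisoning code operates on, as a sketch (see the poison_head_t/poison_tail_t definitions in this file for the exact fields):

/* Poisoned allocation layout:
 *
 *   +---------------+---------------------+---------------+
 *   | poison_head_t | user data           | poison_tail_t |
 *   +---------------+---------------------+---------------+
 *                   ^ pointer returned to the caller
 *
 * poison_allocated_region() writes the head/tail canaries and returns
 * &head[1]; verify_allocated_region() steps back sizeof(poison_head_t)
 * from the user pointer and checks both canaries before free/realloc. */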

View File

@ -1,3 +1,18 @@
idf_component_register(SRC_DIRS "."
PRIV_INCLUDE_DIRS "."
PRIV_REQUIRES cmock test_utils heap)
if(CONFIG_COMPILER_DUMP_RTL_FILES)
idf_build_get_property(elf_file_name EXECUTABLE GENERATOR_EXPRESSION)
add_custom_target(check_test_app_sections ALL
COMMAND ${PYTHON} $ENV{IDF_PATH}/tools/ci/check_callgraph.py
--rtl-dir ${CMAKE_BINARY_DIR}/esp-idf/heap/
--elf-file ${CMAKE_BINARY_DIR}/${elf_file_name}
find-refs
--from-sections=.iram0.text
--to-sections=.flash.text,.flash.rodata
--ignore-symbols=__func__/__assert_func,__func__/heap_caps_alloc_failed
--exit-code
DEPENDS ${elf_file_name}
)
endif()

View File

@ -6,6 +6,24 @@
#include <string.h>
#include <assert.h>
/* The functions __malloc__ and __free__ are used to call the libc
* malloc and free to allocate memory from the host heap. Since the test
* `TEST_CASE("multi_heap many random allocations", "[multi_heap]")`
* calls multi_heap_allocation_impl() with sizes that can go up to 8MB,
* an allocation on the heap is preferred over the stack, which
* might not have the necessary memory.
*/
static void *__malloc__(size_t bytes)
{
return malloc(bytes);
}
static void __free__(void *ptr)
{
free(ptr);
}
/* Insurance against accidentally using libc heap functions in tests */
#undef free
#define free #error
@ -59,10 +77,11 @@ TEST_CASE("multi_heap simple allocations", "[multi_heap]")
TEST_CASE("multi_heap fragmentation", "[multi_heap]")
{
uint8_t small_heap[4 * 1024];
const size_t HEAP_SIZE = 4 * 1024;
uint8_t small_heap[HEAP_SIZE];
multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
const size_t alloc_size = 128;
const size_t alloc_size = 500;
void *p[4];
for (int i = 0; i < 4; i++) {
@ -202,20 +221,22 @@ TEST_CASE("multi_heap defrag realloc", "[multi_heap]")
#endif
TEST_CASE("multi_heap many random allocations", "[multi_heap]")
void multi_heap_allocation_impl(int heap_size)
{
uint8_t big_heap[8 * 1024];
uint8_t *big_heap = (uint8_t *) __malloc__(heap_size);
const int NUM_POINTERS = 64;
printf("Running multi-allocation test...\n");
printf("Running multi-allocation test with heap_size %d...\n", heap_size);
REQUIRE( big_heap );
multi_heap_handle_t heap = multi_heap_register(big_heap, heap_size);
void *p[NUM_POINTERS] = { 0 };
size_t s[NUM_POINTERS] = { 0 };
multi_heap_handle_t heap = multi_heap_register(big_heap, sizeof(big_heap));
const size_t initial_free = multi_heap_free_size(heap);
const int ITERATIONS = 10000;
const int ITERATIONS = 5000;
for (int i = 0; i < ITERATIONS; i++) {
/* check all pointers allocated so far are valid inside big_heap */
@ -226,11 +247,11 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]")
uint8_t n = rand() % NUM_POINTERS;
if (rand() % 4 == 0) {
if (i % 4 == 0) {
/* 1 in 4 iterations, try to realloc the buffer instead
of using malloc/free
*/
size_t new_size = rand() % 1024;
size_t new_size = (rand() % 1023) + 1;
void *new_p = multi_heap_realloc(heap, p[n], new_size);
printf("realloc %p -> %p (%zu -> %zu)\n", p[n], new_p, s[n], new_size);
multi_heap_check(heap, true);
@ -239,13 +260,12 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]")
s[n] = new_size;
if (new_size > 0) {
REQUIRE( p[n] >= big_heap );
REQUIRE( p[n] < big_heap + sizeof(big_heap) );
REQUIRE( p[n] < big_heap + heap_size );
memset(p[n], n, new_size);
}
}
continue;
}
if (p[n] != NULL) {
if (s[n] > 0) {
/* Verify pre-existing contents of p[n] */
@ -269,14 +289,13 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]")
printf("malloc %p (%zu)\n", p[n], s[n]);
if (p[n] != NULL) {
REQUIRE( p[n] >= big_heap );
REQUIRE( p[n] < big_heap + sizeof(big_heap) );
REQUIRE( p[n] < big_heap + heap_size );
}
if (!multi_heap_check(heap, true)) {
printf("FAILED iteration %d after mallocing %p (%zu bytes)\n", i, p[n], s[n]);
multi_heap_dump(heap);
REQUIRE(0);
}
if (p[n] != NULL) {
memset(p[n], n, s[n]);
}
@ -292,6 +311,15 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]")
}
REQUIRE( initial_free == multi_heap_free_size(heap) );
__free__(big_heap);
}
TEST_CASE("multi_heap many random allocations", "[multi_heap]")
{
size_t poolsize[] = { 15, 255, 4095, 8191 };
for (size_t i = 0; i < sizeof(poolsize)/sizeof(size_t); i++) {
multi_heap_allocation_impl(poolsize[i] * 1024);
}
}
TEST_CASE("multi_heap_get_info() function", "[multi_heap]")
@ -391,8 +419,9 @@ TEST_CASE("multi_heap minimum-size allocations", "[multi_heap]")
TEST_CASE("multi_heap_realloc()", "[multi_heap]")
{
const size_t HEAP_SIZE = 4 * 1024;
const uint32_t PATTERN = 0xABABDADA;
uint8_t small_heap[4 * 1024];
uint8_t small_heap[HEAP_SIZE];
multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
uint32_t *a = (uint32_t *)multi_heap_malloc(heap, 64);
@ -402,7 +431,6 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]")
REQUIRE( b > a); /* 'b' takes the block after 'a' */
*a = PATTERN;
uint32_t *c = (uint32_t *)multi_heap_realloc(heap, a, 72);
REQUIRE( multi_heap_check(heap, true));
REQUIRE( c != NULL );
@ -412,13 +440,12 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]")
#ifndef MULTI_HEAP_POISONING_SLOW
// "Slow" poisoning implementation doesn't reallocate in place, so these
// tests will fail...
uint32_t *d = (uint32_t *)multi_heap_realloc(heap, c, 36);
REQUIRE( multi_heap_check(heap, true) );
REQUIRE( c == d ); /* 'c' block should be shrunk in-place */
REQUIRE( *d == PATTERN);
uint32_t *e = (uint32_t *)multi_heap_malloc(heap, 64);
// biggest allocation possible to completely fill the block left free after it was reallocated
uint32_t *e = (uint32_t *)multi_heap_malloc(heap, 60);
REQUIRE( multi_heap_check(heap, true));
REQUIRE( a == e ); /* 'e' takes the block formerly occupied by 'a' */
@ -427,11 +454,7 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]")
REQUIRE( multi_heap_check(heap, true) );
REQUIRE( f == b ); /* 'b' should be extended in-place, over space formerly occupied by 'd' */
#ifdef MULTI_HEAP_POISONING
#define TOO_MUCH 7420 + 1
#else
#define TOO_MUCH 7420 + 1
#endif
#define TOO_MUCH HEAP_SIZE + 1
/* not enough contiguous space left in the heap */
uint32_t *g = (uint32_t *)multi_heap_realloc(heap, e, TOO_MUCH);
REQUIRE( g == NULL );
@ -441,7 +464,8 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]")
g = (uint32_t *)multi_heap_realloc(heap, e, 128);
REQUIRE( multi_heap_check(heap, true) );
REQUIRE( e == g ); /* 'g' extends 'e' in place, into the space formerly held by 'f' */
#endif
#endif // MULTI_HEAP_POISONING_SLOW
}
// TLSF only accepts heaps aligned to 4-byte boundary so

View File

@ -130,7 +130,28 @@ Thread Safety
Heap functions are thread safe, meaning they can be called from different tasks simultaneously without any limitations.
It is technically possible to call ``malloc``, ``free``, and related functions from interrupt handler (ISR) context. However this is not recommended, as heap function calls may delay other interrupts. It is strongly recommended to refactor applications so that any buffers used by an ISR are pre-allocated outside of the ISR. Support for calling heap functions from ISRs may be removed in a future update.
It is technically possible to call ``malloc``, ``free``, and related functions from interrupt handler (ISR) context (see :ref:`calling-heap-related-functions-from-isr`). However this is not recommended, as heap function calls may delay other interrupts. It is strongly recommended to refactor applications so that any buffers used by an ISR are pre-allocated outside of the ISR. Support for calling heap functions from ISRs may be removed in a future update.
.. _calling-heap-related-functions-from-isr:
Calling heap related functions from ISR
---------------------------------------
The following functions from the heap component can be called from an interrupt handler (ISR):
* :cpp:func:`heap_caps_malloc`
* :cpp:func:`heap_caps_malloc_default`
* :cpp:func:`heap_caps_realloc_default`
* :cpp:func:`heap_caps_malloc_prefer`
* :cpp:func:`heap_caps_realloc_prefer`
* :cpp:func:`heap_caps_calloc_prefer`
* :cpp:func:`heap_caps_free`
* :cpp:func:`heap_caps_realloc`
* :cpp:func:`heap_caps_calloc`
* :cpp:func:`heap_caps_aligned_alloc`
* :cpp:func:`heap_caps_aligned_free`
Note, however, that this practice is strongly discouraged; a brief sketch follows.
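As an illustration only (the handler name is hypothetical), an IRAM-resident ISR could allocate and free a small internal-RAM buffer with the functions listed above:

/* Possible, but discouraged -- prefer buffers pre-allocated outside the ISR. */
#include "esp_attr.h"
#include "esp_heap_caps.h"

static void IRAM_ATTR example_isr_handler(void *arg)
{
    void *buf = heap_caps_malloc(64, MALLOC_CAP_INTERNAL);
    if (buf != NULL) {
        /* ... use buf ... */
        heap_caps_free(buf);
    }
}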
Heap Tracing & Debugging
------------------------

View File

@ -110,6 +110,11 @@ class Reference(object):
)
class IgnorePair():
def __init__(self, pair): # type: (str) -> None
self.symbol, self.function_call = pair.split('/')
class ElfInfo(object):
def __init__(self, elf_file): # type: (BinaryIO) -> None
self.elf_file = elf_file
@ -174,7 +179,7 @@ class ElfInfo(object):
return None
def load_rtl_file(rtl_filename, tu_filename, functions): # type: (str, str, List[RtlFunction]) -> None
def load_rtl_file(rtl_filename, tu_filename, functions, ignore_pairs): # type: (str, str, List[RtlFunction], List[IgnorePair]) -> None
last_function = None # type: Optional[RtlFunction]
for line in open(rtl_filename):
# Find function definition
@ -190,6 +195,17 @@ def load_rtl_file(rtl_filename, tu_filename, functions): # type: (str, str, Lis
match = re.match(CALL_REGEX, line)
if match:
target = match.group('target')
# if target matches one of the IgnorePair function_call attributes, remove
# the last occurrence of the associated symbol from the last_function.refs list.
call_matching_pairs = [pair for pair in ignore_pairs if pair.function_call == target]
if call_matching_pairs and last_function and last_function.refs:
for pair in call_matching_pairs:
ignored_symbols = [ref for ref in last_function.refs if pair.symbol in ref]
if ignored_symbols:
last_ref = ignored_symbols.pop()
last_function.refs = [ref for ref in last_function.refs if last_ref != ref]
if target not in last_function.calls:
last_function.calls.append(target)
continue
@ -319,12 +335,12 @@ def match_rtl_funcs_to_symbols(rtl_functions, elfinfo): # type: (List[RtlFuncti
return symbols, refs
def get_symbols_and_refs(rtl_list, elf_file): # type: (List[str], BinaryIO) -> Tuple[List[Symbol], List[Reference]]
def get_symbols_and_refs(rtl_list, elf_file, ignore_pairs): # type: (List[str], BinaryIO, List[IgnorePair]) -> Tuple[List[Symbol], List[Reference]]
elfinfo = ElfInfo(elf_file)
rtl_functions = [] # type: List[RtlFunction]
for file_name in rtl_list:
load_rtl_file(file_name, file_name, rtl_functions)
load_rtl_file(file_name, file_name, rtl_functions, ignore_pairs)
return match_rtl_funcs_to_symbols(rtl_functions, elfinfo)
@ -376,6 +392,10 @@ def main():
find_refs_parser.add_argument(
'--to-sections', help='comma-separated list of target sections'
)
find_refs_parser.add_argument(
'--ignore-symbols', help='comma-separated list of symbol/function_name pairs. \
This will force the parser to ignore the symbol preceding the call to function_name'
)
find_refs_parser.add_argument(
'--exit-code',
action='store_true',
@ -399,7 +419,11 @@ def main():
if not rtl_list:
raise RuntimeError('No RTL files specified')
_, refs = get_symbols_and_refs(rtl_list, args.elf_file)
ignore_pairs = []
for pair in args.ignore_symbols.split(',') if args.ignore_symbols else []:
ignore_pairs.append(IgnorePair(pair))
_, refs = get_symbols_and_refs(rtl_list, args.elf_file, ignore_pairs)
if args.action == 'find-refs':
from_sections = args.from_sections.split(',') if args.from_sections else []

View File

@ -26,3 +26,4 @@ CONFIG_SPIRAM_BANKSWITCH_ENABLE=n
CONFIG_FATFS_ALLOC_PREFER_EXTRAM=y
CONFIG_UNITY_ENABLE_BACKTRACE_ON_FAIL=y
CONFIG_ESP_NETIF_TCPIP_ADAPTER_COMPATIBLE_LAYER=n
CONFIG_COMPILER_DUMP_RTL_FILES=y