From 8325aff3c992be7a4ae29e8ec23b13f4cb0af53f Mon Sep 17 00:00:00 2001 From: Philippe Date: Thu, 4 Nov 2021 15:07:49 -0700 Subject: [PATCH 1/7] dynamic control block per heap --- components/heap/heap_tlsf.c | 145 +++++++++++++------- components/heap/heap_tlsf.h | 22 ++- components/heap/heap_tlsf_block_functions.h | 4 +- components/heap/heap_tlsf_config.h | 72 +--------- components/heap/multi_heap.c | 15 +- 5 files changed, 124 insertions(+), 134 deletions(-) diff --git a/components/heap/heap_tlsf.c b/components/heap/heap_tlsf.c index 8251796339..69a105a437 100644 --- a/components/heap/heap_tlsf.c +++ b/components/heap/heap_tlsf.c @@ -90,12 +90,6 @@ tlsf_static_assert(sizeof(int) * CHAR_BIT == 32); tlsf_static_assert(sizeof(size_t) * CHAR_BIT >= 32); tlsf_static_assert(sizeof(size_t) * CHAR_BIT <= 64); -/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */ -tlsf_static_assert(sizeof(unsigned int) * CHAR_BIT >= SL_INDEX_COUNT); - -/* Ensure we've properly tuned our sizes. */ -tlsf_static_assert(ALIGN_SIZE == SMALL_BLOCK_SIZE / SL_INDEX_COUNT); - static inline __attribute__((__always_inline__)) size_t align_up(size_t x, size_t align) { tlsf_assert(0 == (align & (align - 1)) && "must align to a power of two"); @@ -120,7 +114,7 @@ static inline __attribute__((__always_inline__)) void* align_ptr(const void* ptr ** Adjust an allocation size to be aligned to word size, and no smaller ** than internal minimum. */ -static inline __attribute__((__always_inline__)) size_t adjust_request_size(size_t size, size_t align) +static inline __attribute__((__always_inline__)) size_t adjust_request_size(tlsf_t tlsf, size_t size, size_t align) { size_t adjust = 0; if (size) @@ -128,7 +122,7 @@ static inline __attribute__((__always_inline__)) size_t adjust_request_size(size const size_t aligned = align_up(size, align); /* aligned sized must not exceed block_size_max or we'll go out of bounds on sl_bitmap */ - if (aligned < block_size_max) + if (aligned < tlsf_block_size_max(tlsf)) { adjust = tlsf_max(aligned, block_size_min); } @@ -141,10 +135,10 @@ static inline __attribute__((__always_inline__)) size_t adjust_request_size(size ** the documentation found in the white paper. */ -static inline __attribute__((__always_inline__)) void mapping_insert(size_t size, int* fli, int* sli) +static inline __attribute__((__always_inline__)) void mapping_insert(control_t *control, size_t size, int* fli, int* sli) { int fl, sl; - if (size < SMALL_BLOCK_SIZE) + if (size < control->small_block_size) { /* Store small blocks in first list. 
*/ fl = 0; @@ -153,22 +147,22 @@ static inline __attribute__((__always_inline__)) void mapping_insert(size_t size else { fl = tlsf_fls(size); - sl = tlsf_cast(int, size >> (fl - SL_INDEX_COUNT_LOG2)) ^ (1 << SL_INDEX_COUNT_LOG2); - fl -= (FL_INDEX_SHIFT - 1); + sl = tlsf_cast(int, size >> (fl - control->sl_index_count_log2)) ^ (1 << control->sl_index_count_log2); + fl -= (control->fl_index_shift - 1); } *fli = fl; *sli = sl; } /* This version rounds up to the next block size (for allocations) */ -static inline __attribute__((__always_inline__)) void mapping_search(size_t size, int* fli, int* sli) +static inline __attribute__((__always_inline__)) void mapping_search(control_t *control, size_t size, int* fli, int* sli) { - if (size >= SMALL_BLOCK_SIZE) + if (size >= control->small_block_size) { - const size_t round = (1 << (tlsf_fls(size) - SL_INDEX_COUNT_LOG2)) - 1; + const size_t round = (1 << (tlsf_fls(size) - control->sl_index_count_log2)) - 1; size += round; } - mapping_insert(size, fli, sli); + mapping_insert(control, size, fli, sli); } static inline __attribute__((__always_inline__)) block_header_t* search_suitable_block(control_t* control, int* fli, int* sli) @@ -200,7 +194,7 @@ static inline __attribute__((__always_inline__)) block_header_t* search_suitable *sli = sl; /* Return the first block in the free list. */ - return control->blocks[fl][sl]; + return control->blocks[fl*control->sl_index_count + sl]; } /* Remove a free block from the free list.*/ @@ -214,9 +208,9 @@ static inline __attribute__((__always_inline__)) void remove_free_block(control_ prev->next_free = next; /* If this block is the head of the free list, set new head. */ - if (control->blocks[fl][sl] == block) + if (control->blocks[fl*control->sl_index_count + sl] == block) { - control->blocks[fl][sl] = next; + control->blocks[fl*control->sl_index_count + sl] = next; /* If the new head is null, clear the bitmap. */ if (next == &control->block_null) @@ -235,7 +229,7 @@ static inline __attribute__((__always_inline__)) void remove_free_block(control_ /* Insert a free block into the free block list. */ static inline __attribute__((__always_inline__)) void insert_free_block(control_t* control, block_header_t* block, int fl, int sl) { - block_header_t* current = control->blocks[fl][sl]; + block_header_t* current = control->blocks[fl*control->sl_index_count + sl]; tlsf_assert(current && "free list cannot have a null entry"); tlsf_assert(block && "cannot insert a null entry into the free list"); block->next_free = current; @@ -248,7 +242,7 @@ static inline __attribute__((__always_inline__)) void insert_free_block(control_ ** Insert the new block at the head of the list, and mark the first- ** and second-level bitmaps appropriately. 
*/ - control->blocks[fl][sl] = block; + control->blocks[fl*control->sl_index_count + sl] = block; control->fl_bitmap |= (1 << fl); control->sl_bitmap[fl] |= (1 << sl); } @@ -257,7 +251,7 @@ static inline __attribute__((__always_inline__)) void insert_free_block(control_ static inline __attribute__((__always_inline__)) void block_remove(control_t* control, block_header_t* block) { int fl, sl; - mapping_insert(block_size(block), &fl, &sl); + mapping_insert(control, block_size(block), &fl, &sl); remove_free_block(control, block, fl, sl); } @@ -265,7 +259,7 @@ static inline __attribute__((__always_inline__)) void block_remove(control_t* co static inline __attribute__((__always_inline__)) void block_insert(control_t* control, block_header_t* block) { int fl, sl; - mapping_insert(block_size(block), &fl, &sl); + mapping_insert(control, block_size(block), &fl, &sl); insert_free_block(control, block, fl, sl); } @@ -428,7 +422,7 @@ static inline __attribute__((__always_inline__)) block_header_t* block_locate_f if (size) { - mapping_search(size, &fl, &sl); + mapping_search(control, size, &fl, &sl); /* ** mapping_search can futz with the size, so for excessively large sizes it can sometimes wind up @@ -436,7 +430,7 @@ static inline __attribute__((__always_inline__)) block_header_t* block_locate_f ** So, we protect against that here, since this is the only callsite of mapping_search. ** Note that we don't need to check sl, since it comes from a modulo operation that guarantees it's always in range. */ - if (fl < FL_INDEX_COUNT) + if (fl < control->fl_index_count) { block = search_suitable_block(control, &fl, &sl); } @@ -465,20 +459,44 @@ static inline __attribute__((__always_inline__)) void* block_prepare_used(contro } /* Clear structure and point all empty lists at the null block. */ -static void control_construct(control_t* control) +static void control_construct(control_t* control, size_t bytes) { int i, j; control->block_null.next_free = &control->block_null; control->block_null.prev_free = &control->block_null; + /* find the closest ^2 for first layer */ + i = (bytes - 1) / (16 * 1024); + control->fl_index_max = FL_INDEX_MAX_MIN + sizeof(i) * 8 - __builtin_clz(i); + + /* adapt second layer to the pool */ + if (bytes <= 16 * 1024) control->sl_index_count_log2 = 3; + else if (bytes <= 256 * 1024) control->sl_index_count_log2 = 4; + else control->sl_index_count_log2 = 5; + + control->fl_index_shift = (control->sl_index_count_log2 + ALIGN_SIZE_LOG2); + control->sl_index_count = 1 << control->sl_index_count_log2; + control->fl_index_count = control->fl_index_max - control->fl_index_shift + 1; + control->small_block_size = 1 << control->fl_index_shift; control->fl_bitmap = 0; - for (i = 0; i < FL_INDEX_COUNT; ++i) + + control->sl_bitmap = align_ptr(control + 1, sizeof(*control->sl_bitmap)); + control->blocks = align_ptr(control->sl_bitmap + control->fl_index_count, sizeof(*control->blocks)); + control->size = (void*) (control->blocks + control->sl_index_count * control->fl_index_count) - (void*) control; + + /* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */ + tlsf_assert(sizeof(unsigned int) * CHAR_BIT >= control->sl_index_count && "CHAR_BIT less than sl_index_count"); + + /* Ensure we've properly tuned our sizes. 
*/ + tlsf_assert(ALIGN_SIZE == control->small_block_size / control->sl_index_count && "ALIGN_SIZE does not match"); + + for (i = 0; i < control->fl_index_count; ++i) { control->sl_bitmap[i] = 0; - for (j = 0; j < SL_INDEX_COUNT; ++j) + for (j = 0; j < control->sl_index_count; ++j) { - control->blocks[i][j] = &control->block_null; + control->blocks[i*control->sl_index_count + j] = &control->block_null; } } } @@ -524,14 +542,14 @@ int tlsf_check(tlsf_t tlsf) int status = 0; /* Check that the free lists and bitmaps are accurate. */ - for (i = 0; i < FL_INDEX_COUNT; ++i) + for (i = 0; i < control->fl_index_count; ++i) { - for (j = 0; j < SL_INDEX_COUNT; ++j) + for (j = 0; j < control->sl_index_count; ++j) { const int fl_map = control->fl_bitmap & (1 << i); const int sl_list = control->sl_bitmap[i]; const int sl_map = sl_list & (1 << j); - const block_header_t* block = control->blocks[i][j]; + const block_header_t* block = control->blocks[i*control->sl_index_count + j]; /* Check that first- and second-level lists agree. */ if (!fl_map) @@ -559,7 +577,7 @@ int tlsf_check(tlsf_t tlsf) tlsf_insist(block_is_prev_free(block_next(block)) && "block should be free"); tlsf_insist(block_size(block) >= block_size_min && "block not minimum size"); - mapping_insert(block_size(block), &fli, &sli); + mapping_insert(control, block_size(block), &fli, &sli); tlsf_insist(fli == i && sli == j && "block size indexed in wrong list"); #ifdef MULTI_HEAP_POISONING @@ -631,13 +649,37 @@ int tlsf_check_pool(pool_t pool) return integ.status; } +size_t tlsf_fit_size(tlsf_t tlsf, size_t size) +{ + /* because it's GoodFit, allocable size is one range lower */ + if (size) + { + size_t sl_interval; + control_t* control = tlsf_cast(control_t*, tlsf); + sl_interval = (1 << ((sizeof(size_t) * 8 - 1) - __builtin_clz(size))) / control->sl_index_count; + return size & ~(sl_interval - 1); + } + + return 0; +} + + /* ** Size of the TLSF structures in a given memory block passed to ** tlsf_create, equal to the size of a control_t */ -size_t tlsf_size(void) +size_t tlsf_size(tlsf_t tlsf) { - return sizeof(control_t); + if (tlsf) + { + control_t* control = tlsf_cast(control_t*, tlsf); + return control->size; + } + + /* no tlsf, we'll just return a min size */ + return sizeof(control_t) + + sizeof(int) * SL_INDEX_COUNT_MIN + + sizeof(block_header_t*) * SL_INDEX_COUNT_MIN * FL_INDEX_COUNT_MIN; } size_t tlsf_align_size(void) @@ -650,9 +692,10 @@ size_t tlsf_block_size_min(void) return block_size_min; } -size_t tlsf_block_size_max(void) +size_t tlsf_block_size_max(tlsf_t tlsf) { - return block_size_max; + control_t* control = tlsf_cast(control_t*, tlsf); + return tlsf_cast(size_t, 1) << control->fl_index_max; } /* @@ -685,16 +728,16 @@ pool_t tlsf_add_pool(tlsf_t tlsf, void* mem, size_t bytes) return 0; } - if (pool_bytes < block_size_min || pool_bytes > block_size_max) + if (pool_bytes < block_size_min || pool_bytes > tlsf_block_size_max(tlsf)) { #if defined (TLSF_64BIT) printf("tlsf_add_pool: Memory size must be between 0x%x and 0x%x00 bytes.\n", (unsigned int)(pool_overhead + block_size_min), - (unsigned int)((pool_overhead + block_size_max) / 256)); + (unsigned int)((pool_overhead + tlsf_block_size_max(tlsf)) / 256)); #else printf("tlsf_add_pool: Memory size must be between %u and %u bytes.\n", (unsigned int)(pool_overhead + block_size_min), - (unsigned int)(pool_overhead + block_size_max)); + (unsigned int)(pool_overhead + tlsf_block_size_max(tlsf))); #endif return 0; } @@ -730,7 +773,7 @@ void tlsf_remove_pool(tlsf_t tlsf, pool_t pool) 
tlsf_assert(!block_is_free(block_next(block)) && "next block should not be free"); tlsf_assert(block_size(block_next(block)) == 0 && "next block size should be zero"); - mapping_insert(block_size(block), &fl, &sl); + mapping_insert(control, block_size(block), &fl, &sl); remove_free_block(control, block, fl, sl); } @@ -739,7 +782,7 @@ void tlsf_remove_pool(tlsf_t tlsf, pool_t pool) */ -tlsf_t tlsf_create(void* mem) +tlsf_t tlsf_create(void* mem, size_t bytes) { #if _DEBUG if (test_ffs_fls()) @@ -755,27 +798,27 @@ tlsf_t tlsf_create(void* mem) return 0; } - control_construct(tlsf_cast(control_t*, mem)); + control_construct(tlsf_cast(control_t*, mem), bytes); return tlsf_cast(tlsf_t, mem); } pool_t tlsf_get_pool(tlsf_t tlsf) { - return tlsf_cast(pool_t, (char*)tlsf + tlsf_size()); + return tlsf_cast(pool_t, (char*)tlsf + tlsf_size(tlsf)); } -tlsf_t tlsf_create_with_pool(void* mem, size_t bytes) +tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes) { - tlsf_t tlsf = tlsf_create(mem); - tlsf_add_pool(tlsf, (char*)mem + tlsf_size(), bytes - tlsf_size()); + tlsf_t tlsf = tlsf_create(mem, max_bytes ? max_bytes : pool_bytes); + tlsf_add_pool(tlsf, (char*)mem + tlsf_size(tlsf), pool_bytes - tlsf_size(tlsf)); return tlsf; } void* tlsf_malloc(tlsf_t tlsf, size_t size) { control_t* control = tlsf_cast(control_t*, tlsf); - size_t adjust = adjust_request_size(size, ALIGN_SIZE); + size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE); block_header_t* block = block_locate_free(control, adjust); return block_prepare_used(control, block, adjust); } @@ -806,7 +849,7 @@ void* tlsf_malloc(tlsf_t tlsf, size_t size) void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_offset) { control_t* control = tlsf_cast(control_t*, tlsf); - const size_t adjust = adjust_request_size(size, ALIGN_SIZE); + const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE); const size_t off_adjust = align_up(data_offset, ALIGN_SIZE); /* @@ -821,7 +864,7 @@ void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_off /* The offset is included in both `adjust` and `gap_minimum`, so we ** need to subtract it once. */ - const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum - off_adjust, align); + const size_t size_with_gap = adjust_request_size(tlsf, adjust + align + gap_minimum - off_adjust, align); /* ** If alignment is less than or equal to base alignment, we're done, because @@ -934,7 +977,7 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size) const size_t cursize = block_size(block); const size_t combined = cursize + block_size(next) + block_header_overhead; - const size_t adjust = adjust_request_size(size, ALIGN_SIZE); + const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE); tlsf_assert(!block_is_free(block) && "block already marked as free"); diff --git a/components/heap/heap_tlsf.h b/components/heap/heap_tlsf.h index 58212dbb3c..4347a0d0c8 100644 --- a/components/heap/heap_tlsf.h +++ b/components/heap/heap_tlsf.h @@ -78,12 +78,21 @@ typedef struct control_t /* Empty lists point at this block to indicate they are free. */ block_header_t block_null; + /* Local parameter for the pool */ + unsigned int fl_index_count; + unsigned int fl_index_shift; + unsigned int fl_index_max; + unsigned int sl_index_count; + unsigned int sl_index_count_log2; + unsigned int small_block_size; + size_t size; + /* Bitmaps for free lists. 
*/ unsigned int fl_bitmap; - unsigned int sl_bitmap[FL_INDEX_COUNT]; + unsigned int *sl_bitmap; /* Head of free lists. */ - block_header_t* blocks[FL_INDEX_COUNT][SL_INDEX_COUNT]; + block_header_t** blocks; } control_t; #include "heap_tlsf_block_functions.h" @@ -94,8 +103,8 @@ typedef void* tlsf_t; typedef void* pool_t; /* Create/destroy a memory pool. */ -tlsf_t tlsf_create(void* mem); -tlsf_t tlsf_create_with_pool(void* mem, size_t bytes); +tlsf_t tlsf_create(void* mem, size_t bytes); +tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes); pool_t tlsf_get_pool(tlsf_t tlsf); /* Add/remove memory pools. */ @@ -113,12 +122,13 @@ void tlsf_free(tlsf_t tlsf, void* ptr); size_t tlsf_block_size(void* ptr); /* Overheads/limits of internal structures. */ -size_t tlsf_size(void); +size_t tlsf_size(tlsf_t tlsf); size_t tlsf_align_size(void); size_t tlsf_block_size_min(void); -size_t tlsf_block_size_max(void); +size_t tlsf_block_size_max(tlsf_t tlsf); size_t tlsf_pool_overhead(void); size_t tlsf_alloc_overhead(void); +size_t tlsf_fit_size(tlsf_t tlsf, size_t size); /* Debugging. */ typedef void (*tlsf_walker)(void* ptr, size_t size, int used, void* user); diff --git a/components/heap/heap_tlsf_block_functions.h b/components/heap/heap_tlsf_block_functions.h index 77119e39a2..efc92c9c28 100644 --- a/components/heap/heap_tlsf_block_functions.h +++ b/components/heap/heap_tlsf_block_functions.h @@ -63,9 +63,11 @@ ** A free block must be large enough to store its header minus the size of ** the prev_phys_block field, and no larger than the number of addressable ** bits for FL_INDEX. +** The block_size_max macro returns the maximum block for the minimum pool +** use tlsf_block_size_max for a value specific to the pool */ #define block_size_min (sizeof(block_header_t) - sizeof(block_header_t*)) -#define block_size_max (tlsf_cast(size_t, 1) << FL_INDEX_MAX) +#define block_size_max (tlsf_cast(size_t, 1) << FL_INDEX_MAX_MIN) /* ** block_header_t member functions. diff --git a/components/heap/heap_tlsf_config.h b/components/heap/heap_tlsf_config.h index f26daf81f6..bb378e8a2a 100644 --- a/components/heap/heap_tlsf_config.h +++ b/components/heap/heap_tlsf_config.h @@ -37,23 +37,13 @@ #pragma once -#ifdef ESP_PLATFORM - -#include "soc/soc.h" - -#if !CONFIG_SPIRAM -#define TLSF_MAX_POOL_SIZE (SOC_DIRAM_DRAM_HIGH - SOC_DIRAM_DRAM_LOW) -#else -#define TLSF_MAX_POOL_SIZE SOC_EXTRAM_DATA_SIZE -#endif - enum tlsf_config { /* log2 of number of linear subdivisions of block sizes. Larger ** values require more memory in the control structure. Values of - ** 4 or 5 are typical. + ** 4 or 5 are typical, 3 is for very small pools. */ - SL_INDEX_COUNT_LOG2 = 5, + SL_INDEX_COUNT_LOG2_MIN = 3, /* All allocation sizes and addresses are aligned to 4 bytes. */ ALIGN_SIZE_LOG2 = 2, @@ -68,59 +58,9 @@ enum tlsf_config ** trying to split size ranges into more slots than we have available. ** Instead, we calculate the minimum threshold size, and place all ** blocks below that size into the 0th first-level list. 
+ ** Values below are the absolute minimum to accept a pool addition */ - - /* Tunning the first level, we can reduce TLSF pool overhead - * in exchange of manage a pool smaller than 4GB - */ - #if (TLSF_MAX_POOL_SIZE <= (256 * 1024)) - FL_INDEX_MAX = 18, //Each pool can have up 256KB - #elif (TLSF_MAX_POOL_SIZE <= (512 * 1024)) - FL_INDEX_MAX = 19, //Each pool can have up 512KB - #elif (TLSF_MAX_POOL_SIZE <= (1 * 1024 * 1024)) - FL_INDEX_MAX = 20, //Each pool can have up 1MB - #elif (TLSF_MAX_POOL_SIZE <= (2 * 1024 * 1024)) - FL_INDEX_MAX = 21, //Each pool can have up 2MB - #elif (TLSF_MAX_POOL_SIZE <= (4 * 1024 * 1024)) - FL_INDEX_MAX = 22, //Each pool can have up 4MB - #elif (TLSF_MAX_POOL_SIZE <= (8 * 1024 * 1024)) - FL_INDEX_MAX = 23, //Each pool can have up 8MB - #elif (TLSF_MAX_POOL_SIZE <= (16 * 1024 * 1024)) - FL_INDEX_MAX = 24, //Each pool can have up 16MB - #else - #error "Higher TLSF pool sizes should be added for this new config" - #endif - - SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2), - FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2), - FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1), - - SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT), + FL_INDEX_MAX_MIN = 14, // For a less than 16kB pool + SL_INDEX_COUNT_MIN = (1 << SL_INDEX_COUNT_LOG2_MIN), + FL_INDEX_COUNT_MIN = (FL_INDEX_MAX_MIN - (SL_INDEX_COUNT_LOG2_MIN + ALIGN_SIZE_LOG2) + 1), }; -#else -enum tlsf_config -{ - //Specific configuration for host test. - - /* log2 of number of linear subdivisions of block sizes. Larger - ** values require more memory in the control structure. Values of - ** 4 or 5 are typical. - */ - SL_INDEX_COUNT_LOG2 = 5, - - /* All allocation sizes and addresses are aligned to 4 bytes. */ - ALIGN_SIZE_LOG2 = 2, - ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2), - - /* Tunning the first level, we can reduce TLSF pool overhead - * in exchange of manage a pool smaller than 4GB - */ - FL_INDEX_MAX = 30, - - SL_INDEX_COUNT = (1 << SL_INDEX_COUNT_LOG2), - FL_INDEX_SHIFT = (SL_INDEX_COUNT_LOG2 + ALIGN_SIZE_LOG2), - FL_INDEX_COUNT = (FL_INDEX_MAX - FL_INDEX_SHIFT + 1), - - SMALL_BLOCK_SIZE = (1 << FL_INDEX_SHIFT), -}; -#endif diff --git a/components/heap/multi_heap.c b/components/heap/multi_heap.c index 01775d98a7..a72b31e629 100644 --- a/components/heap/multi_heap.c +++ b/components/heap/multi_heap.c @@ -122,7 +122,7 @@ size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p) multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size) { assert(start_ptr); - if(size < (tlsf_size() + tlsf_block_size_min() + sizeof(heap_t))) { + if(size < (tlsf_size(NULL) + tlsf_block_size_min() + sizeof(heap_t))) { //Region too small to be a heap. 
return NULL; } @@ -130,13 +130,13 @@ multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size) heap_t *result = (heap_t *)start_ptr; size -= sizeof(heap_t); - result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size); + result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size, 0); if(!result->heap_data) { return NULL; } result->lock = NULL; - result->free_bytes = size - tlsf_size(); + result->free_bytes = size - tlsf_size(result->heap_data); result->pool_size = size; result->minimum_free_bytes = result->free_bytes; return result; @@ -399,9 +399,7 @@ static void multi_heap_get_info_tlsf(void* ptr, size_t size, int used, void* use void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info) { - uint32_t sl_interval; uint32_t overhead; - memset(info, 0, sizeof(multi_heap_info_t)); if (heap == NULL) { @@ -413,12 +411,9 @@ void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info) /* TLSF has an overhead per block. Calculate the total amount of overhead, it shall not be * part of the allocated bytes */ overhead = info->allocated_blocks * tlsf_alloc_overhead(); - info->total_allocated_bytes = (heap->pool_size - tlsf_size()) - heap->free_bytes - overhead; + info->total_allocated_bytes = (heap->pool_size - tlsf_size(heap->heap_data)) - heap->free_bytes - overhead; info->minimum_free_bytes = heap->minimum_free_bytes; info->total_free_bytes = heap->free_bytes; - if (info->largest_free_block) { - sl_interval = (1 << (31 - __builtin_clz(info->largest_free_block))) / SL_INDEX_COUNT; - info->largest_free_block = info->largest_free_block & ~(sl_interval - 1); - } + info->largest_free_block = tlsf_fit_size(heap->heap_data, info->largest_free_block); multi_heap_internal_unlock(heap); } From 7010314c4aab4638cf90a5ae28fd70d2790497d9 Mon Sep 17 00:00:00 2001 From: Philippe Date: Fri, 5 Nov 2021 00:37:45 -0700 Subject: [PATCH 2/7] tlsf control's structure should remain opaque --- components/heap/heap_tlsf.c | 24 ++++++++++++++++++++++++ components/heap/heap_tlsf.h | 23 ----------------------- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/components/heap/heap_tlsf.c b/components/heap/heap_tlsf.c index 69a105a437..c0c893eb74 100644 --- a/components/heap/heap_tlsf.c +++ b/components/heap/heap_tlsf.c @@ -57,6 +57,30 @@ ** NOTE: TLSF spec relies on ffs/fls returning value 0..31. ** ffs/fls return 1-32 by default, returning 0 for error. */ + +/* The TLSF control structure. */ +typedef struct control_t +{ + /* Empty lists point at this block to indicate they are free. */ + block_header_t block_null; + + /* Local parameter for the pool */ + unsigned int fl_index_count; + unsigned int fl_index_shift; + unsigned int fl_index_max; + unsigned int sl_index_count; + unsigned int sl_index_count_log2; + unsigned int small_block_size; + size_t size; + + /* Bitmaps for free lists. */ + unsigned int fl_bitmap; + unsigned int *sl_bitmap; + + /* Head of free lists. */ + block_header_t** blocks; +} control_t; + static inline __attribute__((__always_inline__)) int tlsf_ffs(unsigned int word) { const unsigned int reverse = word & (~word + 1); diff --git a/components/heap/heap_tlsf.h b/components/heap/heap_tlsf.h index 4347a0d0c8..f01b5d3095 100644 --- a/components/heap/heap_tlsf.h +++ b/components/heap/heap_tlsf.h @@ -72,29 +72,6 @@ typedef struct block_header_t struct block_header_t* prev_free; } block_header_t; -/* The TLSF control structure. 
*/
-typedef struct control_t
-{
-	/* Empty lists point at this block to indicate they are free. */
-	block_header_t block_null;
-
-	/* Local parameter for the pool */
-	unsigned int fl_index_count;
-	unsigned int fl_index_shift;
-	unsigned int fl_index_max;
-	unsigned int sl_index_count;
-	unsigned int sl_index_count_log2;
-	unsigned int small_block_size;
-	size_t size;
-
-	/* Bitmaps for free lists. */
-	unsigned int fl_bitmap;
-	unsigned int *sl_bitmap;
-
-	/* Head of free lists. */
-	block_header_t** blocks;
-} control_t;
-
 #include "heap_tlsf_block_functions.h"
 
 /* tlsf_t: a TLSF structure. Can contain 1 to N pools. */

From ffcc115e8b1536801f9da165878afc4b752a97f0 Mon Sep 17 00:00:00 2001
From: Philippe
Date: Fri, 5 Nov 2021 00:44:21 -0700
Subject: [PATCH 3/7] clarify parameter usage in tlsf_create

---
 components/heap/heap_tlsf.c | 4 ++--
 components/heap/heap_tlsf.h | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/components/heap/heap_tlsf.c b/components/heap/heap_tlsf.c
index c0c893eb74..1fdcfad0a3 100644
--- a/components/heap/heap_tlsf.c
+++ b/components/heap/heap_tlsf.c
@@ -806,7 +806,7 @@ void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
 */
 
 
-tlsf_t tlsf_create(void* mem, size_t bytes)
+tlsf_t tlsf_create(void* mem, size_t max_bytes)
 {
 #if _DEBUG
 	if (test_ffs_fls())
@@ -822,7 +822,7 @@ void tlsf_remove_pool(tlsf_t tlsf, pool_t pool)
 		return 0;
 	}
 
-	control_construct(tlsf_cast(control_t*, mem), bytes);
+	control_construct(tlsf_cast(control_t*, mem), max_bytes);
 
 	return tlsf_cast(tlsf_t, mem);
 }
diff --git a/components/heap/heap_tlsf.h b/components/heap/heap_tlsf.h
index f01b5d3095..b2f94a6221 100644
--- a/components/heap/heap_tlsf.h
+++ b/components/heap/heap_tlsf.h
@@ -80,7 +80,7 @@ typedef void* tlsf_t;
 typedef void* pool_t;
 
 /* Create/destroy a memory pool.
*/ -tlsf_t tlsf_create(void* mem, size_t bytes); +tlsf_t tlsf_create(void* mem, size_t max_bytes); tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes); pool_t tlsf_get_pool(tlsf_t tlsf); From 6dae07d13704ab43253779599c245f38bc30f20f Mon Sep 17 00:00:00 2001 From: Philippe Date: Fri, 5 Nov 2021 17:50:56 -0700 Subject: [PATCH 4/7] add host test with multiple heap size --- .../test_multi_heap_host/test_multi_heap.cpp | 35 ++++++++++++++----- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/components/heap/test_multi_heap_host/test_multi_heap.cpp b/components/heap/test_multi_heap_host/test_multi_heap.cpp index 4c45634fb4..90bb44589e 100644 --- a/components/heap/test_multi_heap_host/test_multi_heap.cpp +++ b/components/heap/test_multi_heap_host/test_multi_heap.cpp @@ -8,6 +8,16 @@ #include #include +static void *__malloc__(size_t bytes) +{ + return malloc(bytes); +} + +static void __free__(void *ptr) +{ + free(ptr); +} + /* Insurance against accidentally using libc heap functions in tests */ #undef free #define free #error @@ -204,16 +214,18 @@ TEST_CASE("multi_heap defrag realloc", "[multi_heap]") #endif -TEST_CASE("multi_heap many random allocations", "[multi_heap]") +void multi_heap_allocation_impl(int heap_size) { - uint8_t big_heap[8 * 1024]; + uint8_t *big_heap = (uint8_t *) __malloc__(2*heap_size); const int NUM_POINTERS = 64; - printf("Running multi-allocation test...\n"); + printf("Running multi-allocation test with heap_size %d...\n", heap_size); + + REQUIRE( big_heap ); + multi_heap_handle_t heap = multi_heap_register(big_heap, heap_size); void *p[NUM_POINTERS] = { 0 }; size_t s[NUM_POINTERS] = { 0 }; - multi_heap_handle_t heap = multi_heap_register(big_heap, sizeof(big_heap)); const size_t initial_free = multi_heap_free_size(heap); @@ -241,13 +253,12 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]") s[n] = new_size; if (new_size > 0) { REQUIRE( p[n] >= big_heap ); - REQUIRE( p[n] < big_heap + sizeof(big_heap) ); + REQUIRE( p[n] < big_heap + heap_size ); memset(p[n], n, new_size); } } continue; } - if (p[n] != NULL) { if (s[n] > 0) { /* Verify pre-existing contents of p[n] */ @@ -271,14 +282,13 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]") printf("malloc %p (%zu)\n", p[n], s[n]); if (p[n] != NULL) { REQUIRE( p[n] >= big_heap ); - REQUIRE( p[n] < big_heap + sizeof(big_heap) ); + REQUIRE( p[n] < big_heap + heap_size ); } if (!multi_heap_check(heap, true)) { printf("FAILED iteration %d after mallocing %p (%zu bytes)\n", i, p[n], s[n]); multi_heap_dump(heap); REQUIRE(0); } - if (p[n] != NULL) { memset(p[n], n, s[n]); } @@ -294,6 +304,15 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]") } REQUIRE( initial_free == multi_heap_free_size(heap) ); + __free__(big_heap); +} + +TEST_CASE("multi_heap many random allocations", "[multi_heap]") +{ + size_t poolsize[] = { 15, 255, 4095, 8191 }; + for (size_t i = 0; i < sizeof(poolsize)/sizeof(size_t); i++) { + multi_heap_allocation_impl(poolsize[i] * 1024); + } } TEST_CASE("multi_heap_get_info() function", "[multi_heap]") From a1ca11551fd8489a88aedddce796afc26f02d118 Mon Sep 17 00:00:00 2001 From: Guillaume Souchere Date: Tue, 20 Sep 2022 08:58:04 +0200 Subject: [PATCH 5/7] Revert "tlsf control's structure should remain opaque" This reverts commit 7010314c4aab4638cf90a5ae28fd70d2790497d9. 
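
Keeping control_t visible in heap_tlsf.h is what allows code outside of
heap_tlsf.c, notably the host tests updated later in this series, to
inspect the per-heap metadata directly. A minimal sketch of that kind of
access (hedged: tlsf_metadata_size() is an illustrative helper, and the
20-byte heap_t offset is an assumption borrowed from the host test, not
part of the API):

    #include <stddef.h>
    #include <stdint.h>
    #include "heap_tlsf.h"

    /* Sketch: read the size of the TLSF metadata that multi_heap places
     * right after its own heap_t bookkeeping structure. */
    static size_t tlsf_metadata_size(uint8_t *heap_mem)
    {
        const size_t heap_t_size = 20; /* assumption from the host test */
        control_t *control = (control_t *)(heap_mem + heap_t_size);
        return control->size; /* control block + sl_bitmap + free lists */
    }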
---
 components/heap/heap_tlsf.c | 24 ------------------------
 components/heap/heap_tlsf.h | 23 +++++++++++++++++++++++
 2 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/components/heap/heap_tlsf.c b/components/heap/heap_tlsf.c
index 1fdcfad0a3..010eceda5f 100644
--- a/components/heap/heap_tlsf.c
+++ b/components/heap/heap_tlsf.c
@@ -57,30 +57,6 @@
 ** NOTE: TLSF spec relies on ffs/fls returning value 0..31.
 ** ffs/fls return 1-32 by default, returning 0 for error.
 */
-
-/* The TLSF control structure. */
-typedef struct control_t
-{
-	/* Empty lists point at this block to indicate they are free. */
-	block_header_t block_null;
-
-	/* Local parameter for the pool */
-	unsigned int fl_index_count;
-	unsigned int fl_index_shift;
-	unsigned int fl_index_max;
-	unsigned int sl_index_count;
-	unsigned int sl_index_count_log2;
-	unsigned int small_block_size;
-	size_t size;
-
-	/* Bitmaps for free lists. */
-	unsigned int fl_bitmap;
-	unsigned int *sl_bitmap;
-
-	/* Head of free lists. */
-	block_header_t** blocks;
-} control_t;
-
 static inline __attribute__((__always_inline__)) int tlsf_ffs(unsigned int word)
 {
 	const unsigned int reverse = word & (~word + 1);
diff --git a/components/heap/heap_tlsf.h b/components/heap/heap_tlsf.h
index b2f94a6221..c140269ac6 100644
--- a/components/heap/heap_tlsf.h
+++ b/components/heap/heap_tlsf.h
@@ -72,6 +72,29 @@ typedef struct block_header_t
 	struct block_header_t* prev_free;
 } block_header_t;
 
+/* The TLSF control structure. */
+typedef struct control_t
+{
+	/* Empty lists point at this block to indicate they are free. */
+	block_header_t block_null;
+
+	/* Local parameter for the pool */
+	unsigned int fl_index_count;
+	unsigned int fl_index_shift;
+	unsigned int fl_index_max;
+	unsigned int sl_index_count;
+	unsigned int sl_index_count_log2;
+	unsigned int small_block_size;
+	size_t size;
+
+	/* Bitmaps for free lists. */
+	unsigned int fl_bitmap;
+	unsigned int *sl_bitmap;
+
+	/* Head of free lists. */
+	block_header_t** blocks;
+} control_t;
+
 #include "heap_tlsf_block_functions.h"
 
 /* tlsf_t: a TLSF structure. Can contain 1 to N pools. */

From d4de73cd2cddc68d607e9065a51048a3c0744c33 Mon Sep 17 00:00:00 2001
From: Guillaume Souchere
Date: Thu, 13 Oct 2022 10:02:29 +0200
Subject: [PATCH 6/7] heap: update the calculation of fl index max and use
 bitfields in control_t

The calculation of fl index max is changed to always be the smallest
value that covers the size of the registered memory. The
control_construct() function now checks for the minimum size as the
control structure parameters are calculated.

There is no longer a minimum configuration for fl index max, so the
tlsf_config enum is stripped down to remove unnecessary compile-time
values.

The tlsf_size() function now fails if no tlsf pointer is passed as a
parameter, since there is no way to calculate a default tlsf size
anymore.

Bitfields are now used in control_t where possible, which reduces the
size of the structure from 56 bytes to 36 bytes.
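
To make the new derivation concrete, here is a standalone sketch of the
arithmetic described above (hedged: the names mirror the control_t
fields, ALIGN_SIZE_LOG2 == 2 is assumed as in heap_tlsf_config.h, and
derive_params() is an illustrative helper, not part of the patch):

    #include <stddef.h>
    #include <stdio.h>

    /* Sketch: derive the per-heap TLSF parameters for a pool of `bytes`,
     * using the same formulas control_construct() applies in this patch. */
    static void derive_params(size_t bytes)
    {
        /* position of the highest set bit, plus one: 32 - clz */
        unsigned fl_index_max = 32 - __builtin_clz((unsigned)bytes);
        /* the second level adapts to the pool size */
        unsigned sl_index_count_log2 = (bytes <= 16 * 1024) ? 3
                                     : (bytes <= 256 * 1024) ? 4 : 5;
        unsigned fl_index_shift = sl_index_count_log2 + 2; /* ALIGN_SIZE_LOG2 */
        unsigned fl_index_count = fl_index_max - fl_index_shift + 1;
        unsigned sl_index_count = 1u << sl_index_count_log2;
        printf("fl_index_max=%u fl_index_count=%u sl_index_count=%u small_block_size=%u\n",
               fl_index_max, fl_index_count, sl_index_count, 1u << fl_index_shift);
    }

For example, the largest host-test pool (8191 * 1024 bytes) yields
fl_index_max = 23 and sl_index_count = 32, while a 15 * 1024 byte pool
yields fl_index_max = 14 and sl_index_count = 8.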
---
 components/heap/heap_tlsf.c                 | 93 ++++++++++++++-------
 components/heap/heap_tlsf.h                 | 35 ++++++--
 components/heap/heap_tlsf_block_functions.h |  3 -
 components/heap/heap_tlsf_config.h          | 21 -----
 components/heap/multi_heap.c                |  9 +-
 5 files changed, 97 insertions(+), 64 deletions(-)

diff --git a/components/heap/heap_tlsf.c b/components/heap/heap_tlsf.c
index 010eceda5f..334b88eb5f 100644
--- a/components/heap/heap_tlsf.c
+++ b/components/heap/heap_tlsf.c
@@ -142,7 +142,7 @@ static inline __attribute__((__always_inline__)) void mapping_insert(control_t *
 	{
 		/* Store small blocks in first list. */
 		fl = 0;
-		sl = tlsf_cast(int, size) >> 2;
+		sl = tlsf_cast(int, size) / (control->small_block_size / control->sl_index_count);
 	}
 	else
 	{
@@ -459,16 +459,19 @@ static inline __attribute__((__always_inline__)) void* block_prepare_used(contro
 }
 
 /* Clear structure and point all empty lists at the null block. */
-static void control_construct(control_t* control, size_t bytes)
+static control_t* control_construct(control_t* control, size_t bytes)
 {
-	int i, j;
+	// check that the requested size can at least hold the control_t. This will allow us
+	// to fill in the fields of control_t necessary to determine the final size of
+	// the metadata overhead and check that the requested size can hold
+	// this data and at least a block of minimum size
+	if (bytes < sizeof(control_t))
+	{
+		return NULL;
+	}
 
-	control->block_null.next_free = &control->block_null;
-	control->block_null.prev_free = &control->block_null;
-
-	/* find the closest ^2 for first layer */
-	i = (bytes - 1) / (16 * 1024);
-	control->fl_index_max = FL_INDEX_MAX_MIN + sizeof(i) * 8 - __builtin_clz(i);
+	/* Find the closest power of two for first layer */
+	control->fl_index_max = 32 - __builtin_clz(bytes);
 
 	/* adapt second layer to the pool */
 	if (bytes <= 16 * 1024) control->sl_index_count_log2 = 3;
@@ -479,11 +482,26 @@ static void control_construct(control_t* control, size_t bytes)
 	control->sl_index_count = 1 << control->sl_index_count_log2;
 	control->fl_index_count = control->fl_index_max - control->fl_index_shift + 1;
 	control->small_block_size = 1 << control->fl_index_shift;
+
+	// the total size of the metadata overhead is the size of the control_t
+	// added to the size of the sl_bitmaps and the size of blocks
+	control->size = sizeof(control_t) + (sizeof(*control->sl_bitmap) * control->fl_index_count) +
+			(sizeof(*control->blocks) * (control->fl_index_count * control->sl_index_count));
+
+	// check that the requested size can hold the whole control structure and
+	// a small block at least
+	if (bytes < control->size + block_size_min)
+	{
+		return NULL;
+	}
+
+	control->block_null.next_free = &control->block_null;
+	control->block_null.prev_free = &control->block_null;
+
 	control->fl_bitmap = 0;
 
 	control->sl_bitmap = align_ptr(control + 1, sizeof(*control->sl_bitmap));
 	control->blocks = align_ptr(control->sl_bitmap + control->fl_index_count, sizeof(*control->blocks));
-	control->size = (void*) (control->blocks + control->sl_index_count * control->fl_index_count) - (void*) control;
 
 	/* SL_INDEX_COUNT must be <= number of bits in sl_bitmap's storage type. */
 	tlsf_assert(sizeof(unsigned int) * CHAR_BIT >= control->sl_index_count && "CHAR_BIT less than sl_index_count");
@@ -491,14 +509,16 @@ static void control_construct(control_t* control, size_t bytes)
 	/* Ensure we've properly tuned our sizes.
*/
 	tlsf_assert(ALIGN_SIZE == control->small_block_size / control->sl_index_count && "ALIGN_SIZE does not match");
 
-	for (i = 0; i < control->fl_index_count; ++i)
+	for (int i = 0; i < control->fl_index_count; ++i)
 	{
 		control->sl_bitmap[i] = 0;
-		for (j = 0; j < control->sl_index_count; ++j)
+		for (int j = 0; j < control->sl_index_count; ++j)
 		{
 			control->blocks[i*control->sl_index_count + j] = &control->block_null;
 		}
 	}
+
+	return control;
 }
 
 /*
@@ -652,13 +672,13 @@ int tlsf_check_pool(pool_t pool)
 size_t tlsf_fit_size(tlsf_t tlsf, size_t size)
 {
 	/* because it's GoodFit, allocable size is one range lower */
-	if (size)
+	if (size && tlsf != NULL)
 	{
 		size_t sl_interval;
 		control_t* control = tlsf_cast(control_t*, tlsf);
-		sl_interval = (1 << ((sizeof(size_t) * 8 - 1) - __builtin_clz(size))) / control->sl_index_count;
-		return size & ~(sl_interval - 1);
-	}
+		sl_interval = (1 << (32 - __builtin_clz(size) - 1)) / control->sl_index_count;
+		return size & ~(sl_interval - 1);
+	}
 
 	return 0;
 }
@@ -670,16 +690,12 @@ size_t tlsf_fit_size(tlsf_t tlsf, size_t size)
 */
 size_t tlsf_size(tlsf_t tlsf)
 {
-	if (tlsf)
+	if (tlsf == NULL)
 	{
-		control_t* control = tlsf_cast(control_t*, tlsf);
-		return control->size;
+		return 0;
 	}
-
-	/* no tlsf, we'll just return a min size */
-	return sizeof(control_t) +
-			sizeof(int) * SL_INDEX_COUNT_MIN +
-			sizeof(block_header_t*) * SL_INDEX_COUNT_MIN * FL_INDEX_COUNT_MIN;
+	control_t* control = tlsf_cast(control_t*, tlsf);
+	return control->size;
 }
 
 size_t tlsf_align_size(void)
@@ -694,6 +710,10 @@ size_t tlsf_block_size_min(void)
 
 size_t tlsf_block_size_max(tlsf_t tlsf)
 {
+	if (tlsf == NULL)
+	{
+		return 0;
+	}
 	control_t* control = tlsf_cast(control_t*, tlsf);
 	return tlsf_cast(size_t, 1) << control->fl_index_max;
 }
@@ -787,20 +807,24 @@ tlsf_t tlsf_create(void* mem, size_t max_bytes)
 #if _DEBUG
 	if (test_ffs_fls())
 	{
-		return 0;
+		return NULL;
 	}
 #endif
 
+	if (mem == NULL)
+	{
+		return NULL;
+	}
+
 	if (((tlsfptr_t)mem % ALIGN_SIZE) != 0)
 	{
 		printf("tlsf_create: Memory must be aligned to %u bytes.\n",
 			(unsigned int)ALIGN_SIZE);
-		return 0;
+		return NULL;
 	}
 
-	control_construct(tlsf_cast(control_t*, mem), max_bytes);
-
-	return tlsf_cast(tlsf_t, mem);
+	control_t* control_ptr = control_construct(tlsf_cast(control_t*, mem), max_bytes);
+	return tlsf_cast(tlsf_t, control_ptr);
 }
 
 pool_t tlsf_get_pool(tlsf_t tlsf)
@@ -811,7 +835,10 @@ pool_t tlsf_get_pool(tlsf_t tlsf)
 tlsf_t tlsf_create_with_pool(void* mem, size_t pool_bytes, size_t max_bytes)
 {
 	tlsf_t tlsf = tlsf_create(mem, max_bytes ? max_bytes : pool_bytes);
-	tlsf_add_pool(tlsf, (char*)mem + tlsf_size(tlsf), pool_bytes - tlsf_size(tlsf));
+	if (tlsf != NULL)
+	{
+		tlsf_add_pool(tlsf, (char*)mem + tlsf_size(tlsf), pool_bytes - tlsf_size(tlsf));
+	}
 	return tlsf;
 }
@@ -979,6 +1006,12 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
 	const size_t combined = cursize + block_size(next) + block_header_overhead;
 	const size_t adjust = adjust_request_size(tlsf, size, ALIGN_SIZE);
 
+	// if adjust is equal to 0, the size is too big
+	if (adjust == 0)
+	{
+		return p;
+	}
+
 	tlsf_assert(!block_is_free(block) && "block already marked as free");
 
 	/*
diff --git a/components/heap/heap_tlsf.h b/components/heap/heap_tlsf.h
index c140269ac6..0937ee711c 100644
--- a/components/heap/heap_tlsf.h
+++ b/components/heap/heap_tlsf.h
@@ -78,13 +78,25 @@ typedef struct control_t
 	/* Empty lists point at this block to indicate they are free.
*/ block_header_t block_null; - /* Local parameter for the pool */ - unsigned int fl_index_count; - unsigned int fl_index_shift; - unsigned int fl_index_max; - unsigned int sl_index_count; - unsigned int sl_index_count_log2; - unsigned int small_block_size; + /* Local parameter for the pool. Given the maximum + * value of each field, all the following parameters + * can fit on 4 bytes when using bitfields + */ + unsigned int fl_index_count : 5; // 5 cumulated bits + unsigned int fl_index_shift : 3; // 8 cumulated bits + unsigned int fl_index_max : 6; // 14 cumulated bits + unsigned int sl_index_count : 6; // 20 cumulated bits + + /* log2 of number of linear subdivisions of block sizes. Larger + ** values require more memory in the control structure. Values of + ** 4 or 5 are typical. + */ + unsigned int sl_index_count_log2 : 3; // 23 cumulated bits + unsigned int small_block_size : 8; // 31 cumulated bits + + /* size of the metadata ( size of control block, + * sl_bitmap and blocks ) + */ size_t size; /* Bitmaps for free lists. */ @@ -128,6 +140,15 @@ size_t tlsf_block_size_min(void); size_t tlsf_block_size_max(tlsf_t tlsf); size_t tlsf_pool_overhead(void); size_t tlsf_alloc_overhead(void); + +/** + * @brief Return the allocable size based on the size passed + * as parameter + * + * @param tlsf Pointer to the tlsf structure + * @param size The allocation size + * @return size_t The updated allocation size + */ size_t tlsf_fit_size(tlsf_t tlsf, size_t size); /* Debugging. */ diff --git a/components/heap/heap_tlsf_block_functions.h b/components/heap/heap_tlsf_block_functions.h index efc92c9c28..18211c997f 100644 --- a/components/heap/heap_tlsf_block_functions.h +++ b/components/heap/heap_tlsf_block_functions.h @@ -63,11 +63,8 @@ ** A free block must be large enough to store its header minus the size of ** the prev_phys_block field, and no larger than the number of addressable ** bits for FL_INDEX. -** The block_size_max macro returns the maximum block for the minimum pool -** use tlsf_block_size_max for a value specific to the pool */ #define block_size_min (sizeof(block_header_t) - sizeof(block_header_t*)) -#define block_size_max (tlsf_cast(size_t, 1) << FL_INDEX_MAX_MIN) /* ** block_header_t member functions. diff --git a/components/heap/heap_tlsf_config.h b/components/heap/heap_tlsf_config.h index bb378e8a2a..70f4690638 100644 --- a/components/heap/heap_tlsf_config.h +++ b/components/heap/heap_tlsf_config.h @@ -39,28 +39,7 @@ enum tlsf_config { - /* log2 of number of linear subdivisions of block sizes. Larger - ** values require more memory in the control structure. Values of - ** 4 or 5 are typical, 3 is for very small pools. - */ - SL_INDEX_COUNT_LOG2_MIN = 3, - /* All allocation sizes and addresses are aligned to 4 bytes. */ ALIGN_SIZE_LOG2 = 2, ALIGN_SIZE = (1 << ALIGN_SIZE_LOG2), - - /* - ** We support allocations of sizes up to (1 << FL_INDEX_MAX) bits. - ** However, because we linearly subdivide the second-level lists, and - ** our minimum size granularity is 4 bytes, it doesn't make sense to - ** create first-level lists for sizes smaller than SL_INDEX_COUNT * 4, - ** or (1 << (SL_INDEX_COUNT_LOG2 + 2)) bytes, as there we will be - ** trying to split size ranges into more slots than we have available. - ** Instead, we calculate the minimum threshold size, and place all - ** blocks below that size into the 0th first-level list. 
-	** Values below are the absolute minimum to accept a pool addition
-	*/
-	FL_INDEX_MAX_MIN = 14, // For a less than 16kB pool
-	SL_INDEX_COUNT_MIN = (1 << SL_INDEX_COUNT_LOG2_MIN),
-	FL_INDEX_COUNT_MIN = (FL_INDEX_MAX_MIN - (SL_INDEX_COUNT_LOG2_MIN + ALIGN_SIZE_LOG2) + 1),
 };
diff --git a/components/heap/multi_heap.c b/components/heap/multi_heap.c
index a72b31e629..6b0d0a2c93 100644
--- a/components/heap/multi_heap.c
+++ b/components/heap/multi_heap.c
@@ -122,7 +122,7 @@ size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p)
 multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
 {
 	assert(start_ptr);
-	if(size < (tlsf_size(NULL) + tlsf_block_size_min() + sizeof(heap_t))) {
+	if(size < (sizeof(heap_t))) {
 		//Region too small to be a heap.
 		return NULL;
 	}
@@ -130,7 +130,10 @@ multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
 	heap_t *result = (heap_t *)start_ptr;
 	size -= sizeof(heap_t);
 
-	result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size, 0);
+	/* Do not specify any maximum size for the allocations so that the default configuration is used */
+	const size_t max_bytes = 0;
+
+	result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size, max_bytes);
 	if(!result->heap_data) {
 		return NULL;
 	}
@@ -414,6 +417,6 @@ void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
 	info->total_allocated_bytes = (heap->pool_size - tlsf_size(heap->heap_data)) - heap->free_bytes - overhead;
 	info->minimum_free_bytes = heap->minimum_free_bytes;
 	info->total_free_bytes = heap->free_bytes;
-	info->largest_free_block = tlsf_fit_size(heap->heap_data, info->largest_free_block);
+	info->largest_free_block = tlsf_fit_size(heap->heap_data, info->largest_free_block);
 	multi_heap_internal_unlock(heap);
 }

From 75e1c4d0fbbf409dc220e4008e1713b7ddb038fe Mon Sep 17 00:00:00 2001
From: Guillaume Souchere
Date: Thu, 13 Oct 2022 10:05:53 +0200
Subject: [PATCH 7/7] heap: Update host tests after incorporation of the new
 TLSF implementation

---
 .../test_multi_heap_host/test_multi_heap.cpp | 45 +++++++++++--------
 1 file changed, 27 insertions(+), 18 deletions(-)

diff --git a/components/heap/test_multi_heap_host/test_multi_heap.cpp b/components/heap/test_multi_heap_host/test_multi_heap.cpp
index 90bb44589e..d376e106bb 100644
--- a/components/heap/test_multi_heap_host/test_multi_heap.cpp
+++ b/components/heap/test_multi_heap_host/test_multi_heap.cpp
@@ -8,6 +8,14 @@
 #include 
 #include 
 
+
+/* The functions __malloc__ and __free__ are used to call the libc
+ * malloc and free and allocate memory from the host heap. Since the test
+ * `TEST_CASE("multi_heap many random allocations", "[multi_heap]")`
+ * calls multi_heap_allocation_impl() with sizes that can go up to 8MB,
+ * an allocation on the heap is preferred over the stack, which
+ * might not have the necessary memory.
+ */ static void *__malloc__(size_t bytes) { return malloc(bytes); @@ -71,10 +79,11 @@ TEST_CASE("multi_heap simple allocations", "[multi_heap]") TEST_CASE("multi_heap fragmentation", "[multi_heap]") { - uint8_t small_heap[4 * 1024]; + const size_t HEAP_SIZE = 4 * 1024; + uint8_t small_heap[HEAP_SIZE]; multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap)); - const size_t alloc_size = 128; + const size_t alloc_size = 500; void *p[4]; for (int i = 0; i < 4; i++) { @@ -216,7 +225,7 @@ TEST_CASE("multi_heap defrag realloc", "[multi_heap]") void multi_heap_allocation_impl(int heap_size) { - uint8_t *big_heap = (uint8_t *) __malloc__(2*heap_size); + uint8_t *big_heap = (uint8_t *) __malloc__(heap_size); const int NUM_POINTERS = 64; printf("Running multi-allocation test with heap_size %d...\n", heap_size); @@ -229,7 +238,7 @@ void multi_heap_allocation_impl(int heap_size) const size_t initial_free = multi_heap_free_size(heap); - const int ITERATIONS = 10000; + const int ITERATIONS = 5000; for (int i = 0; i < ITERATIONS; i++) { /* check all pointers allocated so far are valid inside big_heap */ @@ -240,11 +249,11 @@ void multi_heap_allocation_impl(int heap_size) uint8_t n = rand() % NUM_POINTERS; - if (rand() % 4 == 0) { + if (i % 4 == 0) { /* 1 in 4 iterations, try to realloc the buffer instead of using malloc/free */ - size_t new_size = rand() % 1024; + size_t new_size = (rand() % 1023) + 1; void *new_p = multi_heap_realloc(heap, p[n], new_size); printf("realloc %p -> %p (%zu -> %zu)\n", p[n], new_p, s[n], new_size); multi_heap_check(heap, true); @@ -412,8 +421,9 @@ TEST_CASE("multi_heap minimum-size allocations", "[multi_heap]") TEST_CASE("multi_heap_realloc()", "[multi_heap]") { + const size_t HEAP_SIZE = 4 * 1024; const uint32_t PATTERN = 0xABABDADA; - uint8_t small_heap[4 * 1024]; + uint8_t small_heap[HEAP_SIZE]; multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap)); uint32_t *a = (uint32_t *)multi_heap_malloc(heap, 64); @@ -423,7 +433,6 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]") REQUIRE( b > a); /* 'b' takes the block after 'a' */ *a = PATTERN; - uint32_t *c = (uint32_t *)multi_heap_realloc(heap, a, 72); REQUIRE( multi_heap_check(heap, true)); REQUIRE( c != NULL ); @@ -433,13 +442,12 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]") #ifndef MULTI_HEAP_POISONING_SLOW // "Slow" poisoning implementation doesn't reallocate in place, so these // test will fail... 
- uint32_t *d = (uint32_t *)multi_heap_realloc(heap, c, 36); REQUIRE( multi_heap_check(heap, true) ); REQUIRE( c == d ); /* 'c' block should be shrunk in-place */ REQUIRE( *d == PATTERN); - - uint32_t *e = (uint32_t *)multi_heap_malloc(heap, 64); + // biggest allocation possible to completely fill the block left free after it was reallocated + uint32_t *e = (uint32_t *)multi_heap_malloc(heap, 60); REQUIRE( multi_heap_check(heap, true)); REQUIRE( a == e ); /* 'e' takes the block formerly occupied by 'a' */ @@ -448,11 +456,7 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]") REQUIRE( multi_heap_check(heap, true) ); REQUIRE( f == b ); /* 'b' should be extended in-place, over space formerly occupied by 'd' */ -#ifdef MULTI_HEAP_POISONING -#define TOO_MUCH 7420 + 1 -#else -#define TOO_MUCH 7420 + 1 -#endif +#define TOO_MUCH HEAP_SIZE + 1 /* not enough contiguous space left in the heap */ uint32_t *g = (uint32_t *)multi_heap_realloc(heap, e, TOO_MUCH); REQUIRE( g == NULL ); @@ -462,7 +466,8 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]") g = (uint32_t *)multi_heap_realloc(heap, e, 128); REQUIRE( multi_heap_check(heap, true) ); REQUIRE( e == g ); /* 'g' extends 'e' in place, into the space formerly held by 'f' */ -#endif + +#endif // MULTI_HEAP_POISONING_SLOW } // TLSF only accepts heaps aligned to 4-byte boundary so @@ -561,8 +566,12 @@ TEST_CASE("multi_heap poisoning detection", "[multi_heap]") /* register the heap memory. One free block only will be available */ multi_heap_handle_t heap = multi_heap_register(heap_mem, HEAP_SIZE); + control_t *tlsf_ptr = (control_t*)(heap_mem + 20); + const size_t control_t_size = tlsf_ptr->size; + const size_t heap_t_size = 20; + /* offset in memory at which to find the first free memory byte */ - const size_t free_memory_offset = sizeof(multi_heap_info_t) + sizeof(control_t) + block_header_overhead; + const size_t free_memory_offset = heap_t_size + control_t_size + sizeof(block_header_t) - block_header_overhead; /* block header of the free block under test in the heap () */ const block_header_t* block = (block_header_t*)(heap_mem + free_memory_offset - sizeof(block_header_t));