Merge branch 'feature/tlsf-dynamic-control-size' into 'master'
heap: Update to the new tlsf implementation of dynamic metadata size

See merge request espressif/esp-idf!20584
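The change updates the heap component to the TLSF API in which the control structure (metadata) is sized when a pool is created rather than at compile time: tlsf_create_with_pool() gains a third argument giving the maximum allocation size (0 selects the default configuration), and tlsf_size() now takes the TLSF handle whose metadata size it reports. A minimal sketch of the new calls, assuming only the tlsf.h header from the updated components/heap/tlsf submodule:

    #include <stddef.h>
    #include "tlsf.h"   /* header from the updated components/heap/tlsf submodule */

    /* Create a TLSF pool over `size` bytes at `mem` and return the bytes left
     * for allocations once the (now dynamic) metadata has been carved out. */
    static size_t pool_usable_bytes(void *mem, size_t size)
    {
        /* max_bytes = 0 -> let TLSF pick its default configuration */
        tlsf_t tlsf = tlsf_create_with_pool(mem, size, 0);
        if (tlsf == NULL) {
            return 0;   /* region too small to hold the TLSF metadata */
        }
        /* tlsf_size() is now per instance, so it takes the handle */
        return size - tlsf_size(tlsf);
    }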
@@ -8,11 +8,6 @@ set(includes "include")
 if(NOT CONFIG_HEAP_TLSF_USE_ROM_IMPL)
     set(priv_includes "tlsf")
     list(APPEND srcs "tlsf/tlsf.c")
-    if(NOT CMAKE_BUILD_EARLY_EXPANSION)
-        set_source_files_properties(tlsf/tlsf.c
-                                    PROPERTIES COMPILE_FLAGS
-                                    "-include ../tlsf_platform.h")
-    endif()
 endif()
 
 if(NOT CONFIG_HEAP_POISONING_DISABLED)
@@ -170,13 +170,13 @@ esp_err_t heap_caps_add_region(intptr_t start, intptr_t end)
 bool heap_caps_check_add_region_allowed(intptr_t heap_start, intptr_t heap_end, intptr_t start, intptr_t end)
 {
     /*
-     * We assume that in any region, the "start" must be stictly less than the end.
+     * We assume that in any region, the "start" must be strictly less than the end.
      * Specially, the 3rd scenario can be allowed. For example, allocate memory from heap,
      * then change the capability and call this function to create a new region for special
      * application.
     * This 'start = start' and 'end = end' scenario is incorrect because the same region
-     * cannot be add twice. For example, add the .bss memory to region twice, if not do the
-     * check, it will cause exception.
+     * cannot be added twice. In fact, registering the same memory region as a heap twice
+     * would cause a corruption and then an exception at runtime.
      *
      * the existing heap region                                  s(tart)                e(nd)
     *                                                            |----------------------|
@@ -201,7 +201,7 @@ bool heap_caps_check_add_region_allowed(intptr_t heap_start, intptr_t heap_end,
     bool condition_4 = start < heap_end && end > heap_end;     // if true then region not allowed
     bool condition_6 = start == heap_start && end == heap_end; // if true then region not allowed
 
-    return (condition_2 || condition_4 || condition_6) ? false: true;
+    return !(condition_2 || condition_4 || condition_6);
 }
 
 esp_err_t heap_caps_add_region_with_caps(const uint32_t caps[], intptr_t start, intptr_t end)
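The comment in the hunk above describes which candidate regions get rejected: anything that straddles a boundary of an existing heap, or that duplicates an existing heap exactly, while a region fully contained inside an already-registered heap stays allowed. A self-contained sketch of that rule (condition_2 is defined outside this hunk and is assumed here to mirror condition_4 at the start boundary):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the check above; condition_2's exact definition is an assumption. */
    static bool region_allowed(intptr_t heap_start, intptr_t heap_end,
                               intptr_t start, intptr_t end)
    {
        bool straddles_start = start < heap_start && end > heap_start; /* ~condition_2 */
        bool straddles_end   = start < heap_end   && end > heap_end;   /*  condition_4 */
        bool duplicate       = start == heap_start && end == heap_end; /*  condition_6 */
        return !(straddles_start || straddles_end || duplicate);
    }

    int main(void)
    {
        /* existing heap: [0x1000, 0x2000) */
        assert(!region_allowed(0x1000, 0x2000, 0x1000, 0x2000)); /* same region added twice */
        assert(!region_allowed(0x1000, 0x2000, 0x0800, 0x1800)); /* straddles the heap start */
        assert( region_allowed(0x1000, 0x2000, 0x1400, 0x1800)); /* fully inside: the allowed 3rd scenario */
        return 0;
    }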
@@ -142,7 +142,7 @@ size_t multi_heap_get_allocated_size_impl(multi_heap_handle_t heap, void *p)
 multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
 {
     assert(start_ptr);
-    if(size < (tlsf_size() + tlsf_block_size_min() + sizeof(heap_t))) {
+    if(size < (sizeof(heap_t))) {
         //Region too small to be a heap.
         return NULL;
     }
@@ -150,13 +150,16 @@ multi_heap_handle_t multi_heap_register_impl(void *start_ptr, size_t size)
     heap_t *result = (heap_t *)start_ptr;
     size -= sizeof(heap_t);
 
-    result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size);
+    /* Do not specify any maximum size for the allocations so that the default configuration is used */
+    const size_t max_bytes = 0;
+
+    result->heap_data = tlsf_create_with_pool(start_ptr + sizeof(heap_t), size, max_bytes);
     if(!result->heap_data) {
         return NULL;
     }
 
     result->lock = NULL;
-    result->free_bytes = size - tlsf_size();
+    result->free_bytes = size - tlsf_size(result->heap_data);
     result->pool_size = size;
     result->minimum_free_bytes = result->free_bytes;
     return result;
@@ -417,7 +420,6 @@ static void multi_heap_get_info_tlsf(void* ptr, size_t size, int used, void* user)
 
 void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
 {
-    uint32_t sl_interval;
     uint32_t overhead;
 
     memset(info, 0, sizeof(multi_heap_info_t));
@@ -431,13 +433,10 @@ void multi_heap_get_info_impl(multi_heap_handle_t heap, multi_heap_info_t *info)
     /* TLSF has an overhead per block. Calculate the total amount of overhead, it shall not be
      * part of the allocated bytes */
     overhead = info->allocated_blocks * tlsf_alloc_overhead();
-    info->total_allocated_bytes = (heap->pool_size - tlsf_size()) - heap->free_bytes - overhead;
+    info->total_allocated_bytes = (heap->pool_size - tlsf_size(heap->heap_data)) - heap->free_bytes - overhead;
     info->minimum_free_bytes = heap->minimum_free_bytes;
     info->total_free_bytes = heap->free_bytes;
-    if (info->largest_free_block) {
-        sl_interval = (1 << (31 - __builtin_clz(info->largest_free_block))) / SL_INDEX_COUNT;
-        info->largest_free_block = info->largest_free_block & ~(sl_interval - 1);
-    }
+    info->largest_free_block = tlsf_fit_size(heap->heap_data, info->largest_free_block);
     multi_heap_internal_unlock(heap);
 }
 #endif
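The removed lines rounded largest_free_block down to a TLSF second-level interval by hand; tlsf_fit_size() now does the equivalent fitting using the instance's actual configuration. A worked example of the old rounding, assuming SL_INDEX_COUNT is 32 (the real value depends on the TLSF configuration):

    #include <stdint.h>
    #include <stdio.h>

    #define SL_INDEX_COUNT 32   /* assumption: depends on the TLSF configuration */

    int main(void)
    {
        uint32_t largest_free_block = 3000;
        /* old multi_heap_get_info_impl() arithmetic, reproduced only to show the rounding;
         * __builtin_clz is the GCC built-in the removed code used */
        uint32_t sl_interval = (1 << (31 - __builtin_clz(largest_free_block))) / SL_INDEX_COUNT;
        uint32_t fitted = largest_free_block & ~(sl_interval - 1);
        printf("sl_interval = %u, fitted = %u\n", (unsigned)sl_interval, (unsigned)fitted); /* 64, 2944 */
        return 0;
    }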
@@ -15,18 +15,16 @@
 #include "esp_system.h"
 #include "heap_memory_layout.h"
 
-#include "../tlsf/tlsf.h"
-
 extern void set_leak_threshold(int threshold);
 
 /* NOTE: This is not a well-formed unit test, it leaks memory */
 TEST_CASE("Allocate new heap at runtime", "[heap]")
 {
-    // 84 bytes of overhead to account for multi_heap structs and eventual
-    // poisoning bytes + size of control_t from tlsf
-    const size_t HEAP_OVERHEAD_MAX = tlsf_size() + 84;
-    const size_t MIN_HEAP_SIZE = HEAP_OVERHEAD_MAX + tlsf_block_size_min();
-    const size_t BUF_SZ = MIN_HEAP_SIZE;
+    /* The value of the heap overhead is calculated for the worst case scenario
+     * where the tlsf has a size of metadata fixed at runtime.
+     */
+    const size_t HEAP_OVERHEAD_MAX = 3248;
+    const size_t BUF_SZ = 3500;
     void *buffer = malloc(BUF_SZ);
 
     TEST_ASSERT_NOT_NULL(buffer);
@@ -46,12 +44,8 @@ TEST_CASE("Allocate new heap at runtime", "[heap]")
 */
 TEST_CASE("Allocate new heap with new capability", "[heap]")
 {
-    // 84 bytes of overhead to account for multi_heap structs and eventual
-    // poisoning bytes + size of control_t from tlsf
-    const size_t HEAP_OVERHEAD = tlsf_size() + 84;
-    const size_t MIN_HEAP_SIZE = HEAP_OVERHEAD + tlsf_block_size_min();
-    const size_t BUF_SZ = MIN_HEAP_SIZE;
-    const size_t ALLOC_SZ = tlsf_block_size_min();
+    const size_t BUF_SZ = 3500;
+    const size_t ALLOC_SZ = 64;
 
     const uint32_t MALLOC_CAP_INVENTED = (1 << 30); /* this must be unused in esp_heap_caps.h */
 
@@ -67,7 +61,7 @@ TEST_CASE("Allocate new heap with new capability", "[heap]")
     TEST_ASSERT_NOT_NULL( heap_caps_malloc(ALLOC_SZ, MALLOC_CAP_INVENTED) );
 
     // set the leak threshold to a bigger value as this test leaks memory
-    set_leak_threshold(-3000);
+    set_leak_threshold(-4000);
 }
 
 /* NOTE: This is not a well-formed unit test.
@@ -76,8 +70,11 @@ TEST_CASE("Allocate new heap with new capability", "[heap]")
 
 TEST_CASE("Add .bss memory to heap region runtime", "[heap]")
 {
+    /* The value of the heap overhead is calculated for the worst case scenario
+     * where the tlsf has a size of metadata fixed at runtime.
+     */
 #define HEAP_OVERHEAD_MAX 3248
-#define BUF_SZ 3260
+#define BUF_SZ 3500
     static uint8_t s_buffer[BUF_SZ];
 
     printf("s_buffer start %08x end %08x\n", (intptr_t)s_buffer, (intptr_t)s_buffer + BUF_SZ);
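Since tlsf_size() can no longer be evaluated without a heap instance, the tests above switch from tlsf_size()-based arithmetic to a hard-coded worst-case overhead. A quick self-contained check of the headroom those constants leave (the 3248 and 3500 values are the ones used in the tests; 252 is simply their difference):

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        const size_t HEAP_OVERHEAD_MAX = 3248;  /* worst-case multi_heap + TLSF metadata, from the tests */
        const size_t BUF_SZ = 3500;             /* buffer registered as a new heap region */

        /* the buffer must leave usable space above the worst-case overhead */
        assert(BUF_SZ > HEAP_OVERHEAD_MAX);
        assert(BUF_SZ - HEAP_OVERHEAD_MAX == 252);  /* bytes left for actual allocations */
        return 0;
    }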
@@ -2,12 +2,30 @@
 #include "multi_heap.h"
 
 #include "../multi_heap_config.h"
+#include "../tlsf/tlsf.h"
 #include "../tlsf/tlsf_common.h"
 #include "../tlsf/tlsf_block_functions.h"
 
 #include <string.h>
 #include <assert.h>
 
+/* The functions __malloc__ and __free__ are used to call the libc
+ * malloc and free and allocate memory from the host heap. Since the test
+ * `TEST_CASE("multi_heap many random allocations", "[multi_heap]")`
+ * calls multi_heap_allocation_impl() with sizes that can go up to 8MB,
+ * an allocation on the heap will be preferred rather than the stack, which
+ * might not have the necessary memory.
+ */
+static void *__malloc__(size_t bytes)
+{
+    return malloc(bytes);
+}
+
+static void __free__(void *ptr)
+{
+    free(ptr);
+}
+
 /* Insurance against accidentally using libc heap functions in tests */
 #undef free
 #define free #error
@@ -61,10 +79,11 @@ TEST_CASE("multi_heap simple allocations", "[multi_heap]")
 
 TEST_CASE("multi_heap fragmentation", "[multi_heap]")
 {
-    uint8_t small_heap[4 * 1024];
+    const size_t HEAP_SIZE = 4 * 1024;
+    uint8_t small_heap[HEAP_SIZE];
     multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
 
-    const size_t alloc_size = 128;
+    const size_t alloc_size = 500;
 
     void *p[4];
     for (int i = 0; i < 4; i++) {
@@ -204,20 +223,22 @@ TEST_CASE("multi_heap defrag realloc", "[multi_heap]")
 #endif
 
 
-TEST_CASE("multi_heap many random allocations", "[multi_heap]")
+void multi_heap_allocation_impl(int heap_size)
 {
-    uint8_t big_heap[8 * 1024];
+    uint8_t *big_heap = (uint8_t *) __malloc__(heap_size);
     const int NUM_POINTERS = 64;
 
-    printf("Running multi-allocation test...\n");
+    printf("Running multi-allocation test with heap_size %d...\n", heap_size);
+
+    REQUIRE( big_heap );
+    multi_heap_handle_t heap = multi_heap_register(big_heap, heap_size);
 
     void *p[NUM_POINTERS] = { 0 };
     size_t s[NUM_POINTERS] = { 0 };
-    multi_heap_handle_t heap = multi_heap_register(big_heap, sizeof(big_heap));
 
     const size_t initial_free = multi_heap_free_size(heap);
 
-    const int ITERATIONS = 10000;
+    const int ITERATIONS = 5000;
 
     for (int i = 0; i < ITERATIONS; i++) {
         /* check all pointers allocated so far are valid inside big_heap */
@@ -228,11 +249,11 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]")
 
         uint8_t n = rand() % NUM_POINTERS;
 
-        if (rand() % 4 == 0) {
+        if (i % 4 == 0) {
             /* 1 in 4 iterations, try to realloc the buffer instead
                of using malloc/free
             */
-            size_t new_size = rand() % 1024;
+            size_t new_size = (rand() % 1023) + 1;
             void *new_p = multi_heap_realloc(heap, p[n], new_size);
             printf("realloc %p -> %p (%zu -> %zu)\n", p[n], new_p, s[n], new_size);
             multi_heap_check(heap, true);
@@ -241,13 +262,12 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]")
                 s[n] = new_size;
                 if (new_size > 0) {
                     REQUIRE( p[n] >= big_heap );
-                    REQUIRE( p[n] < big_heap + sizeof(big_heap) );
+                    REQUIRE( p[n] < big_heap + heap_size );
                     memset(p[n], n, new_size);
                 }
             }
             continue;
         }
 
         if (p[n] != NULL) {
             if (s[n] > 0) {
                 /* Verify pre-existing contents of p[n] */
@@ -271,14 +291,13 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]")
             printf("malloc %p (%zu)\n", p[n], s[n]);
             if (p[n] != NULL) {
                 REQUIRE( p[n] >= big_heap );
-                REQUIRE( p[n] < big_heap + sizeof(big_heap) );
+                REQUIRE( p[n] < big_heap + heap_size );
             }
             if (!multi_heap_check(heap, true)) {
                 printf("FAILED iteration %d after mallocing %p (%zu bytes)\n", i, p[n], s[n]);
                 multi_heap_dump(heap);
                 REQUIRE(0);
             }
 
             if (p[n] != NULL) {
                 memset(p[n], n, s[n]);
             }
@@ -294,6 +313,15 @@ TEST_CASE("multi_heap many random allocations", "[multi_heap]")
     }
 
     REQUIRE( initial_free == multi_heap_free_size(heap) );
+    __free__(big_heap);
+}
+
+TEST_CASE("multi_heap many random allocations", "[multi_heap]")
+{
+    size_t poolsize[] = { 15, 255, 4095, 8191 };
+    for (size_t i = 0; i < sizeof(poolsize)/sizeof(size_t); i++) {
+        multi_heap_allocation_impl(poolsize[i] * 1024);
+    }
 }
 
 TEST_CASE("multi_heap_get_info() function", "[multi_heap]")
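The hunk above turns the random-allocation test into a driver over several pool sizes; the largest entry is what the comment above __malloc__ calls "up to 8MB". A quick check of that arithmetic:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        const size_t poolsize[] = { 15, 255, 4095, 8191 };  /* KiB multipliers from the test */
        const size_t largest = poolsize[3] * 1024;          /* bytes handed to multi_heap_allocation_impl() */
        assert(largest == 8387584);                         /* 8191 KiB, just under 8 MiB */
        assert(largest < 8u * 1024 * 1024);
        return 0;
    }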
@@ -393,8 +421,9 @@ TEST_CASE("multi_heap minimum-size allocations", "[multi_heap]")
 
 TEST_CASE("multi_heap_realloc()", "[multi_heap]")
 {
+    const size_t HEAP_SIZE = 4 * 1024;
     const uint32_t PATTERN = 0xABABDADA;
-    uint8_t small_heap[4 * 1024];
+    uint8_t small_heap[HEAP_SIZE];
     multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap));
 
     uint32_t *a = (uint32_t *)multi_heap_malloc(heap, 64);
@@ -404,7 +433,6 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]")
     REQUIRE( b > a); /* 'b' takes the block after 'a' */
 
     *a = PATTERN;
-
     uint32_t *c = (uint32_t *)multi_heap_realloc(heap, a, 72);
     REQUIRE( multi_heap_check(heap, true));
     REQUIRE( c != NULL );
@@ -414,13 +442,12 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]")
 #ifndef MULTI_HEAP_POISONING_SLOW
     // "Slow" poisoning implementation doesn't reallocate in place, so these
     // test will fail...
 
     uint32_t *d = (uint32_t *)multi_heap_realloc(heap, c, 36);
     REQUIRE( multi_heap_check(heap, true) );
     REQUIRE( c == d ); /* 'c' block should be shrunk in-place */
     REQUIRE( *d == PATTERN);
-    uint32_t *e = (uint32_t *)multi_heap_malloc(heap, 64);
+    // biggest allocation possible to completely fill the block left free after it was reallocated
+    uint32_t *e = (uint32_t *)multi_heap_malloc(heap, 60);
     REQUIRE( multi_heap_check(heap, true));
     REQUIRE( a == e ); /* 'e' takes the block formerly occupied by 'a' */
 
@@ -429,11 +456,7 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]")
     REQUIRE( multi_heap_check(heap, true) );
     REQUIRE( f == b ); /* 'b' should be extended in-place, over space formerly occupied by 'd' */
 
-#ifdef MULTI_HEAP_POISONING
-#define TOO_MUCH 7420 + 1
-#else
-#define TOO_MUCH 7420 + 1
-#endif
+#define TOO_MUCH HEAP_SIZE + 1
     /* not enough contiguous space left in the heap */
     uint32_t *g = (uint32_t *)multi_heap_realloc(heap, e, TOO_MUCH);
     REQUIRE( g == NULL );
@@ -443,7 +466,8 @@ TEST_CASE("multi_heap_realloc()", "[multi_heap]")
     g = (uint32_t *)multi_heap_realloc(heap, e, 128);
     REQUIRE( multi_heap_check(heap, true) );
     REQUIRE( e == g ); /* 'g' extends 'e' in place, into the space formerly held by 'f' */
-#endif
+
+#endif // MULTI_HEAP_POISONING_SLOW
 }
 
 // TLSF only accepts heaps aligned to 4-byte boundary so
@@ -542,8 +566,12 @@ TEST_CASE("multi_heap poisoning detection", "[multi_heap]")
     /* register the heap memory. One free block only will be available */
     multi_heap_handle_t heap = multi_heap_register(heap_mem, HEAP_SIZE);
 
+    control_t *tlsf_ptr = (control_t*)(heap_mem + 20);
+    const size_t control_t_size = tlsf_ptr->size;
+    const size_t heap_t_size = 20;
+
     /* offset in memory at which to find the first free memory byte */
-    const size_t free_memory_offset = sizeof(multi_heap_info_t) + sizeof(control_t) + block_header_overhead;
+    const size_t free_memory_offset = heap_t_size + control_t_size + sizeof(block_header_t) - block_header_overhead;
 
     /* block header of the free block under test in the heap () */
     const block_header_t* block = (block_header_t*)(heap_mem + free_memory_offset - sizeof(block_header_t));
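With the metadata size known only at run time, the poisoning test reads the control structure's size back from the registered heap (the multi_heap bookkeeping occupies the first 20 bytes, the TLSF control follows and stores its own size) and derives the offset of the first free byte from it. A sketch of that offset arithmetic with illustrative values; everything except the 20-byte heap_t size from the test is an assumption:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        const size_t heap_t_size           = 20;    /* multi_heap bookkeeping, as in the test above */
        const size_t control_t_size        = 3132;  /* would be read from tlsf_ptr->size (assumed value) */
        const size_t block_header_size     = 16;    /* stand-in for sizeof(block_header_t) (assumed) */
        const size_t block_header_overhead = 4;     /* TLSF per-block overhead (assumed) */

        const size_t free_memory_offset = heap_t_size + control_t_size
                                          + block_header_size - block_header_overhead;
        printf("first free byte at offset %zu\n", free_memory_offset);  /* 3164 with these numbers */
        return 0;
    }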
Submodule components/heap/tlsf updated: ab17d6798d...13da0fff7f
@@ -1,51 +0,0 @@
-/*
- * SPDX-FileCopyrightText: 2017-2022 Espressif Systems (Shanghai) CO LTD
- *
- * SPDX-License-Identifier: Apache-2.0
- */
-#pragma once
-
-#include <stddef.h>
-#include <stdbool.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef ESP_PLATFORM
-#include "soc/soc.h"
-
-#if !CONFIG_SPIRAM
-#define TLSF_MAX_POOL_SIZE (SOC_DIRAM_DRAM_HIGH - SOC_DIRAM_DRAM_LOW)
-#else
-#define TLSF_MAX_POOL_SIZE SOC_EXTRAM_DATA_SIZE
-#endif
-#endif
-
-#if (TLSF_MAX_POOL_SIZE <= (256 * 1024))
-#define FL_INDEX_MAX_PLATFORM 18 //Each pool can have up 256KB
-#elif (TLSF_MAX_POOL_SIZE <= (512 * 1024))
-#define FL_INDEX_MAX_PLATFORM 19 //Each pool can have up 512KB
-#elif (TLSF_MAX_POOL_SIZE <= (1 * 1024 * 1024))
-#define FL_INDEX_MAX_PLATFORM 20 //Each pool can have up 1MB
-#elif (TLSF_MAX_POOL_SIZE <= (2 * 1024 * 1024))
-#define FL_INDEX_MAX_PLATFORM 21 //Each pool can have up 2MB
-#elif (TLSF_MAX_POOL_SIZE <= (4 * 1024 * 1024))
-#define FL_INDEX_MAX_PLATFORM 22 //Each pool can have up 4MB
-#elif (TLSF_MAX_POOL_SIZE <= (8 * 1024 * 1024))
-#define FL_INDEX_MAX_PLATFORM 23 //Each pool can have up 8MB
-#elif (TLSF_MAX_POOL_SIZE <= (16 * 1024 * 1024))
-#define FL_INDEX_MAX_PLATFORM 24 //Each pool can have up 16MB
-#elif (TLSF_MAX_POOL_SIZE <= (32 * 1024 * 1024))
-#define FL_INDEX_MAX_PLATFORM 25 //Each pool can have up 32MB
-#else
-#error "Higher TLSF pool sizes should be added for this new config"
-#endif
-
-/* Include from the TLSF submodule to force TLSF_INDEX_MAX_PLATFORM to be defined
- * when the TLSF repository is compiled in the IDF environment. */
-#include "tlsf_common.h"
-
-#ifdef __cplusplus
-}
-#endif
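The deleted header picked FL_INDEX_MAX_PLATFORM from the largest pool a target could hold: an index of N covers pools of up to 2^N bytes, which is why 18 maps to 256KB and 23 to 8MB in the table above. With the dynamic control size this bound is derived per pool when it is created, so the compile-time table is no longer needed. A quick check of the mapping:

    #include <assert.h>

    int main(void)
    {
        /* FL_INDEX_MAX_PLATFORM of N covers pools of up to 2^N bytes */
        assert((1u << 18) == 256u * 1024);        /* 18 -> 256 KB */
        assert((1u << 20) == 1u * 1024 * 1024);   /* 20 -> 1 MB   */
        assert((1u << 23) == 8u * 1024 * 1024);   /* 23 -> 8 MB   */
        assert((1u << 25) == 32u * 1024 * 1024);  /* 25 -> 32 MB  */
        return 0;
    }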