diff --git a/.gitignore b/.gitignore index e571eeb68f..6b853d91ce 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,8 @@ tools/unit-test-app/build # AWS IoT Examples require device-specific certs/keys examples/protocols/aws_iot/*/main/certs/*.pem.* +# gcov coverage reports +*.gcda +*.gcno +coverage.info +coverage_report/ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 219fc1b85c..465a8c7106 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -221,6 +221,15 @@ test_wl_on_host: - cd components/wear_levelling/test_wl_host - make test +test_multi_heap_on_host: + stage: test + image: $CI_DOCKER_REGISTRY/esp32-ci-env + tags: + - wl_host_test + script: + - cd components/heap/test_multi_heap_host + - make test + test_build_system: stage: test image: $CI_DOCKER_REGISTRY/esp32-ci-env diff --git a/components/driver/spi_common.c b/components/driver/spi_common.c index 168626e6fc..db0dc682da 100644 --- a/components/driver/spi_common.c +++ b/components/driver/spi_common.c @@ -31,7 +31,7 @@ #include "rom/lldesc.h" #include "driver/gpio.h" #include "driver/periph_ctrl.h" -#include "esp_heap_alloc_caps.h" +#include "esp_heap_caps.h" #include "driver/spi_common.h" diff --git a/components/driver/spi_master.c b/components/driver/spi_master.c index aa53b55725..f34d5bc90c 100644 --- a/components/driver/spi_master.c +++ b/components/driver/spi_master.c @@ -53,11 +53,12 @@ queue and re-enabling the interrupt will trigger the interrupt again, which can #include "freertos/task.h" #include "freertos/ringbuf.h" #include "soc/soc.h" +#include "soc/soc_memory_layout.h" #include "soc/dport_reg.h" #include "rom/lldesc.h" #include "driver/gpio.h" #include "driver/periph_ctrl.h" -#include "esp_heap_alloc_caps.h" +#include "esp_heap_caps.h" typedef struct spi_device_t spi_device_t; @@ -122,8 +123,8 @@ esp_err_t spi_bus_initialize(spi_host_device_t host, const spi_bus_config_t *bus int dma_desc_ct=(bus_config->max_transfer_sz+SPI_MAX_DMA_LEN-1)/SPI_MAX_DMA_LEN; if (dma_desc_ct==0) dma_desc_ct=1; //default to 4k when max is not given spihost[host]->max_transfer_sz = dma_desc_ct*SPI_MAX_DMA_LEN; - spihost[host]->dmadesc_tx=pvPortMallocCaps(sizeof(lldesc_t)*dma_desc_ct, MALLOC_CAP_DMA); - spihost[host]->dmadesc_rx=pvPortMallocCaps(sizeof(lldesc_t)*dma_desc_ct, MALLOC_CAP_DMA); + spihost[host]->dmadesc_tx=heap_caps_malloc(sizeof(lldesc_t)*dma_desc_ct, MALLOC_CAP_DMA); + spihost[host]->dmadesc_rx=heap_caps_malloc(sizeof(lldesc_t)*dma_desc_ct, MALLOC_CAP_DMA); if (!spihost[host]->dmadesc_tx || !spihost[host]->dmadesc_rx) goto nomem; } esp_intr_alloc(spicommon_irqsource_for_host(host), ESP_INTR_FLAG_INTRDISABLED, spi_intr, (void*)spihost[host], &spihost[host]->intr); diff --git a/components/driver/spi_slave.c b/components/driver/spi_slave.c index 2cff6e00b6..7600b4e45c 100644 --- a/components/driver/spi_slave.c +++ b/components/driver/spi_slave.c @@ -32,11 +32,12 @@ #include "freertos/task.h" #include "freertos/ringbuf.h" #include "soc/soc.h" +#include "soc/soc_memory_layout.h" #include "soc/dport_reg.h" #include "rom/lldesc.h" #include "driver/gpio.h" #include "driver/periph_ctrl.h" -#include "esp_heap_alloc_caps.h" +#include "esp_heap_caps.h" static const char *SPI_TAG = "spi_slave"; #define SPI_CHECK(a, str, ret_val) \ @@ -89,8 +90,8 @@ esp_err_t spi_slave_initialize(spi_host_device_t host, const spi_bus_config_t *b int dma_desc_ct = (bus_config->max_transfer_sz + SPI_MAX_DMA_LEN - 1) / SPI_MAX_DMA_LEN; if (dma_desc_ct == 0) dma_desc_ct = 1; //default to 4k when max is not given spihost[host]->max_transfer_sz = dma_desc_ct 
* SPI_MAX_DMA_LEN; - spihost[host]->dmadesc_tx = pvPortMallocCaps(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA); - spihost[host]->dmadesc_rx = pvPortMallocCaps(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA); + spihost[host]->dmadesc_tx = heap_caps_malloc(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA); + spihost[host]->dmadesc_rx = heap_caps_malloc(sizeof(lldesc_t) * dma_desc_ct, MALLOC_CAP_DMA); if (!spihost[host]->dmadesc_tx || !spihost[host]->dmadesc_rx) goto nomem; } else { //We're limited to non-DMA transfers: the SPI work registers can hold 64 bytes at most. diff --git a/components/driver/test/test_spi_master.c b/components/driver/test/test_spi_master.c index 86eea5cd46..e58371f246 100644 --- a/components/driver/test/test_spi_master.c +++ b/components/driver/test/test_spi_master.c @@ -18,7 +18,7 @@ #include "soc/dport_reg.h" #include "soc/spi_reg.h" #include "soc/spi_struct.h" -#include "esp_heap_alloc_caps.h" +#include "esp_heap_caps.h" static void check_spi_pre_n_for(int clk, int pre, int n) @@ -119,8 +119,8 @@ static void spi_test(spi_device_handle_t handle, int num_bytes) { esp_err_t ret; int x; srand(num_bytes); - char *sendbuf=pvPortMallocCaps(num_bytes, MALLOC_CAP_DMA); - char *recvbuf=pvPortMallocCaps(num_bytes, MALLOC_CAP_DMA); + char *sendbuf=heap_caps_malloc(num_bytes, MALLOC_CAP_DMA); + char *recvbuf=heap_caps_malloc(num_bytes, MALLOC_CAP_DMA); for (x=0; x - -#include - -#include "esp_heap_alloc_caps.h" -#include "spiram.h" -#include "esp_log.h" -#include - -static const char* TAG = "heap_alloc_caps"; - -/* -This file, combined with a region allocator that supports tags, solves the problem that the ESP32 has RAM that's -slightly heterogeneous. Some RAM can be byte-accessed, some allows only 32-bit accesses, some can execute memory, -some can be remapped by the MMU to only be accessed by a certain PID etc. In order to allow the most flexible -memory allocation possible, this code makes it possible to request memory that has certain capabilities. The -code will then use its knowledge of how the memory is configured along with a priority scheme to allocate that -memory in the most sane way possible. This should optimize the amount of RAM accessible to the code without -hardwiring addresses. -*/ - - -//Amount of priority slots for the tag descriptors. -#define NO_PRIOS 3 - - -typedef struct { - const char *name; - uint32_t prio[NO_PRIOS]; - bool aliasedIram; -} tag_desc_t; - -/* -Tag descriptors. These describe the capabilities of a bit of memory that's tagged with the index into this table. -Each tag contains NO_PRIOS entries; later entries are only taken if earlier ones can't fulfill the memory request. -Make sure there are never more than HEAPREGIONS_MAX_TAGCOUNT (in heap_regions.h) tags (ex the last empty marker) - -WARNING: The current code assumes the ROM stacks are located in tag 1; no allocation from this tag can be done until -the FreeRTOS scheduler has started. 
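The deleted comment above describes the capability/priority idea that carries over unchanged into the new heap component. As caller-side orientation, an illustrative sketch (not part of the patch) that simply mirrors the heap_caps_malloc calls in the SPI driver hunks earlier in this diff; the helper name is hypothetical, the API names and the size-then-caps argument order are taken from this diff:

#include <string.h>
#include "esp_heap_caps.h"

/* Illustrative sketch: request a buffer the DMA engine can reach, the same way the
   spi_master.c/spi_slave.c hunks allocate their lldesc_t descriptor arrays. */
static void *alloc_dma_scratch(size_t len)
{
    void *buf = heap_caps_malloc(len, MALLOC_CAP_DMA);   /* size first, caps second */
    if (buf != NULL) {
        memset(buf, 0, len);          /* DMA-capable RAM is also byte-accessible */
    }
    return buf;                       /* release later with heap_caps_free() */
}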
-*/ -static const tag_desc_t tag_desc[]={ - { "DRAM", { MALLOC_CAP_DMA|MALLOC_CAP_8BIT, MALLOC_CAP_32BIT, 0 }, false}, //Tag 0: Plain ole D-port RAM - { "D/IRAM", { 0, MALLOC_CAP_DMA|MALLOC_CAP_8BIT, MALLOC_CAP_32BIT|MALLOC_CAP_EXEC }, true}, //Tag 1: Plain ole D-port RAM which has an alias on the I-port - { "IRAM", { MALLOC_CAP_EXEC|MALLOC_CAP_32BIT, 0, 0 }, false}, //Tag 2: IRAM - { "PID2IRAM", { MALLOC_CAP_PID2, 0, MALLOC_CAP_EXEC|MALLOC_CAP_32BIT }, false}, //Tag 3-8: PID 2-7 IRAM - { "PID3IRAM", { MALLOC_CAP_PID3, 0, MALLOC_CAP_EXEC|MALLOC_CAP_32BIT }, false}, // - { "PID4IRAM", { MALLOC_CAP_PID4, 0, MALLOC_CAP_EXEC|MALLOC_CAP_32BIT }, false}, // - { "PID5IRAM", { MALLOC_CAP_PID5, 0, MALLOC_CAP_EXEC|MALLOC_CAP_32BIT }, false}, // - { "PID6IRAM", { MALLOC_CAP_PID6, 0, MALLOC_CAP_EXEC|MALLOC_CAP_32BIT }, false}, // - { "PID7IRAM", { MALLOC_CAP_PID7, 0, MALLOC_CAP_EXEC|MALLOC_CAP_32BIT }, false}, // - { "PID2DRAM", { MALLOC_CAP_PID2, MALLOC_CAP_8BIT, MALLOC_CAP_32BIT }, false}, //Tag 9-14: PID 2-7 DRAM - { "PID3DRAM", { MALLOC_CAP_PID3, MALLOC_CAP_8BIT, MALLOC_CAP_32BIT }, false}, // - { "PID4DRAM", { MALLOC_CAP_PID4, MALLOC_CAP_8BIT, MALLOC_CAP_32BIT }, false}, // - { "PID5DRAM", { MALLOC_CAP_PID5, MALLOC_CAP_8BIT, MALLOC_CAP_32BIT }, false}, // - { "PID6DRAM", { MALLOC_CAP_PID6, MALLOC_CAP_8BIT, MALLOC_CAP_32BIT }, false}, // - { "PID7DRAM", { MALLOC_CAP_PID7, MALLOC_CAP_8BIT, MALLOC_CAP_32BIT }, false}, // - { "SPISRAM", { MALLOC_CAP_SPISRAM, 0, MALLOC_CAP_DMA|MALLOC_CAP_8BIT|MALLOC_CAP_32BIT}, false}, //Tag 15: SPI SRAM data - { "", { MALLOC_CAP_INVALID, MALLOC_CAP_INVALID, MALLOC_CAP_INVALID }, false} //End -}; - -/* -Region descriptors. These describe all regions of memory available, and tag them according to the -capabilities the hardware has. This array is not marked constant; the initialization code may want to -change the tags of some regions because eg BT is detected, applications are loaded etc. - -The priorities here roughly work like this: -- For a normal malloc (MALLOC_CAP_8BIT), give away the DRAM-only memory first, then pass off any dual-use IRAM regions, - finally eat into the application memory. -- For a malloc where 32-bit-aligned-only access is okay, first allocate IRAM, then DRAM, finally application IRAM. -- Application mallocs (PIDx) will allocate IRAM first, if possible, then DRAM. -- Most other malloc caps only fit in one region anyway. - -These region descriptors are very ESP32 specific, because they describe the memory pools available there. - -Because of requirements in the coalescing code as well as the heap allocator itself, this list should always -be sorted from low to high start address. - -This array is *NOT* const because it gets modified depending on what pools are/aren't available. -*/ -static HeapRegionTagged_t regions[]={ - { (uint8_t *)0x3F800000, 0x20000, 15, 0}, //SPI SRAM, if available - { (uint8_t *)0x3FFAE000, 0x2000, 0, 0}, //pool 16 <- used for rom code - { (uint8_t *)0x3FFB0000, 0x8000, 0, 0}, //pool 15 <- if BT is enabled, used as BT HW shared memory - { (uint8_t *)0x3FFB8000, 0x8000, 0, 0}, //pool 14 <- if BT is enabled, used data memory for BT ROM functions. 
- { (uint8_t *)0x3FFC0000, 0x2000, 0, 0}, //pool 10-13, mmu page 0 - { (uint8_t *)0x3FFC2000, 0x2000, 0, 0}, //pool 10-13, mmu page 1 - { (uint8_t *)0x3FFC4000, 0x2000, 0, 0}, //pool 10-13, mmu page 2 - { (uint8_t *)0x3FFC6000, 0x2000, 0, 0}, //pool 10-13, mmu page 3 - { (uint8_t *)0x3FFC8000, 0x2000, 0, 0}, //pool 10-13, mmu page 4 - { (uint8_t *)0x3FFCA000, 0x2000, 0, 0}, //pool 10-13, mmu page 5 - { (uint8_t *)0x3FFCC000, 0x2000, 0, 0}, //pool 10-13, mmu page 6 - { (uint8_t *)0x3FFCE000, 0x2000, 0, 0}, //pool 10-13, mmu page 7 - { (uint8_t *)0x3FFD0000, 0x2000, 0, 0}, //pool 10-13, mmu page 8 - { (uint8_t *)0x3FFD2000, 0x2000, 0, 0}, //pool 10-13, mmu page 9 - { (uint8_t *)0x3FFD4000, 0x2000, 0, 0}, //pool 10-13, mmu page 10 - { (uint8_t *)0x3FFD6000, 0x2000, 0, 0}, //pool 10-13, mmu page 11 - { (uint8_t *)0x3FFD8000, 0x2000, 0, 0}, //pool 10-13, mmu page 12 - { (uint8_t *)0x3FFDA000, 0x2000, 0, 0}, //pool 10-13, mmu page 13 - { (uint8_t *)0x3FFDC000, 0x2000, 0, 0}, //pool 10-13, mmu page 14 - { (uint8_t *)0x3FFDE000, 0x2000, 0, 0}, //pool 10-13, mmu page 15 - { (uint8_t *)0x3FFE0000, 0x4000, 1, 0x400BC000}, //pool 9 blk 1 - { (uint8_t *)0x3FFE4000, 0x4000, 1, 0x400B8000}, //pool 9 blk 0 - { (uint8_t *)0x3FFE8000, 0x8000, 1, 0x400B0000}, //pool 8 <- can be remapped to ROM, used for MAC dump - { (uint8_t *)0x3FFF0000, 0x8000, 1, 0x400A8000}, //pool 7 <- can be used for MAC dump - { (uint8_t *)0x3FFF8000, 0x4000, 1, 0x400A4000}, //pool 6 blk 1 <- can be used as trace memory - { (uint8_t *)0x3FFFC000, 0x4000, 1, 0x400A0000}, //pool 6 blk 0 <- can be used as trace memory - { (uint8_t *)0x40070000, 0x8000, 2, 0}, //pool 0 - { (uint8_t *)0x40078000, 0x8000, 2, 0}, //pool 1 - { (uint8_t *)0x40080000, 0x2000, 2, 0}, //pool 2-5, mmu page 0 - { (uint8_t *)0x40082000, 0x2000, 2, 0}, //pool 2-5, mmu page 1 - { (uint8_t *)0x40084000, 0x2000, 2, 0}, //pool 2-5, mmu page 2 - { (uint8_t *)0x40086000, 0x2000, 2, 0}, //pool 2-5, mmu page 3 - { (uint8_t *)0x40088000, 0x2000, 2, 0}, //pool 2-5, mmu page 4 - { (uint8_t *)0x4008A000, 0x2000, 2, 0}, //pool 2-5, mmu page 5 - { (uint8_t *)0x4008C000, 0x2000, 2, 0}, //pool 2-5, mmu page 6 - { (uint8_t *)0x4008E000, 0x2000, 2, 0}, //pool 2-5, mmu page 7 - { (uint8_t *)0x40090000, 0x2000, 2, 0}, //pool 2-5, mmu page 8 - { (uint8_t *)0x40092000, 0x2000, 2, 0}, //pool 2-5, mmu page 9 - { (uint8_t *)0x40094000, 0x2000, 2, 0}, //pool 2-5, mmu page 10 - { (uint8_t *)0x40096000, 0x2000, 2, 0}, //pool 2-5, mmu page 11 - { (uint8_t *)0x40098000, 0x2000, 2, 0}, //pool 2-5, mmu page 12 - { (uint8_t *)0x4009A000, 0x2000, 2, 0}, //pool 2-5, mmu page 13 - { (uint8_t *)0x4009C000, 0x2000, 2, 0}, //pool 2-5, mmu page 14 - { (uint8_t *)0x4009E000, 0x2000, 2, 0}, //pool 2-5, mmu page 15 - { NULL, 0, 0, 0} //end -}; - -/* For the startup code, the stacks live in memory tagged by this tag. Hence, we only enable allocating from this tag - once FreeRTOS has started up completely. */ -#define NONOS_STACK_TAG 1 - -static bool nonos_stack_in_use=true; - -void heap_alloc_enable_nonos_stack_tag() -{ - nonos_stack_in_use=false; -} - -//Modify regions array to disable the given range of memory. 
-static void disable_mem_region(void *from, void *to) { - int i; - //Align from and to on word boundaries - from=(void*)((uint32_t)from&~3); - to=(void*)(((uint32_t)to+3)&~3); - for (i=0; regions[i].xSizeInBytes!=0; i++) { - void *regStart=regions[i].pucStartAddress; - void *regEnd=regions[i].pucStartAddress+regions[i].xSizeInBytes; - if (regStart>=from && regEnd<=to) { - //Entire region falls in the range. Disable entirely. - regions[i].xTag=-1; - } else if (regStart>=from && regEnd>to && regStartfrom && regEnd<=to) { - //End of the region falls in the range. Modify length. - regions[i].xSizeInBytes-=(uint8_t *)regEnd-(uint8_t *)from; - } else if (regStartto) { - //Range punches a hole in the region! We do not support this. - ESP_EARLY_LOGE(TAG, "region %d: hole punching is not supported!", i); - regions[i].xTag=-1; //Just disable memory region. That'll teach them! - } - } -} - - -/* -Warning: These variables are assumed to have the start and end of the data and iram -area used statically by the program, respectively. These variables are defined in the ld -file. -*/ -extern int _data_start, _heap_start, _init_start, _iram_text_end; - -/* -Initialize the heap allocator. We pass it a bunch of region descriptors, but we need to modify those first to accommodate for -the data as loaded by the bootloader. -ToDo: The regions are different when stuff like trace memory, BT, ... is used. Modify the regions struct on the fly for this. -Same with loading of apps. Same with using SPI RAM. -*/ -void heap_alloc_caps_init() { - int i; - //Compile-time assert to see if we don't have more tags than is set in heap_regions.h - _Static_assert((sizeof(tag_desc)/sizeof(tag_desc[0]))-1 <= HEAPREGIONS_MAX_TAGCOUNT, "More than HEAPREGIONS_MAX_TAGCOUNT tags defined!"); - //Disable the bits of memory where this code is loaded. - disable_mem_region(&_data_start, &_heap_start); //DRAM used by bss/data static variables - disable_mem_region(&_init_start, &_iram_text_end); //IRAM used by code - disable_mem_region((void*)0x40070000, (void*)0x40078000); //CPU0 cache region - disable_mem_region((void*)0x40078000, (void*)0x40080000); //CPU1 cache region - - /* Warning: The ROM stack is located in the 0x3ffe0000 area. We do not specifically disable that area here because - after the scheduler has started, the ROM stack is not used anymore by anything. We handle it instead by not allowing - any mallocs from tag 1 (the IRAM/DRAM region) until the scheduler has started. - - The 0x3ffe0000 region also contains static RAM for various ROM functions. The following lines - reserve the regions for UART and ETSC, so these functions are usable. Libraries like xtos, which are - not usable in FreeRTOS anyway, are commented out in the linker script so they cannot be used; we - do not disable their memory regions here and they will be used as general purpose heap memory. - - Enabling the heap allocator for this region but disabling allocation here until FreeRTOS is started up - is a somewhat risky action in theory, because on initializing the allocator, vPortDefineHeapRegionsTagged - will go and write linked list entries at the start and end of all regions. 
For the ESP32, these linked - list entries happen to end up in a region that is not touched by the stack; they can be placed safely there.*/ - disable_mem_region((void*)0x3ffe0000, (void*)0x3ffe0440); //Reserve ROM PRO data region - disable_mem_region((void*)0x3ffe4000, (void*)0x3ffe4350); //Reserve ROM APP data region - -#if CONFIG_BT_ENABLED -#if CONFIG_BT_DRAM_RELEASE - disable_mem_region((void*)0x3ffb0000, (void*)0x3ffb3000); //Reserve BT data region - disable_mem_region((void*)0x3ffb8000, (void*)0x3ffbbb28); //Reserve BT data region - disable_mem_region((void*)0x3ffbdb28, (void*)0x3ffc0000); //Reserve BT data region -#else - disable_mem_region((void*)0x3ffb0000, (void*)0x3ffc0000); //Reserve BT hardware shared memory & BT data region -#endif - disable_mem_region((void*)0x3ffae000, (void*)0x3ffaff10); //Reserve ROM data region, inc region needed for BT ROM routines -#else - disable_mem_region((void*)0x3ffae000, (void*)0x3ffae2a0); //Reserve ROM data region -#endif - -#if CONFIG_MEMMAP_TRACEMEM -#if CONFIG_MEMMAP_TRACEMEM_TWOBANKS - disable_mem_region((void*)0x3fff8000, (void*)0x40000000); //Reserve trace mem region -#else - disable_mem_region((void*)0x3fff8000, (void*)0x3fffc000); //Reserve trace mem region -#endif -#endif - -#if 0 - enable_spi_sram(); -#else - disable_mem_region((void*)0x3f800000, (void*)0x3f820000); //SPI SRAM not installed -#endif - - //The heap allocator will treat every region given to it as separate. In order to get bigger ranges of contiguous memory, - //it's useful to coalesce adjacent regions that have the same tag. - - for (i=1; regions[i].xSizeInBytes!=0; i++) { - if (regions[i].pucStartAddress == (regions[i-1].pucStartAddress + regions[i-1].xSizeInBytes) && - regions[i].xTag == regions[i-1].xTag ) { - regions[i-1].xTag=-1; - regions[i].pucStartAddress=regions[i-1].pucStartAddress; - regions[i].xSizeInBytes+=regions[i-1].xSizeInBytes; - } - } - - ESP_EARLY_LOGI(TAG, "Initializing. RAM available for dynamic allocation:"); - for (i=0; regions[i].xSizeInBytes!=0; i++) { - if (regions[i].xTag != -1) { - ESP_EARLY_LOGI(TAG, "At %08X len %08X (%d KiB): %s", - (int)regions[i].pucStartAddress, regions[i].xSizeInBytes, regions[i].xSizeInBytes/1024, tag_desc[regions[i].xTag].name); - } - } - //Initialize the malloc implementation. - vPortDefineHeapRegionsTagged( regions ); -} - -//First and last words of the D/IRAM region, for both the DRAM address as well as the IRAM alias. -#define DIRAM_IRAM_START 0x400A0000 -#define DIRAM_IRAM_END 0x400BFFFC -#define DIRAM_DRAM_START 0x3FFE0000 -#define DIRAM_DRAM_END 0x3FFFFFFC - -/* - This takes a memory chunk in a region that can be addressed as both DRAM as well as IRAM. It will convert it to - IRAM in such a way that it can be later freed. It assumes both the address as wel as the length to be word-aligned. - It returns a region that's 1 word smaller than the region given because it stores the original Dram address there. - - In theory, we can also make this work by prepending a struct that looks similar to the block link struct used by the - heap allocator itself, which will allow inspection tools relying on any block returned from any sort of malloc to - have such a block in front of it, work. We may do this later, if/when there is demand for it. For now, a simple - pointer is used. 
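The pointer-stashing scheme described above is easier to see with both halves side by side. A condensed, illustrative sketch (function names are hypothetical; the constants and the mirror arithmetic are copied from this file, and the recovery step is the one vPortFree() performs further down):

#include <stddef.h>
#include <stdint.h>

#define DIRAM_IRAM_START 0x400A0000
#define DIRAM_DRAM_END   0x3FFFFFFC

static void *to_iram_alias(void *dram_addr, size_t len)   /* addr and len word-aligned */
{
    uint32_t dstart = (uint32_t)dram_addr;                /* first word of the DRAM block */
    uint32_t dend   = dstart + len - 4;                   /* last word of the DRAM block  */
    uint32_t *iptr  = (uint32_t *)(DIRAM_IRAM_START + (DIRAM_DRAM_END - dend));
    *iptr = dstart;                                       /* stash the original DRAM address */
    return (void *)(iptr + 1);                            /* hand out the word after it      */
}

static uint32_t stashed_dram_addr(void *iram_ptr)
{
    return ((uint32_t *)iram_ptr)[-1];                    /* what vPortFree() reads back */
}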
-*/ -static void *dram_alloc_to_iram_addr(void *addr, size_t len) -{ - uint32_t dstart=(int)addr; //First word - uint32_t dend=((int)addr)+len-4; //Last word - configASSERT(dstart>=DIRAM_DRAM_START); - configASSERT(dend<=DIRAM_DRAM_END); - configASSERT((dstart&3)==0); - configASSERT((dend&3)==0); - uint32_t istart=DIRAM_IRAM_START+(DIRAM_DRAM_END-dend); - uint32_t *iptr=(uint32_t*)istart; - *iptr=dstart; - return (void*)(iptr+1); -} - -/* -Standard malloc() implementation. Will return standard no-frills byte-accessible data memory. -*/ -void *pvPortMalloc( size_t xWantedSize ) -{ - return pvPortMallocCaps( xWantedSize, MALLOC_CAP_8BIT ); -} - -/* - Standard free() implementation. Will pass memory on to the allocator unless it's an IRAM address where the - actual meory is allocated in DRAM, it will convert to the DRAM address then. - */ -void vPortFree( void *pv ) -{ - if (((int)pv>=DIRAM_IRAM_START) && ((int)pv<=DIRAM_IRAM_END)) { - //Memory allocated here is actually allocated in the DRAM alias region and - //cannot be de-allocated as usual. dram_alloc_to_iram_addr stores a pointer to - //the equivalent DRAM address, though; free that. - uint32_t* dramAddrPtr=(uint32_t*)pv; - return vPortFreeTagged((void*)dramAddrPtr[-1]); - } - - return vPortFreeTagged(pv); -} - -/* -Routine to allocate a bit of memory with certain capabilities. caps is a bitfield of MALLOC_CAP_* bits. -*/ -void *pvPortMallocCaps( size_t xWantedSize, uint32_t caps ) -{ - int prio; - int tag, j; - void *ret=NULL; - uint32_t remCaps; - if (caps & MALLOC_CAP_EXEC) { - //MALLOC_CAP_EXEC forces an alloc from IRAM. There is a region which has both this - //as well as the following caps, but the following caps are not possible for IRAM. - //Thus, the combination is impossible and we return NULL directly, even although our tag_desc - //table would indicate there is a tag for this. - if ((caps & MALLOC_CAP_8BIT) || (caps & MALLOC_CAP_DMA)) { - return NULL; - } - //If any, EXEC memory should be 32-bit aligned, so round up to the next multiple of 4. - xWantedSize=(xWantedSize+3)&(~3); - } - for (prio=0; prio= 0x3FFAE000 && (int)ptr < 0x40000000 ); -} - -#ifdef __cplusplus -} -#endif - -#endif //HEAP_ALLOC_CAPS_H diff --git a/components/esp32/include/esp_system.h b/components/esp32/include/esp_system.h index 63592081f1..fb7c466e51 100644 --- a/components/esp32/include/esp_system.h +++ b/components/esp32/include/esp_system.h @@ -95,6 +95,13 @@ uint32_t esp_get_free_heap_size(void); */ uint32_t system_get_free_heap_size(void) __attribute__ ((deprecated)); +/** + * @brief Get the minimum heap that has ever been available + * + * @return Minimum free heap ever available + */ +uint32_t esp_get_minimum_free_heap_size( void ); + /** * @brief Get one random 32-bit word from hardware RNG * diff --git a/components/esp32/include/heap_alloc_caps.h b/components/esp32/include/heap_alloc_caps.h deleted file mode 100644 index edab15d52d..0000000000 --- a/components/esp32/include/heap_alloc_caps.h +++ /dev/null @@ -1,3 +0,0 @@ -#pragma once -#warning heap_alloc_caps.h has been renamed to esp_heap_alloc_caps.h. The old header file is deprecated and will be removed in v3.0. 
-#include "esp_heap_alloc_caps.h" diff --git a/components/esp32/ld/esp32.common.ld b/components/esp32/ld/esp32.common.ld index 475b2b2bfd..e80ad0b076 100644 --- a/components/esp32/ld/esp32.common.ld +++ b/components/esp32/ld/esp32.common.ld @@ -83,11 +83,12 @@ SECTIONS _iram_text_start = ABSOLUTE(.); *(.iram1 .iram1.*) *libfreertos.a:(.literal .text .literal.* .text.*) + *libheap.a:multi_heap.o(.literal .text .literal.* .text.*) *libesp32.a:panic.o(.literal .text .literal.* .text.*) *libesp32.a:core_dump.o(.literal .text .literal.* .text.*) - *libesp32.a:heap_alloc_caps.o(.literal .text .literal.* .text.*) *libapp_trace.a:(.literal .text .literal.* .text.*) *libxtensa-debug-module.a:eri.o(.literal .text .literal.* .text.*) + *libesp32.a:app_trace.o(.literal .text .literal.* .text.*) *libphy.a:(.literal .text .literal.* .text.*) *librtc.a:(.literal .text .literal.* .text.*) *libsoc.a:(.literal .text .literal.* .text.*) @@ -114,6 +115,7 @@ SECTIONS *libesp32.a:panic.o(.rodata .rodata.*) *libphy.a:(.rodata .rodata.*) *libapp_trace.a:(.rodata .rodata.*) + *libheap.a:multi_heap.o(.rodata .rodata.*) _data_end = ABSOLUTE(.); . = ALIGN(4); } >dram0_0_seg diff --git a/components/esp32/system_api.c b/components/esp32/system_api.c index c79761fd7d..f322c93a3b 100644 --- a/components/esp32/system_api.c +++ b/components/esp32/system_api.c @@ -33,6 +33,7 @@ #include "freertos/FreeRTOS.h" #include "freertos/task.h" #include "freertos/xtensa_api.h" +#include "esp_heap_caps.h" static const char* TAG = "system_api"; @@ -330,9 +331,19 @@ void IRAM_ATTR esp_restart_noos() void system_restart(void) __attribute__((alias("esp_restart"))); -uint32_t esp_get_free_heap_size(void) +void system_restore(void) { - return xPortGetFreeHeapSize(); + esp_wifi_restore(); +} + +uint32_t esp_get_free_heap_size( void ) +{ + return heap_caps_get_free_size( MALLOC_CAP_8BIT ); +} + +uint32_t esp_get_minimum_free_heap_size( void ) +{ + return heap_caps_get_minimum_free_size( MALLOC_CAP_8BIT ); } uint32_t system_get_free_heap_size(void) __attribute__((alias("esp_get_free_heap_size"))); diff --git a/components/esp32/test/test_malloc_caps.c b/components/esp32/test/test_malloc_caps.c deleted file mode 100644 index 0f5129ad20..0000000000 --- a/components/esp32/test/test_malloc_caps.c +++ /dev/null @@ -1,64 +0,0 @@ -/* - Tests for the capabilities-based memory allocator. 
-*/ - -#include -#include -#include "unity.h" -#include "rom/ets_sys.h" -#include "esp_heap_alloc_caps.h" -#include - - -TEST_CASE("Capabilities allocator test", "[esp32]") -{ - char *m1, *m2[10]; - int x; - size_t free8start, free32start, free8, free32; - free8start=xPortGetFreeHeapSizeCaps(MALLOC_CAP_8BIT); - free32start=xPortGetFreeHeapSizeCaps(MALLOC_CAP_32BIT); - printf("Free 8bit-capable memory: %dK, 32-bit capable memory %dK\n", free8start, free32start); - TEST_ASSERT(free32start>free8start); - printf("Allocating 10K of 8-bit capable RAM\n"); - m1=pvPortMallocCaps(10*1024, MALLOC_CAP_8BIT); - printf("--> %p\n", m1); - free8=xPortGetFreeHeapSizeCaps(MALLOC_CAP_8BIT); - free32=xPortGetFreeHeapSizeCaps(MALLOC_CAP_32BIT); - printf("Free 8bit-capable memory: %dK, 32-bit capable memory %dK\n", free8, free32); - //Both should have gone down by 10K; 8bit capable ram is also 32-bit capable - TEST_ASSERT(free8<(free8start-10*1024)); - TEST_ASSERT(free32<(free32start-10*1024)); - //Assume we got DRAM back - TEST_ASSERT((((int)m1)&0xFF000000)==0x3F000000); - free(m1); - printf("Freeing; allocating 10K of 32K-capable RAM\n"); - m1=pvPortMallocCaps(10*1024, MALLOC_CAP_32BIT); - printf("--> %p\n", m1); - free8=xPortGetFreeHeapSizeCaps(MALLOC_CAP_8BIT); - free32=xPortGetFreeHeapSizeCaps(MALLOC_CAP_32BIT); - printf("Free 8bit-capable memory: %dK, 32-bit capable memory %dK\n", free8, free32); - //Only 32-bit should have gone down by 10K: 32-bit isn't necessarily 8bit capable - TEST_ASSERT(free32<(free32start-10*1024)); - TEST_ASSERT(free8==free8start); - //Assume we got IRAM back - TEST_ASSERT((((int)m1)&0xFF000000)==0x40000000); - free(m1); - printf("Allocating impossible caps\n"); - m1=pvPortMallocCaps(10*1024, MALLOC_CAP_8BIT|MALLOC_CAP_EXEC); - printf("--> %p\n", m1); - TEST_ASSERT(m1==NULL); - printf("Testing changeover iram -> dram"); - for (x=0; x<10; x++) { - m2[x]=pvPortMallocCaps(10*1024, MALLOC_CAP_32BIT); - printf("--> %p\n", m2[x]); - } - TEST_ASSERT((((int)m2[0])&0xFF000000)==0x40000000); - TEST_ASSERT((((int)m2[9])&0xFF000000)==0x3F000000); - printf("Test if allocating executable code still gives IRAM, even with dedicated IRAM region depleted\n"); - m1=pvPortMallocCaps(10*1024, MALLOC_CAP_EXEC); - printf("--> %p\n", m1); - TEST_ASSERT((((int)m1)&0xFF000000)==0x40000000); - free(m1); - for (x=0; x<10; x++) free(m2[x]); - printf("Done.\n"); -} diff --git a/components/freertos/heap_regions.c b/components/freertos/heap_regions.c deleted file mode 100644 index 5cece756c0..0000000000 --- a/components/freertos/heap_regions.c +++ /dev/null @@ -1,591 +0,0 @@ -/* - FreeRTOS V8.2.0 - Copyright (C) 2015 Real Time Engineers Ltd. - All rights reserved - - VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION. - - This file is part of the FreeRTOS distribution. - - FreeRTOS is free software; you can redistribute it and/or modify it under - the terms of the GNU General Public License (version 2) as published by the - Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception. - - *************************************************************************** - >>! NOTE: The modification to the GPL is included to allow you to !<< - >>! distribute a combined work that includes FreeRTOS without being !<< - >>! obliged to provide the source code for proprietary components !<< - >>! outside of the FreeRTOS kernel. 
!<< - *************************************************************************** - - FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. Full license text is available on the following - link: http://www.freertos.org/a00114.html - - *************************************************************************** - * * - * FreeRTOS provides completely free yet professionally developed, * - * robust, strictly quality controlled, supported, and cross * - * platform software that is more than just the market leader, it * - * is the industry's de facto standard. * - * * - * Help yourself get started quickly while simultaneously helping * - * to support the FreeRTOS project by purchasing a FreeRTOS * - * tutorial book, reference manual, or both: * - * http://www.FreeRTOS.org/Documentation * - * * - *************************************************************************** - - http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading - the FAQ page "My application does not run, what could be wrong?". Have you - defined configASSERT()? - - http://www.FreeRTOS.org/support - In return for receiving this top quality - embedded software for free we request you assist our global community by - participating in the support forum. - - http://www.FreeRTOS.org/training - Investing in training allows your team to - be as productive as possible as early as possible. Now you can receive - FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers - Ltd, and the world's leading authority on the world's leading RTOS. - - http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products, - including FreeRTOS+Trace - an indispensable productivity tool, a DOS - compatible FAT file system, and our tiny thread aware UDP/IP stack. - - http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate. - Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS. - - http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High - Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS - licenses offer ticketed support, indemnification and commercial middleware. - - http://www.SafeRTOS.com - High Integrity Systems also provide a safety - engineered and independently SIL3 certified version for use in safety and - mission critical applications that require provable dependability. - - 1 tab == 4 spaces! -*/ - -/* - * This is a heap allocator that can allocate memory out of several tagged memory regions, - * with the regions having differing capabilities. In the ESP32, this is used to - * allocate memory for the various applications within the space the MMU allows them - * to work with. It can also be used to e.g. allocate memory in DMA-capable regions. - * - * Usage notes: - * - * vPortDefineHeapRegions() ***must*** be called before pvPortMalloc(). - * pvPortMalloc() will be called if any task objects (tasks, queues, event - * groups, etc.) are created, therefore vPortDefineHeapRegions() ***must*** be - * called before any other objects are defined. - * - * vPortDefineHeapRegions() takes a single parameter. The parameter is an array - * of HeapRegionTagged_t structures. HeapRegion_t is defined in portable.h as - * - * typedef struct HeapRegion - * { - * uint8_t *pucStartAddress; << Start address of a block of memory that will be part of the heap. - * size_t xSizeInBytes; << Size of the block of memory. 
- * BaseType_t xTag; << Tag - * } HeapRegionTagged_t; - * - * 'Tag' allows you to allocate memory of a certain type. Tag -1 is special; - * it basically tells the allocator to ignore this region as if it is not - * in the array at all. This facilitates disabling memory regions. - * - * The array is terminated using a NULL zero sized region definition, and the - * memory regions defined in the array ***must*** appear in address order from - * low address to high address. So the following is a valid example of how - * to use the function. - * - * HeapRegionTagged_t xHeapRegions[] = - * { - * { ( uint8_t * ) 0x80000000UL, 0x10000, 1 }, << Defines a block of 0x10000 bytes starting at address 0x80000000, tag 1 - * { ( uint8_t * ) 0x90000000UL, 0xa0000, 2 }, << Defines a block of 0xa0000 bytes starting at address of 0x90000000, tag 2 - * { NULL, 0, 0 } << Terminates the array. - * }; - * - * vPortDefineHeapRegions( xHeapRegions ); << Pass the array into vPortDefineHeapRegions(). - * - * Note 0x80000000 is the lower address so appears in the array first. - * - * pvPortMallocTagged can be used to get memory in a tagged region. - * - */ - -/* - -ToDo: -- This malloc implementation can be somewhat slow, especially when it is called multiple times with multiple tags -when having low memory issues. ToDo: Make it quicker. - -JD - */ - - -#include - -/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining -all the API functions to use the MPU wrappers. That should only be done when -task.h is included from an application file. */ -#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE - -#include "FreeRTOS.h" -#include "task.h" -#include "heap_regions_debug.h" - -#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE - -#include "heap_regions.h" - -#include "rom/ets_sys.h" - -/* Block sizes must not get too small. */ -#define heapMINIMUM_BLOCK_SIZE ( ( size_t ) ( uxHeapStructSize << 1 ) ) - -/* Assumes 8bit bytes! */ -#define heapBITS_PER_BYTE ( ( size_t ) 8 ) - -/* Define the linked list structure. This is used to link free blocks in order - of their memory address. This is optimized for size of the linked list struct - and assumes a region is never larger than 16MiB. */ -#define HEAPREGIONS_MAX_REGIONSIZE (16*1024*1024) -typedef struct A_BLOCK_LINK -{ - struct A_BLOCK_LINK *pxNextFreeBlock; /*<< The next free block in the list. */ - int xBlockSize: 24; /*<< The size of the free block. */ - int xTag: 7; /*<< Tag of this region */ - int xAllocated: 1; /*<< 1 if allocated */ -} BlockLink_t; - -//Mux to protect the memory status data -static portMUX_TYPE xMallocMutex = portMUX_INITIALIZER_UNLOCKED; - -/*-----------------------------------------------------------*/ - -/* - * Inserts a block of memory that is being freed into the correct position in - * the list of free memory blocks. The block being freed will be merged with - * the block in front it and/or the block behind it if the memory blocks are - * adjacent to each other. - */ -static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert ); - -/*-----------------------------------------------------------*/ - -/* The size of the structure placed at the beginning of each allocated memory -block must be correctly byte aligned. */ -static const uint32_t uxHeapStructSize = ( ( sizeof ( BlockLink_t ) + BLOCK_HEAD_LEN + BLOCK_TAIL_LEN + ( portBYTE_ALIGNMENT - 1 ) ) & ~portBYTE_ALIGNMENT_MASK ); - -/* Create a couple of list links to mark the start and end of the list. 
*/ -static BlockLink_t xStart, *pxEnd = NULL; - -/* Keeps track of the number of free bytes remaining, but says nothing about -fragmentation. */ -static size_t xFreeBytesRemaining[HEAPREGIONS_MAX_TAGCOUNT] = {0}; -static size_t xMinimumEverFreeBytesRemaining[HEAPREGIONS_MAX_TAGCOUNT] = {0}; - - -/*-----------------------------------------------------------*/ - -void *pvPortMallocTagged( size_t xWantedSize, BaseType_t tag ) -{ -BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink; -void *pvReturn = NULL; - - /* The heap must be initialised before the first call to - prvPortMalloc(). */ - configASSERT( pxEnd ); - - taskENTER_CRITICAL(&xMallocMutex); - { - /* The wanted size is increased so it can contain a BlockLink_t - structure in addition to the requested amount of bytes. */ - if( xWantedSize > 0 ) - { - xWantedSize += uxHeapStructSize; - - /* Ensure that blocks are always aligned to the required number - of bytes. */ - if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) - { - /* Byte alignment required. */ - xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining[ tag ] ) ) - { - /* Traverse the list from the start (lowest address) block until - one of adequate size is found. */ - pxPreviousBlock = &xStart; - pxBlock = xStart.pxNextFreeBlock; - while( ( ( pxBlock->xTag != tag ) || ( pxBlock->xBlockSize < xWantedSize ) ) && ( pxBlock->pxNextFreeBlock != NULL ) ) - { -// ets_printf("Block %x -> %x\n", (uint32_t)pxBlock, (uint32_t)pxBlock->pxNextFreeBlock); - - #if (configENABLE_MEMORY_DEBUG == 1) - { - mem_check_block(pxBlock); - } - #endif - - pxPreviousBlock = pxBlock; - pxBlock = pxBlock->pxNextFreeBlock; - } - - /* If the end marker was not reached then a block of adequate size - was found. */ - if( pxBlock != pxEnd ) - { - /* Return the memory space pointed to - jumping over the - BlockLink_t structure at its start. */ - pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + uxHeapStructSize - BLOCK_TAIL_LEN - BLOCK_HEAD_LEN); - - /* This block is being returned for use so must be taken out - of the list of free blocks. */ - pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; - - /* If the block is larger than required it can be split into - two. */ - - if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) - { - /* This block is to be split into two. Create a new - block following the number of bytes requested. The void - cast is used to prevent byte alignment warnings from the - compiler. */ - pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize); - - /* Calculate the sizes of two blocks split from the - single block. */ - pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; - pxNewBlockLink->xTag = tag; - pxBlock->xBlockSize = xWantedSize; - - #if (configENABLE_MEMORY_DEBUG == 1) - { - mem_init_dog(pxNewBlockLink); - } - #endif - - - /* Insert the new block into the list of free blocks. 
*/ - prvInsertBlockIntoFreeList( ( pxNewBlockLink ) ); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - xFreeBytesRemaining[ tag ] -= pxBlock->xBlockSize; - - if( xFreeBytesRemaining[ tag ] < xMinimumEverFreeBytesRemaining[ tag ] ) - { - xMinimumEverFreeBytesRemaining[ tag ] = xFreeBytesRemaining[ tag ]; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - /* The block is being returned - it is allocated and owned - by the application and has no "next" block. */ - pxBlock->xAllocated = 1; - pxBlock->pxNextFreeBlock = NULL; - - #if (configENABLE_MEMORY_DEBUG == 1) - { - mem_init_dog(pxBlock); - mem_malloc_block(pxBlock); - } - #endif - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - traceMALLOC( pvReturn, xWantedSize ); - } - taskEXIT_CRITICAL(&xMallocMutex); - - #if( configUSE_MALLOC_FAILED_HOOK == 1 ) - { - if( pvReturn == NULL ) - { - extern void vApplicationMallocFailedHook( void ); - vApplicationMallocFailedHook(); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - #endif - - return pvReturn; -} -/*-----------------------------------------------------------*/ - -void vPortFreeTagged( void *pv ) -{ -uint8_t *puc = ( uint8_t * ) pv; -BlockLink_t *pxLink; - - if( pv != NULL ) - { - /* The memory being freed will have an BlockLink_t structure immediately - before it. */ - puc -= (uxHeapStructSize - BLOCK_TAIL_LEN - BLOCK_HEAD_LEN) ; - - /* This casting is to keep the compiler from issuing warnings. */ - pxLink = ( void * ) puc; - - #if (configENABLE_MEMORY_DEBUG == 1) - { - taskENTER_CRITICAL(&xMallocMutex); - mem_check_block(pxLink); - mem_free_block(pxLink); - taskEXIT_CRITICAL(&xMallocMutex); - } - #endif - - /* Check the block is actually allocated. */ - configASSERT( ( pxLink->xAllocated ) != 0 ); - configASSERT( pxLink->pxNextFreeBlock == NULL ); - - if( pxLink->xAllocated != 0 ) - { - if( pxLink->pxNextFreeBlock == NULL ) - { - /* The block is being returned to the heap - it is no longer - allocated. */ - pxLink->xAllocated = 0; - - taskENTER_CRITICAL(&xMallocMutex); - { - /* Add this block to the list of free blocks. */ - xFreeBytesRemaining[ pxLink->xTag ] += pxLink->xBlockSize; - traceFREE( pv, pxLink->xBlockSize ); - prvInsertBlockIntoFreeList( ( ( BlockLink_t * ) pxLink ) ); - } - taskEXIT_CRITICAL(&xMallocMutex); - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - } -} -/*-----------------------------------------------------------*/ - -size_t xPortGetFreeHeapSizeTagged( BaseType_t tag ) -{ - return xFreeBytesRemaining[ tag ]; -} -/*-----------------------------------------------------------*/ - -size_t xPortGetMinimumEverFreeHeapSizeTagged( BaseType_t tag ) -{ - return xMinimumEverFreeBytesRemaining[ tag ]; -} -/*-----------------------------------------------------------*/ - -static void prvInsertBlockIntoFreeList( BlockLink_t *pxBlockToInsert ) -{ -BlockLink_t *pxIterator; -uint8_t *puc; - - /* Iterate through the list until a block is found that has a higher address - than the block being inserted. */ - for( pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert; pxIterator = pxIterator->pxNextFreeBlock ) - { - /* Nothing to do here, just iterate to the right position. */ - } - - /* Do the block being inserted, and the block it is being inserted after - make a contiguous block of memory, and are the tags the same? 
*/ - puc = ( uint8_t * ) pxIterator; - if( ( puc + pxIterator->xBlockSize ) == ( uint8_t * ) pxBlockToInsert && pxBlockToInsert->xTag==pxIterator->xTag) - { - pxIterator->xBlockSize += pxBlockToInsert->xBlockSize; - pxBlockToInsert = pxIterator; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } - - /* Do the block being inserted, and the block it is being inserted before - make a contiguous block of memory, and are the tags the same */ - puc = ( uint8_t * ) pxBlockToInsert; - if( ( puc + pxBlockToInsert->xBlockSize ) == ( uint8_t * ) pxIterator->pxNextFreeBlock && pxBlockToInsert->xTag==pxIterator->pxNextFreeBlock->xTag ) - { - if( pxIterator->pxNextFreeBlock != pxEnd ) - { - /* Form one big block from the two blocks. */ - pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize; - pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock; - } - else - { - pxBlockToInsert->pxNextFreeBlock = pxEnd; - } - } - else - { - pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock; - } - - /* If the block being inserted plugged a gap, so was merged with the block - before and the block after, then it's pxNextFreeBlock pointer will have - already been set, and should not be set here as that would make it point - to itself. */ - if( pxIterator != pxBlockToInsert ) - { - pxIterator->pxNextFreeBlock = pxBlockToInsert; - } - else - { - mtCOVERAGE_TEST_MARKER(); - } -} -/*-----------------------------------------------------------*/ - -void vPortDefineHeapRegionsTagged( const HeapRegionTagged_t * const pxHeapRegions ) -{ -BlockLink_t *pxFirstFreeBlockInRegion = NULL, *pxPreviousFreeBlock; -uint8_t *pucAlignedHeap; -size_t xTotalRegionSize, xTotalHeapSize = 0; -BaseType_t xDefinedRegions = 0, xRegIdx = 0; -uint32_t ulAddress; -const HeapRegionTagged_t *pxHeapRegion; - - /* Can only call once! */ - configASSERT( pxEnd == NULL ); - - vPortCPUInitializeMutex(&xMallocMutex); - - pxHeapRegion = &( pxHeapRegions[ xRegIdx ] ); - - while( pxHeapRegion->xSizeInBytes > 0 ) - { - if ( pxHeapRegion->xTag == -1 ) { - /* Move onto the next HeapRegionTagged_t structure. */ - xRegIdx++; - pxHeapRegion = &( pxHeapRegions[ xRegIdx ] ); - continue; - } - - configASSERT(pxHeapRegion->xTag < HEAPREGIONS_MAX_TAGCOUNT); - configASSERT(pxHeapRegion->xSizeInBytes < HEAPREGIONS_MAX_REGIONSIZE); - xTotalRegionSize = pxHeapRegion->xSizeInBytes; - - /* Ensure the heap region starts on a correctly aligned boundary. */ - ulAddress = ( uint32_t ) pxHeapRegion->pucStartAddress; - if( ( ulAddress & portBYTE_ALIGNMENT_MASK ) != 0 ) - { - ulAddress += ( portBYTE_ALIGNMENT - 1 ); - ulAddress &= ~portBYTE_ALIGNMENT_MASK; - - /* Adjust the size for the bytes lost to alignment. */ - xTotalRegionSize -= ulAddress - ( uint32_t ) pxHeapRegion->pucStartAddress; - } - - pucAlignedHeap = ( uint8_t * ) ulAddress; - - /* Set xStart if it has not already been set. */ - if( xDefinedRegions == 0 ) - { - /* xStart is used to hold a pointer to the first item in the list of - free blocks. The void cast is used to prevent compiler warnings. */ - xStart.pxNextFreeBlock = ( BlockLink_t * ) (pucAlignedHeap + BLOCK_HEAD_LEN); - xStart.xBlockSize = ( size_t ) 0; - } - else - { - /* Should only get here if one region has already been added to the - heap. */ - configASSERT( pxEnd != NULL ); - - /* Check blocks are passed in with increasing start addresses. */ - configASSERT( ulAddress > ( uint32_t ) pxEnd ); - } - - /* Remember the location of the end marker in the previous region, if - any. 
*/ - pxPreviousFreeBlock = pxEnd; - - /* pxEnd is used to mark the end of the list of free blocks and is - inserted at the end of the region space. */ - ulAddress = ( ( uint32_t ) pucAlignedHeap ) + xTotalRegionSize; - ulAddress -= uxHeapStructSize; - ulAddress &= ~portBYTE_ALIGNMENT_MASK; - pxEnd = ( BlockLink_t * ) (ulAddress + BLOCK_HEAD_LEN); - pxEnd->xBlockSize = 0; - pxEnd->pxNextFreeBlock = NULL; - pxEnd->xTag = -1; - - /* To start with there is a single free block in this region that is - sized to take up the entire heap region minus the space taken by the - free block structure. */ - pxFirstFreeBlockInRegion = ( BlockLink_t * ) (pucAlignedHeap + BLOCK_HEAD_LEN); - pxFirstFreeBlockInRegion->xBlockSize = ulAddress - ( uint32_t ) pxFirstFreeBlockInRegion + BLOCK_HEAD_LEN; - pxFirstFreeBlockInRegion->pxNextFreeBlock = pxEnd; - pxFirstFreeBlockInRegion->xTag=pxHeapRegion->xTag; - - /* If this is not the first region that makes up the entire heap space - then link the previous region to this region. */ - if( pxPreviousFreeBlock != NULL ) - { - pxPreviousFreeBlock->pxNextFreeBlock = pxFirstFreeBlockInRegion; - } - - xTotalHeapSize += pxFirstFreeBlockInRegion->xBlockSize; - xMinimumEverFreeBytesRemaining[ pxHeapRegion->xTag ] += pxFirstFreeBlockInRegion->xBlockSize; - xFreeBytesRemaining[ pxHeapRegion->xTag ] += pxFirstFreeBlockInRegion->xBlockSize; - - /* Move onto the next HeapRegionTagged_t structure. */ - xDefinedRegions++; - xRegIdx++; - pxHeapRegion = &( pxHeapRegions[ xRegIdx ] ); - - #if (configENABLE_MEMORY_DEBUG == 1) - { - mem_init_dog(pxFirstFreeBlockInRegion); - mem_init_dog(pxEnd); - } - #endif - } - - /* Check something was actually defined before it is accessed. */ - configASSERT( xTotalHeapSize ); - - - #if (configENABLE_MEMORY_DEBUG == 1) - { - mem_debug_init(uxHeapStructSize, &xStart, pxEnd, &xMallocMutex); - mem_check_all(0); - } - #endif -} - diff --git a/components/freertos/heap_regions_debug.c b/components/freertos/heap_regions_debug.c deleted file mode 100644 index f221e516b0..0000000000 --- a/components/freertos/heap_regions_debug.c +++ /dev/null @@ -1,191 +0,0 @@ -#include "heap_regions_debug.h" -#include "FreeRTOS.h" -#include "task.h" -#include -#include -#include - -#if (configENABLE_MEMORY_DEBUG == 1) - -static os_block_t g_malloc_list, *g_free_list=NULL, *g_end; -static size_t g_heap_struct_size; -static mem_dbg_ctl_t g_mem_dbg; -char g_mem_print = 0; -static portMUX_TYPE *g_malloc_mutex = NULL; -#define MEM_DEBUG(...) 
- -void mem_debug_init(size_t size, void *start, void *end, portMUX_TYPE *mutex) -{ - MEM_DEBUG("size=%d start=%p end=%p mutex=%p%x\n", size, start, end, mutex); - memset(&g_mem_dbg, 0, sizeof(g_mem_dbg)); - memset(&g_malloc_list, 0, sizeof(g_malloc_list)); - g_malloc_mutex = mutex; - g_heap_struct_size = size; - g_free_list = start; - g_end = end; -} - -void mem_debug_push(char type, void *addr) -{ - os_block_t *b = (os_block_t*)addr; - debug_block_t *debug_b = DEBUG_BLOCK(b); - - MEM_DEBUG("push type=%d addr=%p\n", type, addr); - if (g_mem_print){ - if (type == DEBUG_TYPE_MALLOC){ - ets_printf("task=%s t=%s s=%u a=%p\n", debug_b->head.task?debug_b->head.task:"", type==DEBUG_TYPE_MALLOC?"m":"f", b->size, addr); - } else { - ets_printf("task=%s t=%s s=%u a=%p\n", debug_b->head.task?debug_b->head.task:"", type==DEBUG_TYPE_MALLOC?"m":"f", b->size, addr); - } - } else { - mem_dbg_info_t *info = &g_mem_dbg.info[g_mem_dbg.cnt%DEBUG_MAX_INFO_NUM]; - - info->addr = addr; - info->type = type; - info->time = g_mem_dbg.cnt; - g_mem_dbg.cnt++; - } -} - -void mem_debug_malloc_show(void) -{ - os_block_t *b = g_malloc_list.next; - debug_block_t *d; - - taskENTER_CRITICAL(g_malloc_mutex); - while (b){ - d = DEBUG_BLOCK(b); - d->head.task[3] = '\0'; - ets_printf("t=%s s=%u a=%p\n", d->head.task?d->head.task:"", b->size, b); - b = b->next; - } - taskEXIT_CRITICAL(g_malloc_mutex); -} - -void mem_debug_show(void) -{ - uint32_t i; - - if (!g_mem_print) return; - - for (i=0; ihead.task?b->head.task:"", b, HEAD_DOG(b), TAIL_DOG(b)); - DOG_ASSERT(); - } - } else { - ets_printf("f task=%s a=%p h=%08x\n", b->head.task?b->head.task:"", b, HEAD_DOG(b));\ - DOG_ASSERT(); - } -} - -void mem_init_dog(void *data) -{ - debug_block_t *b = DEBUG_BLOCK(data); - xTaskHandle task; - - MEM_DEBUG("init dog, data=%p debug_block=%p block_size=%x\n", data, b, b->os_block.size); - if (!data) return; -#if (INCLUDE_pcTaskGetTaskName == 1) - task = xTaskGetCurrentTaskHandle(); - if (task){ - strncpy(b->head.task, pcTaskGetTaskName(task), 3); - b->head.task[3] = '\0'; - } -#else - b->head.task = '\0'; -#endif - HEAD_DOG(b) = DEBUG_DOG_VALUE; - TAIL_DOG(b) = DEBUG_DOG_VALUE; -} - -void mem_check_all(void* pv) -{ - os_block_t *b; - - if (pv){ - char *puc = (char*)(pv); - os_block_t *b; - puc -= (g_heap_struct_size - BLOCK_TAIL_LEN - BLOCK_HEAD_LEN); - b = (os_block_t*)puc; - mem_check_block(b); - } - - taskENTER_CRITICAL(g_malloc_mutex); - b = g_free_list->next; - while(b && b != g_end){ - mem_check_block(b); - ets_printf("check b=%p size=%d ok\n", b, b->size); - b = b->next; - } - taskEXIT_CRITICAL(g_malloc_mutex); -} - -void mem_malloc_show(void) -{ - os_block_t *b = g_malloc_list.next; - debug_block_t *debug_b; - - while (b){ - debug_b = DEBUG_BLOCK(b); - ets_printf("%s %p %p %u\n", debug_b->head.task, debug_b, b, b->size); - b = b->next; - } -} - -void mem_malloc_block(void *data) -{ - os_block_t *b = (os_block_t*)data; - - MEM_DEBUG("mem malloc block data=%p, size=%u\n", data, b->size); - mem_debug_push(DEBUG_TYPE_MALLOC, data); - - if (b){ - b->next = g_malloc_list.next; - g_malloc_list.next = b; - } -} - -void mem_free_block(void *data) -{ - os_block_t *del = (os_block_t*)data; - os_block_t *b = g_malloc_list.next; - os_block_t *pre = &g_malloc_list; - debug_block_t *debug_b; - - MEM_DEBUG("mem free block data=%p, size=%d\n", data, del->size); - mem_debug_push(DEBUG_TYPE_FREE, data); - - if (!del) { - return; - } - - while (b){ - if ( (del == b) ){ - pre->next = b->next; - b->next = NULL; - return; - } - pre = b; - b = b->next; - 
} - - debug_b = DEBUG_BLOCK(del); - ets_printf("%s %p %p %u already free\n", debug_b->head.task, debug_b, del, del->size); - mem_malloc_show(); - abort(); -} - -#endif - - diff --git a/components/freertos/include/freertos/heap_regions.h b/components/freertos/include/freertos/heap_regions.h deleted file mode 100644 index 090c5b9b36..0000000000 --- a/components/freertos/include/freertos/heap_regions.h +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at - -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -#ifndef _HEAP_REGIONS_H -#define _HEAP_REGIONS_H - -#include "freertos/FreeRTOS.h" - -/* The maximum amount of tags in use */ -#define HEAPREGIONS_MAX_TAGCOUNT 16 - -/** - * @brief Structure to define a memory region - */ -typedef struct HeapRegionTagged -{ - uint8_t *pucStartAddress; ///< Start address of the region - size_t xSizeInBytes; ///< Size of the region - BaseType_t xTag; ///< Tag for the region - uint32_t xExecAddr; ///< If non-zero, indicates the region also has an alias in IRAM. -} HeapRegionTagged_t; - -/** - * @brief Initialize the heap allocator by feeding it the usable memory regions and their tags. - * - * This takes an array of heapRegionTagged_t structs, the last entry of which is a dummy entry - * which has pucStartAddress set to NULL. It will initialize the heap allocator to serve memory - * from these ranges. - * - * @param pxHeapRegions Array of region definitions - */ - -void vPortDefineHeapRegionsTagged( const HeapRegionTagged_t * const pxHeapRegions ); - - -/** - * @brief Allocate memory from a region with a certain tag - * - * Like pvPortMalloc, this returns an allocated chunk of memory. This function, - * however, forces the allocator to allocate from a region specified by a - * specific tag. - * - * @param xWantedSize Size needed, in bytes - * @param tag Tag of the memory region the allocation has to be from - * - * @return Pointer to allocated memory if succesful. - * NULL if unsuccesful. - */ -void *pvPortMallocTagged( size_t xWantedSize, BaseType_t tag ); - -/** - * @brief Free memory allocated with pvPortMallocTagged - * - * This is basically an implementation of free(). - * - * @param pv Pointer to region allocated by pvPortMallocTagged - */ -void vPortFreeTagged( void *pv ); - -/** - * @brief Get the lowest amount of memory free for a certain tag - * - * This function allows the user to see what the least amount of - * free memory for a certain tag is. 
- * - * @param tag Tag of the memory region - * - * @return Minimum amount of free bytes available in the runtime of - * the program - */ -size_t xPortGetMinimumEverFreeHeapSizeTagged( BaseType_t tag ); - -/** - * @brief Get the amount of free bytes in a certain tagged region - * - * Works like xPortGetFreeHeapSize but allows the user to specify - * a specific tag - * - * @param tag Tag of the memory region - * - * @return Remaining amount of free bytes in region - */ -size_t xPortGetFreeHeapSizeTagged( BaseType_t tag ); - - -#endif \ No newline at end of file diff --git a/components/freertos/include/freertos/heap_regions_debug.h b/components/freertos/include/freertos/heap_regions_debug.h deleted file mode 100644 index dca9531d7e..0000000000 --- a/components/freertos/include/freertos/heap_regions_debug.h +++ /dev/null @@ -1,79 +0,0 @@ -#ifndef _HEAP_REGION_DEBUG_H -#define _HEAP_REGION_DEBUG_H - -#include "FreeRTOS.h" - -#if (configENABLE_MEMORY_DEBUG == 1) - -#define DEBUG_DOG_VALUE 0x1a2b3c4d -#define DEBUG_MAX_INFO_NUM 20 -#define DEBUG_TYPE_MALLOC 1 -#define DEBUG_TYPE_FREE 2 - -typedef struct { - unsigned int dog; - char task[4]; - unsigned int pc; -}block_head_t; - -typedef struct { - unsigned int dog; -}block_tail_t; - -/* Please keep this definition same as BlockLink_t */ -typedef struct _os_block_t { - struct _os_block_t *next; /*<< The next free block in the list. */ - int size: 24; /*<< The size of the free block. */ - int xtag: 7; /*<< Tag of this region */ - int xAllocated: 1; /*<< 1 if allocated */ -}os_block_t; - -typedef struct { - block_head_t head; - os_block_t os_block; -}debug_block_t; - -typedef struct _mem_dbg_info{ - void *addr; - char *task; - uint32_t pc; - uint32_t time; - uint8_t type; -}mem_dbg_info_t; - -typedef struct _mem_dbg_ctl{ - mem_dbg_info_t info[DEBUG_MAX_INFO_NUM]; - uint32_t cnt; -}mem_dbg_ctl_t; - -#define BLOCK_HEAD_LEN sizeof(block_head_t) -#define BLOCK_TAIL_LEN sizeof(block_tail_t) -#define OS_BLOCK(_b) ((os_block_t*)((debug_block_t*)((char*)(_b) + BLOCK_HEAD_LEN))) -#define DEBUG_BLOCK(_b) ((debug_block_t*)((char*)(_b) - BLOCK_HEAD_LEN)) -#define HEAD_DOG(_b) ((_b)->head.dog) -#define TAIL_DOG(_b) (*(unsigned int*)((char*)(_b) + (((_b)->os_block.size ) - BLOCK_TAIL_LEN))) - -#define DOG_ASSERT()\ -{\ - mem_debug_show();\ - abort();\ -} - -extern void mem_check_block(void * data); -extern void mem_init_dog(void *data); -extern void mem_debug_init(size_t size, void *start, void *end, portMUX_TYPE *mutex); -extern void mem_malloc_block(void *data); -extern void mem_free_block(void *data); -extern void mem_check_all(void* pv); - -#else - -#define mem_check_block(...) -#define mem_init_dog(...) - -#define BLOCK_HEAD_LEN 0 -#define BLOCK_TAIL_LEN 0 - -#endif - -#endif diff --git a/components/freertos/include/freertos/portable.h b/components/freertos/include/freertos/portable.h index d62ce01b72..45eaf73964 100644 --- a/components/freertos/include/freertos/portable.h +++ b/components/freertos/include/freertos/portable.h @@ -136,29 +136,12 @@ extern "C" { StackType_t *pxPortInitialiseStack( StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters ) PRIVILEGED_FUNCTION; #endif -/* Used by heap_5.c. */ -typedef struct HeapRegion -{ - uint8_t *pucStartAddress; - size_t xSizeInBytes; -} HeapRegion_t; - -/* - * Used to define multiple heap regions for use by heap_5.c. This function - * must be called before any calls to pvPortMalloc() - not creating a task, - * queue, semaphore, mutex, software timer, event group, etc. 
will result in - * pvPortMalloc being called. - * - * pxHeapRegions passes in an array of HeapRegion_t structures - each of which - * defines a region of memory that can be used as the heap. The array is - * terminated by a HeapRegions_t structure that has a size of 0. The region - * with the lowest start address must appear first in the array. - */ -void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ); - - /* * Map to the memory management routines required for the port. + * + * Note that libc standard malloc/free are also available for + * non-FreeRTOS-specific code, and behave the same as + * pvPortMalloc()/vPortFree(). */ void *pvPortMalloc( size_t xSize ) PRIVILEGED_FUNCTION; void vPortFree( void *pv ) PRIVILEGED_FUNCTION; diff --git a/components/freertos/port.c b/components/freertos/port.c index a578df9a17..942f294bde 100644 --- a/components/freertos/port.c +++ b/components/freertos/port.c @@ -102,7 +102,7 @@ #include "task.h" #include "esp_panic.h" - +#include "esp_heap_caps.h" #include "esp_crosscore_int.h" #include "esp_intr_alloc.h" @@ -442,5 +442,29 @@ uint32_t xPortGetTickRateHz(void) { return (uint32_t)configTICK_RATE_HZ; } +/* Heap functions, wrappers around heap_caps_xxx functions + NB: libc malloc() & free() are also defined & available + for this purpose. + */ + +void *pvPortMalloc( size_t xWantedSize ) +{ + return heap_caps_malloc( xWantedSize, MALLOC_CAP_8BIT ); +} + +void vPortFree( void *pv ) +{ + return heap_caps_free(pv); +} + +size_t xPortGetFreeHeapSize( void ) PRIVILEGED_FUNCTION +{ + return heap_caps_get_free_size( MALLOC_CAP_8BIT ); +} + +size_t xPortGetMinimumEverFreeHeapSize( void ) PRIVILEGED_FUNCTION +{ + return heap_caps_get_minimum_free_size( MALLOC_CAP_8BIT ); +} diff --git a/components/heap/component.mk b/components/heap/component.mk new file mode 100644 index 0000000000..ebd7a7d59b --- /dev/null +++ b/components/heap/component.mk @@ -0,0 +1,3 @@ +# +# Component Makefile +# diff --git a/components/heap/heap_caps.c b/components/heap/heap_caps.c new file mode 100644 index 0000000000..3484312c85 --- /dev/null +++ b/components/heap/heap_caps.c @@ -0,0 +1,282 @@ +// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#include +#include +#include +#include +#include +#include "esp_attr.h" +#include "esp_heap_caps.h" +#include "multi_heap.h" +#include "esp_log.h" +#include "heap_private.h" + +/* +This file, combined with a region allocator that supports multiple heaps, solves the problem that the ESP32 has RAM +that's slightly heterogeneous. Some RAM can be byte-accessed, some allows only 32-bit accesses, some can execute memory, +some can be remapped by the MMU to only be accessed by a certain PID etc. In order to allow the most flexible memory +allocation possible, this code makes it possible to request memory that has certain capabilities.
The code will then use +its knowledge of how the memory is configured along with a priority scheme to allocate that memory in the most sane way +possible. This should optimize the amount of RAM accessible to the code without hardwiring addresses. +*/ + +/* + This takes a memory chunk in a region that can be addressed as both DRAM as well as IRAM. It will convert it to + IRAM in such a way that it can be later freed. It assumes both the address as wel as the length to be word-aligned. + It returns a region that's 1 word smaller than the region given because it stores the original Dram address there. + + In theory, we can also make this work by prepending a struct that looks similar to the block link struct used by the + heap allocator itself, which will allow inspection tools relying on any block returned from any sort of malloc to + have such a block in front of it, work. We may do this later, if/when there is demand for it. For now, a simple + pointer is used. +*/ +IRAM_ATTR static void *dram_alloc_to_iram_addr(void *addr, size_t len) +{ + uint32_t dstart = (int)addr; //First word + uint32_t dend = ((int)addr) + len - 4; //Last word + assert(dstart >= SOC_DIRAM_DRAM_LOW); + assert(dend <= SOC_DIRAM_DRAM_HIGH); + assert((dstart & 3) == 0); + assert((dend & 3) == 0); + uint32_t istart = SOC_DIRAM_IRAM_LOW + (SOC_DIRAM_DRAM_HIGH - dend); + uint32_t *iptr = (uint32_t *)istart; + *iptr = dstart; + return (void *)(iptr + 1); +} + +/* return all possible capabilities (across all priorities) for a given heap */ +inline static uint32_t get_all_caps(const heap_t *heap) +{ + if (heap->heap == NULL) { + return 0; + } + uint32_t all_caps = 0; + for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) { + all_caps |= heap->caps[prio]; + } + return all_caps; +} + +/* +Routine to allocate a bit of memory with certain capabilities. caps is a bitfield of MALLOC_CAP_* bits. +*/ +IRAM_ATTR void *heap_caps_malloc( size_t size, uint32_t caps ) +{ + void *ret = NULL; + uint32_t remCaps; + + if (caps & MALLOC_CAP_EXEC) { + //MALLOC_CAP_EXEC forces an alloc from IRAM. There is a region which has both this as well as the following + //caps, but the following caps are not possible for IRAM. Thus, the combination is impossible and we return + //NULL directly, even although our heap capabilities (based on soc_memory_tags & soc_memory_regions) would + //indicate there is a tag for this. + if ((caps & MALLOC_CAP_8BIT) || (caps & MALLOC_CAP_DMA)) { + return NULL; + } + //If any, EXEC memory should be 32-bit aligned, so round up to the next multiple of 4. + size = (size + 3) & (~3); + } + for (int prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) { + //Iterate over heaps and check capabilities at this priority + for (int heap_idx = 0; heap_idx < num_registered_heaps; heap_idx++) { + heap_t *heap = ®istered_heaps[heap_idx]; + if ((heap->caps[prio] & caps) != 0) { + //Heap has at least one of the caps requested. If caps has other bits set that this prio + //doesn't cover, see if they're available in other prios. + remCaps = caps & (~heap->caps[prio]); //Remaining caps to be fulfilled + int j = prio + 1; + while (remCaps != 0 && j < SOC_MEMORY_TYPE_NO_PRIOS) { + remCaps = remCaps & (~heap->caps[j]); + j++; + } + if (remCaps == 0) { + //This heap can satisfy all the requested capabilities. See if we can grab some memory using it. + if ((caps & MALLOC_CAP_EXEC) && heap->start >= SOC_DIRAM_DRAM_LOW && heap->start < SOC_DIRAM_DRAM_HIGH) { + //This is special, insofar that what we're going to get back is a DRAM address. 
If so, + //we need to 'invert' it (lowest address in DRAM == highest address in IRAM and vice-versa) and + //add a pointer to the DRAM equivalent before the address we're going to return. + ret = multi_heap_malloc(heap->heap, size + 4); + if (ret != NULL) { + return dram_alloc_to_iram_addr(ret, size + 4); + } + } else { + //Just try to alloc, nothing special. + ret = multi_heap_malloc(heap->heap, size); + if (ret != NULL) { + return ret; + } + } + } + } + } + } + //Nothing usable found. + return NULL; +} + +/* Find the heap which belongs to ptr, or return NULL if it's + not in any heap. + + (This confirms if ptr is inside the heap's region, doesn't confirm if 'ptr' + is an allocated block or is some other random address inside the heap.) +*/ +IRAM_ATTR static heap_t *find_containing_heap(void *ptr ) +{ + intptr_t p = (intptr_t)ptr; + for (size_t i = 0; i < num_registered_heaps; i++) { + heap_t *heap = ®istered_heaps[i]; + if (p >= heap->start && p < heap->end) { + return heap; + } + } + return NULL; +} + +IRAM_ATTR void heap_caps_free( void *ptr) +{ + intptr_t p = (intptr_t)ptr; + + if (ptr == NULL) { + return; + } + + if ((p >= SOC_DIRAM_IRAM_LOW) && (p <= SOC_DIRAM_IRAM_HIGH)) { + //Memory allocated here is actually allocated in the DRAM alias region and + //cannot be de-allocated as usual. dram_alloc_to_iram_addr stores a pointer to + //the equivalent DRAM address, though; free that. + uint32_t *dramAddrPtr = (uint32_t *)ptr; + ptr = (void *)dramAddrPtr[-1]; + } + + heap_t *heap = find_containing_heap(ptr); + assert(heap != NULL && "free() target pointer is outside heap areas"); + multi_heap_free(heap->heap, ptr); +} + +IRAM_ATTR void *heap_caps_realloc( void *ptr, size_t size, int caps) +{ + if (ptr == NULL) { + return heap_caps_malloc(size, caps); + } + + if (size == 0) { + heap_caps_free(ptr); + return NULL; + } + + heap_t *heap = find_containing_heap(ptr); + + assert(heap != NULL && "realloc() pointer is outside heap areas"); + + // are the existing heap's capabilities compatible with the + // requested ones? + bool compatible_caps = (caps & get_all_caps(heap)) == caps; + + if (compatible_caps) { + // try to reallocate this memory within the same heap + // (which will resize the block if it can) + void *r = multi_heap_realloc(heap->heap, ptr, size); + if (r != NULL) { + return r; + } + } + + // if we couldn't do that, try to see if we can reallocate + // in a different heap with requested capabilities. 
+ void *new_p = heap_caps_malloc(size, caps); + if (new_p != NULL) { + size_t old_size = multi_heap_get_allocated_size(heap->heap, ptr); + assert(old_size > 0); + memcpy(new_p, ptr, old_size); + heap_caps_free(ptr); + return new_p; + } + return NULL; +} + +size_t heap_caps_get_free_size( uint32_t caps ) +{ + size_t ret = 0; + for (int i = 0; i < num_registered_heaps; i++) { + heap_t *heap = ®istered_heaps[i]; + if ((get_all_caps(heap) & caps) == caps) { + ret += multi_heap_free_size(heap->heap); + } + } + return ret; +} + +size_t heap_caps_get_minimum_free_size( uint32_t caps ) +{ + size_t ret = 0; + for (int i = 0; i < num_registered_heaps; i++) { + heap_t *heap = ®istered_heaps[i]; + if ((get_all_caps(heap) & caps) == caps) { + ret += multi_heap_minimum_free_size(heap->heap); + } + } + return ret; +} + +size_t heap_caps_get_largest_free_block( uint32_t caps ) +{ + multi_heap_info_t info; + heap_caps_get_info(&info, caps); + return info.largest_free_block; +} + +void heap_caps_get_info( multi_heap_info_t *info, uint32_t caps ) +{ + bzero(info, sizeof(multi_heap_info_t)); + + for (int i = 0; i < num_registered_heaps; i++) { + heap_t *heap = ®istered_heaps[i]; + if ((get_all_caps(heap) & caps) == caps) { + multi_heap_info_t hinfo; + multi_heap_get_info(heap->heap, &hinfo); + + info->total_free_bytes += hinfo.total_free_bytes; + info->total_allocated_bytes += hinfo.total_allocated_bytes; + info->largest_free_block = MAX(info->largest_free_block, + hinfo.largest_free_block); + info->minimum_free_bytes += hinfo.minimum_free_bytes; + info->allocated_blocks += hinfo.allocated_blocks; + info->free_blocks += hinfo.free_blocks; + info->total_blocks += hinfo.total_blocks; + } + } +} + +void heap_caps_print_heap_info( uint32_t caps ) +{ + multi_heap_info_t info; + printf("Heap summary for capabilities 0x%08X:\n", caps); + for (int i = 0; i < num_registered_heaps; i++) { + heap_t *heap = ®istered_heaps[i]; + if ((get_all_caps(heap) & caps) == caps) { + multi_heap_get_info(heap->heap, &info); + + printf(" At 0x%08x len %d free %d allocated %d min_free %d\n", + heap->start, heap->end - heap->start, info.total_free_bytes, info.total_allocated_bytes, info.minimum_free_bytes); + printf(" largest_free_block %d alloc_blocks %d free_blocks %d total_blocks %d\n", + info.largest_free_block, info.allocated_blocks, + info.free_blocks, info.total_blocks); + } + } + printf(" Totals:\n"); + heap_caps_get_info(&info, caps); + + printf(" free %d allocated %d min_free %d largest_free_block %d\n", info.total_free_bytes, info.total_allocated_bytes, info.minimum_free_bytes, info.largest_free_block); +} + diff --git a/components/heap/heap_caps_init.c b/components/heap/heap_caps_init.c new file mode 100644 index 0000000000..f98bbf39a6 --- /dev/null +++ b/components/heap/heap_caps_init.c @@ -0,0 +1,182 @@ +// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
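As a usage sketch for the heap_caps_realloc() implementation above (not part of this patch; the helper name, size and capability choice are illustrative only), reallocation can move an existing buffer into memory with a different capability set:

    #include <stddef.h>
    #include "esp_heap_caps.h"

    /* Hypothetical helper: make sure 'buf' ends up in DMA-capable RAM. */
    void *move_to_dma_capable(void *buf, size_t new_size)
    {
        /* If 'buf' already lives in a heap that has MALLOC_CAP_DMA, the block is resized in
           place where possible. Otherwise a new DMA-capable buffer is allocated, the old
           contents are copied over and the old buffer is freed. On failure NULL is returned
           and 'buf' is left untouched. */
        return heap_caps_realloc(buf, new_size, MALLOC_CAP_DMA);
    }

The result can later be released with heap_caps_free() or plain free(), whichever heap it ended up in.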
+#include "heap_private.h" +#include +#include +#include +#include +#include + +static const char *TAG = "heap_init"; + +heap_t *registered_heaps; +size_t num_registered_heaps; + +static void register_heap(heap_t *region) +{ + region->heap = multi_heap_register((void *)region->start, region->end - region->start); + ESP_EARLY_LOGD(TAG, "New heap initialised at %p", region->heap); + assert(region->heap); +} + +void heap_caps_enable_nonos_stack_heaps() +{ + for (int i = 0; i < num_registered_heaps; i++) { + // Assume any not-yet-registered heap is + // a nonos-stack heap + heap_t *heap = ®istered_heaps[i]; + if (heap->heap == NULL) { + register_heap(heap); + multi_heap_set_lock(heap->heap, &heap->heap_mux); + } + } +} + +//Modify regions array to disable the given range of memory. +static void disable_mem_region(soc_memory_region_t *regions, intptr_t from, intptr_t to) +{ + //Align from and to on word boundaries + from = from & ~3; + to = (to + 3) & ~3; + + for (int i = 0; i < soc_memory_region_count; i++) { + soc_memory_region_t *region = ®ions[i]; + + intptr_t regStart = region->start; + intptr_t regEnd = region->start + region->size; + if (regStart >= from && regEnd <= to) { + //Entire region falls in the range. Disable entirely. + regions[i].type = -1; + } else if (regStart >= from && regEnd > to && regStart < to) { + //Start of the region falls in the range. Modify address/len. + intptr_t overlap = to - regStart; + region->start += overlap; + region->size -= overlap; + if (region->iram_address) { + region->iram_address += overlap; + } + } else if (regStart < from && regEnd > from && regEnd <= to) { + //End of the region falls in the range. Modify length. + region->size -= regEnd - from; + } else if (regStart < from && regEnd > to) { + //Range punches a hole in the region! We do not support this. + ESP_EARLY_LOGE(TAG, "region %d: hole punching is not supported!", i); + regions->type = -1; //Just disable memory region. That'll teach them! + } + } +} + +/* +Warning: These variables are assumed to have the start and end of the data and iram +area used statically by the program, respectively. These variables are defined in the ld +file. +*/ +extern int _data_start, _heap_start, _init_start, _iram_text_end; + +/* +Initialize the heap allocator. We pass it a bunch of region descriptors, but we need to modify those first to accommodate for +the data as loaded by the bootloader. +ToDo: The regions are different when stuff like trace memory, BT, ... is used. Modify the regions struct on the fly for this. +Same with loading of apps. Same with using SPI RAM. +*/ +void heap_caps_init() +{ + /* Copy the soc_memory_regions data to the stack, so we can + manipulate it. */ + soc_memory_region_t regions[soc_memory_region_count]; + memcpy(regions, soc_memory_regions, sizeof(soc_memory_region_t)*soc_memory_region_count); + + //Disable the bits of memory where this code is loaded. + disable_mem_region(regions, (intptr_t)&_data_start, (intptr_t)&_heap_start); //DRAM used by bss/data static variables + disable_mem_region(regions, (intptr_t)&_init_start, (intptr_t)&_iram_text_end); //IRAM used by code + + // Disable all regions reserved on this SoC + for (int i = 0; i < soc_reserved_region_count; i++) { + disable_mem_region(regions, soc_reserved_regions[i].start, + soc_reserved_regions[i].end); + } + + //The heap allocator will treat every region given to it as separate. In order to get bigger ranges of contiguous memory, + //it's useful to coalesce adjacent regions that have the same type. 
+ + for (int i = 1; i < soc_memory_region_count; i++) { + soc_memory_region_t *a = ®ions[i - 1]; + soc_memory_region_t *b = ®ions[i]; + if (b->start == a->start + a->size && b->type == a->type ) { + a->type = -1; + b->start = a->start; + b->size += a->size; + } + } + + /* Count the heaps left after merging */ + num_registered_heaps = 0; + for (int i = 0; i < soc_memory_region_count; i++) { + if (regions[i].type != -1) { + num_registered_heaps++; + } + } + + /* Start by allocating the registered heap data on the stack. + + Once we have a heap to copy it to, we will copy it to a heap buffer. + */ + multi_heap_handle_t first_heap = NULL; + heap_t temp_heaps[num_registered_heaps]; + size_t heap_idx = 0; + + ESP_EARLY_LOGI(TAG, "Initializing. RAM available for dynamic allocation:"); + for (int i = 0; i < soc_memory_region_count; i++) { + soc_memory_region_t *region = ®ions[i]; + const soc_memory_type_desc_t *type = &soc_memory_types[region->type]; + heap_t *heap = &temp_heaps[heap_idx]; + if (region->type == -1) { + continue; + } + heap_idx++; + assert(heap_idx <= num_registered_heaps); + + heap->type = region->type; + heap->start = region->start; + heap->end = region->start + region->size; + memcpy(heap->caps, type->caps, sizeof(heap->caps)); + vPortCPUInitializeMutex(&heap->heap_mux); + + ESP_EARLY_LOGI(TAG, "At %08X len %08X (%d KiB): %s", + region->start, region->size, region->size / 1024, type->name); + + if (type->startup_stack) { + /* Will be registered when OS scheduler starts */ + heap->heap = NULL; + } else { + register_heap(heap); + if (first_heap == NULL) { + first_heap = heap->heap; + } + } + } + + /* Allocate the permanent heap data that we'll use for runtime */ + assert(heap_idx == num_registered_heaps); + registered_heaps = multi_heap_malloc(first_heap, sizeof(heap_t) * num_registered_heaps); + memcpy(registered_heaps, temp_heaps, sizeof(heap_t)*num_registered_heaps); + + /* Now the heap_mux fields live on the heap, assign them */ + for (int i = 0; i < num_registered_heaps; i++) { + if (registered_heaps[i].heap != NULL) { + multi_heap_set_lock(registered_heaps[i].heap, ®istered_heaps[i].heap_mux); + } + } +} + diff --git a/components/heap/heap_private.h b/components/heap/heap_private.h new file mode 100644 index 0000000000..ed5fd6b533 --- /dev/null +++ b/components/heap/heap_private.h @@ -0,0 +1,38 @@ +// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once + +#include +#include +#include +#include +#include "multi_heap.h" + +/* Some common heap registration data structures used + for heap_caps_init.c to share heap information with heap_caps.c +*/ + +/* Type for describing each registered heap */ +typedef struct { + size_t type; + uint32_t caps[SOC_MEMORY_TYPE_NO_PRIOS]; ///< Capabilities for the type of memory in this healp (as a prioritised set). Copied from soc_memory_types so it's in RAM not flash. 
+ intptr_t start; + intptr_t end; + portMUX_TYPE heap_mux; + multi_heap_handle_t heap; +} heap_t; + +extern heap_t *registered_heaps; +extern size_t num_registered_heaps; + diff --git a/components/heap/include/esp_heap_alloc_caps.h b/components/heap/include/esp_heap_alloc_caps.h new file mode 100644 index 0000000000..5338d279c3 --- /dev/null +++ b/components/heap/include/esp_heap_alloc_caps.h @@ -0,0 +1,35 @@ +// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once +#warning "This header is deprecated, please use functions defined in esp_heap_caps.h instead." +#include "esp_heap_caps.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Deprecated FreeRTOS-style esp_heap_alloc_caps.h functions follow */ + +/* Please use heap_caps_malloc() instead of this function */ +void *pvPortMallocCaps(size_t xWantedSize, uint32_t caps) asm("heap_caps_malloc") __attribute__((deprecated)); + +/* Please use heap_caps_get_minimum_free_size() instead of this function */ +size_t xPortGetMinimumEverFreeHeapSizeCaps( uint32_t caps ) asm("heap_caps_get_minimum_free_size") __attribute__((deprecated)); + +/* Please use heap_caps_get_free_size() instead of this function */ +size_t xPortGetFreeHeapSizeCaps( uint32_t caps ) asm("heap_caps_get_free_size") __attribute__((deprecated)); + +#ifdef __cplusplus +} +#endif diff --git a/components/heap/include/esp_heap_caps.h b/components/heap/include/esp_heap_caps.h new file mode 100644 index 0000000000..26bc2abebb --- /dev/null +++ b/components/heap/include/esp_heap_caps.h @@ -0,0 +1,175 @@ +// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
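For callers of the deprecated esp_heap_alloc_caps.h names above, a minimal migration sketch (not part of this patch; the sizes and capability flags are examples only):

    #include <stddef.h>
    #include "esp_heap_caps.h"

    void migration_example(void)
    {
        /* Was: void *buf = pvPortMallocCaps(1024, MALLOC_CAP_DMA); */
        void *buf = heap_caps_malloc(1024, MALLOC_CAP_DMA);

        /* Was: size_t low = xPortGetMinimumEverFreeHeapSizeCaps(MALLOC_CAP_8BIT); */
        size_t low_watermark = heap_caps_get_minimum_free_size(MALLOC_CAP_8BIT);

        (void) low_watermark;
        heap_caps_free(buf);    /* heap_caps_free() accepts NULL, so no extra check is needed */
    }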
+#pragma once + +#include +#include +#include "multi_heap.h" + +/** + * @brief Flags to indicate the capabilities of the various memory systems + */ +#define MALLOC_CAP_EXEC (1<<0) ///< Memory must be able to run executable code +#define MALLOC_CAP_32BIT (1<<1) ///< Memory must allow for aligned 32-bit data accesses +#define MALLOC_CAP_8BIT (1<<2) ///< Memory must allow for 8/16/...-bit data accesses +#define MALLOC_CAP_DMA (1<<3) ///< Memory must be able to accessed by DMA +#define MALLOC_CAP_PID2 (1<<4) ///< Memory must be mapped to PID2 memory space (PIDs are not currently used) +#define MALLOC_CAP_PID3 (1<<5) ///< Memory must be mapped to PID3 memory space (PIDs are not currently used) +#define MALLOC_CAP_PID4 (1<<6) ///< Memory must be mapped to PID4 memory space (PIDs are not currently used) +#define MALLOC_CAP_PID5 (1<<7) ///< Memory must be mapped to PID5 memory space (PIDs are not currently used) +#define MALLOC_CAP_PID6 (1<<8) ///< Memory must be mapped to PID6 memory space (PIDs are not currently used) +#define MALLOC_CAP_PID7 (1<<9) ///< Memory must be mapped to PID7 memory space (PIDs are not currently used) +#define MALLOC_CAP_SPISRAM (1<<10) ///< Memory must be in SPI SRAM +#define MALLOC_CAP_INVALID (1<<31) ///< Memory can't be used / list end marker + + +/** + * @brief Initialize the capability-aware heap allocator. + * + * This is called once in the IDF startup code. Do not call it + * at other times. + */ +void heap_caps_init(); + +/** + * @brief Enable heap(s) in memory regions where the startup stacks are located. + * + * On startup, the pro/app CPUs have a certain memory region they use as stack, so we + * cannot do allocations in the regions these stack frames are. When FreeRTOS is + * completely started, they do not use that memory anymore and heap(s) there can + * be enabled. + */ +void heap_caps_enable_nonos_stack_heaps(); + +/** + * @brief Allocate a chunk of memory which has the given capabilities + * + * Equivalent semantics to libc malloc(), for capability-aware memory. + * + * In IDF, malloc(p) is equivalent to heaps_caps_malloc(p, MALLOC_CAP_8BIT); + * + * @param size Size, in bytes, of the amount of memory to allocate + * @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type + * of memory to be returned + * + * @return A pointer to the memory allocated on success, NULL on failure + */ +void *heap_caps_malloc(size_t size, uint32_t caps); + +/** + * @brief Free memory previously allocated via heap_caps_malloc() or heap_caps_realloc(). + * + * Equivalent semantics to libc free(), for capability-aware memory. + * + * In IDF, free(p) is equivalent to heap_caps_free(p). + * + * @param ptr Pointer to memory previously returned from heap_caps_malloc() or heap_caps_realloc(). Can be NULL. + */ +void heap_caps_free( void *ptr); + +/** + * @brief Reallocate memory previously allocated via heaps_caps_malloc() or heaps_caps_realloc(). + * + * Equivalent semantics to libc realloc(), for capability-aware memory. + * + * In IDF, realloc(p, s) is equivalent to heap_caps_realloc(p, s, MALLOC_CAP_8BIT). + * + * 'caps' parameter can be different to the capabilities that any original 'ptr' was allocated with. In this way, + * realloc can be used to "move" a buffer if necessary to ensure it meets new set of capabilities. + * + * @param ptr Pointer to previously allocated memory, or NULL for a new allocation. + * @param size Size of the new buffer requested, or 0 to free the buffer. 
+ * @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type + * of memory desired for the new allocation. + * + * @return Pointer to a new buffer of size 'size' with capabilities 'caps', or NULL if allocation failed. + */ +void *heap_caps_realloc( void *ptr, size_t size, int caps); + + +/** + * @brief Get the total free size of all the regions that have the given capabilities + * + * This function takes all regions capable of having the given capabilities allocated in them + * and adds up the free space they have. + * + * Note that because of heap fragmentation it is probably not possible to allocate a single block of memory + * of this size. Use heap_caps_get_largest_free_block() for this purpose. + + * @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type + * of memory + * + * @return Amount of free bytes in the regions + */ +size_t heap_caps_get_free_size( uint32_t caps ); + + +/** + * @brief Get the total minimum free memory of all regions with the given capabilities + * + * This adds all the low water marks of the regions capable of delivering the memory + * with the given capabilities. + * + * Note the result may be less than the global all-time minimum available heap of this kind, as "low water marks" are + * tracked per-heap. Individual heaps may have reached their "low water marks" at different points in time. However + * this result still gives a "worst case" indication for all-time free heap. + * + * @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type + * of memory + * + * @return Amount of free bytes in the regions + */ +size_t heap_caps_get_minimum_free_size( uint32_t caps ); + +/** + * @brief Get the largest free block of memory able to be allocated with the given capabilities. + * + * Returns the largest value of 's' for which heap_caps_malloc(s, caps) will succeed. + * + * @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type + * of memory + * + * @return Size of largest free block in bytes. + */ +size_t heap_caps_get_largest_free_block( uint32_t caps ); + + +/** + * @brief Get heap info for all regions with the given capabilities. + * + * Calls multi_heap_info() on all heaps which share the given capabilities. The information returned is an aggregate + * across all matching heaps. The meanings of fields are the same as defined for multi_heap_info_t, except that + * minimum_free_bytes has the same caveats described in heap_caps_get_minimum_free_size(). + * + * @param info Pointer to a structure which will be filled with relevant + * heap metadata. + * @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type + * of memory + * + */ +void heap_caps_get_info( multi_heap_info_t *info, uint32_t caps ); + + +/** + * @brief Print a summary of all memory with the given capabilities. + * + * Calls multi_heap_info() on all heaps which share the given capabilities, and + * prints a two-line summary for each, then a total summary. + * + * @param caps Bitwise OR of MALLOC_CAP_* flags indicating the type + * of memory + * + */ +void heap_caps_print_heap_info( uint32_t caps ); + diff --git a/components/heap/include/multi_heap.h b/components/heap/include/multi_heap.h new file mode 100644 index 0000000000..1bad09a99f --- /dev/null +++ b/components/heap/include/multi_heap.h @@ -0,0 +1,169 @@ +// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
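A short sketch of how the aggregate heap_caps_get_info() call declared above might be used to watch for fragmentation (not part of this patch; the 50% threshold is arbitrary):

    #include <stdio.h>
    #include "esp_heap_caps.h"

    void log_fragmentation(void)
    {
        multi_heap_info_t info;
        heap_caps_get_info(&info, MALLOC_CAP_8BIT);

        /* The total free space can be much larger than the biggest single
           allocation that would currently succeed. */
        if (info.total_free_bytes > 0 &&
            info.largest_free_block < info.total_free_bytes / 2) {
            printf("8-bit capable heap is fragmented: %u bytes free, largest block %u bytes\n",
                   (unsigned) info.total_free_bytes, (unsigned) info.largest_free_block);
        }
    }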
+// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once +#include +#include +#include + +/* multi_heap is a heap implementation for handling multiple + heterogenous heaps in a single program. + + Any contiguous block of memory can be registered as a heap. +*/ + +#ifdef __cplusplus +extern "C" { +#endif + +/** @brief Opaque handle to a registered heap */ +typedef struct multi_heap_info *multi_heap_handle_t; + +/** @brief malloc() a buffer in a given heap + * + * Semantics are the same as standard malloc(), only the returned buffer will be allocated in the specified heap. + * + * @param heap Handle to a registered heap. + * @param size Size of desired buffer. + * + * @return Pointer to new memory, or NULL if allocation fails. + */ +void *multi_heap_malloc(multi_heap_handle_t heap, size_t size); + +/** @brief free() a buffer in a given heap. + * + * Semantics are the same as standard free(), only the argument 'p' must be NULL or have been allocated in the specified heap. + * + * @param heap Handle to a registered heap. + * @param p NULL, or a pointer previously returned from multi_heap_malloc() or multi_heap_realloc() for the same heap. + */ +void multi_heap_free(multi_heap_handle_t heap, void *p); + +/** @brief realloc() a buffer in a given heap. + * + * Semantics are the same as standard realloc(), only the argument 'p' must be NULL or have been allocated in the specified heap. + * + * @param heap Handle to a registered heap. + * @param p NULL, or a pointer previously returned from multi_heap_malloc() or multi_heap_realloc() for the same heap. + * @param size Desired new size for buffer. + * + * @return New buffer of 'size' containing contents of 'p', or NULL if reallocation failed. + */ +void *multi_heap_realloc(multi_heap_handle_t heap, void *p, size_t size); + + +/** @brief Return the size that a particular pointer was allocated with. + * + * @param heap Handle to a registered heap. + * @param p Pointer, must have been previously returned from multi_heap_malloc() or multi_heap_realloc() for the same heap. + * + * @return Size of the memory allocated at this block. May be more than the original size argument, due + * to padding and minimum block sizes. + */ +size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p); + + +/** @brief Register a new heap for use + * + * This function initialises a heap at the specified address, and returns a handle for future heap operations. + * + * There is no equivalent function for deregistering a heap - if all blocks in the heap are free, you can immediately start using the memory for other purposes. + * + * @param start Start address of the memory to use for a new heap. + * @param size Size (in bytes) of the new heap. + * + * @return Handle of a new heap ready for use, or NULL if the heap region was too small to be initialised. + */ +multi_heap_handle_t multi_heap_register(void *start, size_t size); + + +/** @brief Associate a private lock pointer with a heap + * + * The lock argument is supplied to the MULTI_HEAP_LOCK() and MULTI_HEAP_UNLOCK() macros, defined in multi_heap_platform.h. + * + * When the heap is first registered, the associated lock is NULL. 
+ * + * @param heap Handle to a registered heap. + * @param lock Optional pointer to a locking structure to associate with this heap. + */ +void multi_heap_set_lock(multi_heap_handle_t heap, void* lock); + +/** @brief Dump heap information to stdout + * + * For debugging purposes, this function dumps information about every block in the heap to stdout. + * + * @param heap Handle to a registered heap. + */ +void multi_heap_dump(multi_heap_handle_t heap); + +/** @brief Check heap integrity + * + * Walks the heap and checks all heap data structures are valid. If any errors are detected, an error-specific message + * can be optionally printed to stderr. Print behaviour can be overriden at compile time by defining + * MULTI_CHECK_FAIL_PRINTF in multi_heap_platform.h. + * + * @param heap Handle to a registered heap. + * @param print_errors If true, errors will be printed to stderr. + * @return true if heap is valid, false otherwise. + */ +bool multi_heap_check(multi_heap_handle_t heap, bool print_errors); + +/** @brief Return free heap size + * + * Returns the number of bytes available in the heap. + * + * Equivalent to the total_free_bytes member returned by multi_heap_get_heap_info(). + * + * Note that the heap may be fragmented, so the actual maximum size for a single malloc() may be lower. To know this + * size, see the largest_free_block member returned by multi_heap_get_heap_info(). + * + * @param heap Handle to a registered heap. + * @return Number of free bytes. + */ +size_t multi_heap_free_size(multi_heap_handle_t heap); + +/** @brief Return the lifetime minimum free heap size + * + * Equivalent to the minimum_free_bytes member returned by multi_get_heap_info(). + * + * Returns the lifetime "low water mark" of possible values returned from multi_free_heap_size(), for the specified + * heap. + * + * @param heap Handle to a registered heap. + * @return Number of free bytes. + */ +size_t multi_heap_minimum_free_size(multi_heap_handle_t heap); + +/** @brief Structure to access heap metadata via multi_get_heap_info */ +typedef struct { + size_t total_free_bytes; ///< Total free bytes in the heap. Equivalent to multi_free_heap_size(). + size_t total_allocated_bytes; ///< Total bytes allocated to data in the heap. + size_t largest_free_block; ///< Size of largest free block in the heap. This is the largest malloc-able size. + size_t minimum_free_bytes; ///< Lifetime minimum free heap size. Equivalent to multi_minimum_free_heap_size(). + size_t allocated_blocks; ///< Number of (variable size) blocks allocated in the heap. + size_t free_blocks; ///< Number of (variable size) free blocks in the heap. + size_t total_blocks; ///< Total number of (variable size) blocks in the heap. +} multi_heap_info_t; + +/** @brief Return metadata about a given heap + * + * Fills a multi_heap_info_t structure with information about the specified heap. + * + * @param heap Handle to a registered heap. + * @param info Pointer to a structure to fill with heap metadata. + */ +void multi_heap_get_info(multi_heap_handle_t heap, multi_heap_info_t *info); + +#ifdef __cplusplus +} +#endif diff --git a/components/heap/multi_heap.c b/components/heap/multi_heap.c new file mode 100644 index 0000000000..65436c9251 --- /dev/null +++ b/components/heap/multi_heap.c @@ -0,0 +1,595 @@ +// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
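The multi_heap API above is independent of the capability layer: any contiguous buffer can be registered as a heap. A minimal sketch (not part of this patch; the pool and sizes are arbitrary, and no lock is installed because single-threaded use is assumed):

    #include <assert.h>
    #include <stdbool.h>
    #include "multi_heap.h"

    static char s_pool[4096];   /* hypothetical memory region to serve allocations from */

    void pool_example(void)
    {
        multi_heap_handle_t h = multi_heap_register(s_pool, sizeof(s_pool));
        assert(h != NULL);

        void *p = multi_heap_malloc(h, 100);
        assert(p != NULL);

        void *q = multi_heap_realloc(h, p, 200);  /* if this fails, 'p' is still valid */
        multi_heap_free(h, q != NULL ? q : p);

        assert(multi_heap_check(h, true));        /* heap metadata should still be intact */
    }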
+// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#include +#include +#include +#include +#include +#include +#include +#include + +/* Note: Keep platform-specific parts in this header, this source + file should depend on libc only */ +#include "multi_heap_platform.h" + +#define ALIGN(X) ((X) & ~(sizeof(void *)-1)) +#define ALIGN_UP(X) ALIGN((X)+sizeof(void *)-1) + +struct heap_block; + +/* Block in the heap + + Heap implementation uses two single linked lists, a block list (all blocks) and a free list (free blocks). + + 'header' holds a pointer to the next block (used or free) ORed with a free flag (the LSB of the pointer.) is_free() and get_next_block() utility functions allow typed access to these values. + + 'next_free' is valid if the block is free and is a pointer to the next block in the free list. +*/ +typedef struct heap_block { + intptr_t header; /* Encodes next block in heap (used or unused) and also free/used flag */ + union { + uint8_t data[1]; /* First byte of data, valid if block is used. Actual size of data is 'block_data_size(block)' */ + struct heap_block *next_free; /* Pointer to next free block, valid if block is free */ + }; +} heap_block_t; + +/* These masks apply to the 'header' field of heap_block_t */ +#define BLOCK_FREE_FLAG 0x1 /* If set, this block is free & next_free pointer is valid */ +#define NEXT_BLOCK_MASK (~3) /* AND header with this mask to get pointer to next block (free or used) */ + +/* Metadata header for the heap, stored at the beginning of heap space. + + 'first_block' is a "fake" first block, minimum length, used to provide a pointer to the first used & free block in + the heap. This block is never allocated or merged into an adjacent block. + + 'last_block' is a pointer to a final free block of length 0, which is added at the end of the heap when it is + registered. This block is also never allocated or merged into an adjacent block. + */ +typedef struct multi_heap_info { + void *lock; + size_t free_bytes; + size_t minimum_free_bytes; + heap_block_t *last_block; + heap_block_t first_block; /* initial 'free block', never allocated */ +} heap_t; + +/* Given a pointer to the 'data' field of a block (ie the previous malloc/realloc result), return a pointer to the + containing block. +*/ +static inline heap_block_t *get_block(const void *data_ptr) +{ + return (heap_block_t *)((char *)data_ptr - offsetof(heap_block_t, data)); +} + +/* Return the next sequential block in the heap. + */ +static inline heap_block_t *get_next_block(const heap_block_t *block) +{ + intptr_t next = block->header & NEXT_BLOCK_MASK; + if (next == 0) { + return NULL; /* last_block */ + } + assert(next > (intptr_t)block); + return (heap_block_t *)next; +} + +/* Return true if this block is free. 
*/ +static inline bool is_free(const heap_block_t *block) +{ + return block->header & BLOCK_FREE_FLAG; +} + +/* Return true if this block is the last_block in the heap + (the only block with no next pointer) */ +static inline bool is_last_block(const heap_block_t *block) +{ + return (block->header & NEXT_BLOCK_MASK) == 0; +} + +/* Data size of the block (excludes this block's header) */ +static inline size_t block_data_size(const heap_block_t *block) +{ + intptr_t next = (intptr_t)block->header & NEXT_BLOCK_MASK; + intptr_t this = (intptr_t)block; + if (next == 0) { + return 0; /* this is the last block in the heap */ + } + return next - this - sizeof(block->header); +} + +/* Check a block is valid for this heap. Used to verify parameters. */ +static void assert_valid_block(const heap_t *heap, const heap_block_t *block) +{ + assert(block >= &heap->first_block && block <= heap->last_block); /* block should be in heap */ + if (heap < (const heap_t *)heap->last_block) { + const heap_block_t *next = get_next_block(block); + assert(next >= &heap->first_block && next <= heap->last_block); + if (is_free(block)) { + assert(block->next_free >= &heap->first_block && block->next_free <= heap->last_block); + } + } +} + +/* Get the first free block before 'block' in the heap. 'block' can be a free block or in use. + + Result is always the closest free block to 'block' in the heap, that is located before 'block'. There may be multiple + allocated blocks between the result and 'block'. + + If 'block' is free, the result's 'next_free' pointer will already point to 'block'. + + Result will never be NULL, but it may be the header block heap->first_block. +*/ +static heap_block_t *get_prev_free_block(heap_t *heap, const heap_block_t *block) +{ + assert(block != &heap->first_block); /* can't look for a block before first_block */ + + for (heap_block_t *b = &heap->first_block; b != NULL && b < block; b = b->next_free) { + assert(is_free(b)); + if (b->next_free == NULL || b->next_free >= block) { + if (is_free(block)) { + assert(b->next_free == block); /* if block is on freelist, 'b' should be the item before it. */ + } + return b; /* b is the last free block before 'block' */ + } + } + abort(); /* There should always be a previous free block, even if it's heap->first_block */ +} + +/* Merge some block 'a' into the following block 'b'. + + If both blocks are free, resulting block is marked free. + If only one block is free, resulting block is marked in use. No data is moved. + + This operation may fail if block 'a' is the first block or 'b' is the last block, + the caller should check block_data_size() to know if anything happened here or not. +*/ +static heap_block_t *merge_adjacent(heap_t *heap, heap_block_t *a, heap_block_t *b) +{ + assert(a < b); + + /* Can't merge header blocks, just return the non-header block as-is */ + if (is_last_block(b)) { + return a; + } + if (a == &heap->first_block) { + return b; + } + + assert(get_next_block(a) == b); + + bool free = is_free(a) && is_free(b); /* merging two free blocks creates a free block */ + if (!free && (is_free(a) || is_free(b))) { + /* only one of these blocks is free, so resulting block will be a used block. + means we need to take the free block out of the free list + */ + heap_block_t *free_block = is_free(a) ? 
a : b; + heap_block_t *prev_free = get_prev_free_block(heap, free_block); + assert(free_block->next_free > prev_free); + prev_free->next_free = free_block->next_free; + + heap->free_bytes -= block_data_size(free_block); + } + + a->header = b->header & NEXT_BLOCK_MASK; + assert(a->header != 0); + if (free) { + a->header |= BLOCK_FREE_FLAG; + assert(b->next_free == NULL || b->next_free > a); + assert(b->next_free == NULL || b->next_free > b); + a->next_free = b->next_free; + + /* b's header can be put into the pool of free bytes */ + heap->free_bytes += sizeof(a->header); + } + + return a; +} + +/* Split a block so it can hold at least 'size' bytes of data, making any spare + space into a new free block. + + 'block' should be marked in-use when this function is called (implementation detail, this function + doesn't set the next_free pointer). + + 'prev_free_block' is the free block before 'block', if already known. Can be NULL if not yet known. + (This is a performance optimisation to avoid walking the freelist twice when possible.) +*/ +static void split_if_necessary(heap_t *heap, heap_block_t *block, size_t size, heap_block_t *prev_free_block) +{ + assert(!is_free(block)); /* split_if_necessary doesn't expect a free block */ + assert(size <= block_data_size(block)); /* can't grow a block this way! */ + size = ALIGN_UP(size); + + /* can't split the head or tail block */ + assert(block != &heap->first_block); + assert(!is_last_block(block)); + + if (block_data_size(block) < size + sizeof(heap_block_t)) { + /* Can't split 'block' if we're not going to get a usable free block afterwards */ + return; + } + + /* Block is larger than it needs to be, insert a new free block after it */ + heap_block_t *new_block = (heap_block_t *)(block->data + size); + new_block->header = block->header | BLOCK_FREE_FLAG; + block->header = (intptr_t)new_block; + + if (prev_free_block == NULL) { + prev_free_block = get_prev_free_block(heap, block); + } + assert(prev_free_block->next_free > new_block); /* prev_free_block should point to a free block after new_block */ + new_block->next_free = prev_free_block->next_free; + prev_free_block->next_free = new_block; + heap->free_bytes += block_data_size(new_block); +} + +size_t multi_heap_get_allocated_size(multi_heap_handle_t heap, void *p) +{ + heap_block_t *pb = get_block(p); + + assert_valid_block(heap, pb); + assert(!is_free(pb)); + return block_data_size(pb); +} + +multi_heap_handle_t multi_heap_register(void *start, size_t size) +{ + heap_t *heap = (heap_t *)ALIGN_UP((intptr_t)start); + uintptr_t end = ALIGN((uintptr_t)start + size); + if (end - (uintptr_t)start < sizeof(heap_t) + 2*sizeof(heap_block_t)) { + return NULL; /* 'size' is too small to fit a heap here */ + } + heap->lock = NULL; + heap->last_block = (heap_block_t *)(end - sizeof(heap_block_t)); + + /* first 'real' (allocatable) free block goes after the heap structure */ + heap_block_t *first_free_block = (heap_block_t *)((intptr_t)start + sizeof(heap_t)); + first_free_block->header = (intptr_t)heap->last_block | BLOCK_FREE_FLAG; + first_free_block->next_free = heap->last_block; + + /* last block is 'free' but has a NULL next pointer */ + heap->last_block->header = BLOCK_FREE_FLAG; + heap->last_block->next_free = NULL; + + /* first block also 'free' but has legitimate length, + malloc will never allocate into this block. 
*/ + heap->first_block.header = (intptr_t)first_free_block | BLOCK_FREE_FLAG; + heap->first_block.next_free = first_free_block; + + /* free bytes is: + - total bytes in heap + - minus heap_t header at top (includes heap->first_block) + - minus header of first_free_block + - minus whole block at heap->last_block + */ + heap->free_bytes = ALIGN(size) - sizeof(heap_t) - sizeof(first_free_block->header) - sizeof(heap_block_t); + heap->minimum_free_bytes = heap->free_bytes; + + return heap; +} + +void multi_heap_set_lock(multi_heap_handle_t heap, void *lock) +{ + heap->lock = lock; +} + +void *multi_heap_malloc(multi_heap_handle_t heap, size_t size) +{ + heap_block_t *best_block = NULL; + heap_block_t *prev_free = NULL; + heap_block_t *prev = NULL; + size_t best_size = SIZE_MAX; + size = ALIGN_UP(size); + + if (size == 0 || heap == NULL || heap->free_bytes < size) { + return NULL; + } + + MULTI_HEAP_LOCK(heap->lock); + + /* Find best free block to perform the allocation in */ + prev = &heap->first_block; + for (heap_block_t *b = heap->first_block.next_free; b != NULL; b = b->next_free) { + assert(is_free(b)); + size_t bs = block_data_size(b); + if (bs >= size && bs < best_size) { + best_block = b; + best_size = bs; + prev_free = prev; + if (bs == size) { + break; /* we've found a perfect sized block */ + } + } + prev = b; + } + + if (best_block == NULL) { + MULTI_HEAP_UNLOCK(heap->lock); + return NULL; /* No room in heap */ + } + + prev_free->next_free = best_block->next_free; + best_block->header &= ~BLOCK_FREE_FLAG; + + heap->free_bytes -= block_data_size(best_block); + + split_if_necessary(heap, best_block, size, prev_free); + + if (heap->free_bytes < heap->minimum_free_bytes) { + heap->minimum_free_bytes = heap->free_bytes; + } + + MULTI_HEAP_UNLOCK(heap->lock); + + return best_block->data; +} + +void multi_heap_free(multi_heap_handle_t heap, void *p) +{ + heap_block_t *pb = get_block(p); + + if (heap == NULL || p == NULL) { + return; + } + + MULTI_HEAP_LOCK(heap->lock); + + assert_valid_block(heap, pb); + assert(!is_free(pb)); + assert(!is_last_block(pb)); + assert(pb != &heap->first_block); + + heap_block_t *next = get_next_block(pb); + + /* Update freelist pointers */ + heap_block_t *prev_free = get_prev_free_block(heap, pb); + assert(prev_free->next_free == NULL || prev_free->next_free > pb); + pb->next_free = prev_free->next_free; + prev_free->next_free = pb; + + /* Mark this block as free */ + pb->header |= BLOCK_FREE_FLAG; + + heap->free_bytes += block_data_size(pb); + + /* Try and merge previous free block into this one */ + if (get_next_block(prev_free) == pb) { + pb = merge_adjacent(heap, prev_free, pb); + } + + /* If next block is free, try to merge the two */ + if (is_free(next)) { + pb = merge_adjacent(heap, pb, next); + } + + MULTI_HEAP_UNLOCK(heap->lock); +} + + +void *multi_heap_realloc(multi_heap_handle_t heap, void *p, size_t size) +{ + heap_block_t *pb = get_block(p); + void *result; + size = ALIGN_UP(size); + + assert(heap != NULL); + + if (p == NULL) { + return multi_heap_malloc(heap, size); + } + + assert_valid_block(heap, pb); + assert(!is_free(pb) && "realloc arg should be allocated"); + + if (size == 0) { + multi_heap_free(heap, p); + return NULL; + } + + if (heap == NULL) { + return NULL; + } + + MULTI_HEAP_LOCK(heap->lock); + result = NULL; + + if (size <= block_data_size(pb)) { + // Shrinking.... 
+ split_if_necessary(heap, pb, size, NULL); + result = pb->data; + } + else if (heap->free_bytes < size - block_data_size(pb)) { + // Growing, but there's not enough total free space in the heap + MULTI_HEAP_UNLOCK(heap->lock); + return NULL; + } + + // New size is larger than existing block + if (result == NULL) { + // See if we can grow into one or both adjacent blocks + heap_block_t *orig_pb = pb; + size_t orig_size = block_data_size(orig_pb); + heap_block_t *next = get_next_block(pb); + heap_block_t *prev = get_prev_free_block(heap, pb); + + // Can only grow into the previous free block if it's adjacent + size_t prev_grow_size = (get_next_block(prev) == pb) ? block_data_size(prev) : 0; + + // Can grow into next block? (we may also need to grow into 'prev' to get to our desired size) + if (is_free(next) && (block_data_size(pb) + block_data_size(next) + prev_grow_size >= size)) { + pb = merge_adjacent(heap, pb, next); + } + + // Can grow into previous block? + // (try this even if we're already big enough from growing into 'next', as it reduces fragmentation) + if (prev_grow_size > 0 && (block_data_size(pb) + prev_grow_size >= size)) { + pb = merge_adjacent(heap, prev, pb); + // this doesn't guarantee we'll be left with a big enough block, as it's + // possible for the merge to fail if prev == heap->first_block + } + + if (block_data_size(pb) >= size) { + memmove(pb->data, orig_pb->data, orig_size); + split_if_necessary(heap, pb, size, NULL); + result = pb->data; + } + } + + if (result == NULL) { + // Need to allocate elsewhere and copy data over + result = multi_heap_malloc(heap, size); + if (result != NULL) { + memcpy(result, pb->data, block_data_size(pb)); + multi_heap_free(heap, pb->data); + } + } + + if (heap->free_bytes < heap->minimum_free_bytes) { + heap->minimum_free_bytes = heap->free_bytes; + } + + MULTI_HEAP_UNLOCK(heap->lock); + return result; +} + +#define FAIL_PRINT(MSG, ...) 
do { \ + if (print_errors) { \ + MULTI_HEAP_STDERR_PRINTF(MSG, __VA_ARGS__); \ + } \ + valid = false; \ + } \ + while(0) + +bool multi_heap_check(multi_heap_handle_t heap, bool print_errors) +{ + bool valid = true; + size_t total_free_bytes = 0; + assert(heap != NULL); + + MULTI_HEAP_LOCK(heap->lock); + + heap_block_t *prev = NULL; + heap_block_t *prev_free = NULL; + heap_block_t *expected_free = NULL; + + /* note: not using get_next_block() in loop, so that assertions aren't checked here */ + for(heap_block_t *b = &heap->first_block; b != NULL; b = (heap_block_t *)(b->header & NEXT_BLOCK_MASK)) { + if (b == prev) { + FAIL_PRINT("CORRUPT HEAP: Block %p points to itself\n", b); + goto done; + } + if (b < prev) { + FAIL_PRINT("CORRUPT HEAP: Block %p is before prev block %p\n", b, prev); + goto done; + } + if (b > heap->last_block || b < &heap->first_block) { + FAIL_PRINT("CORRUPT HEAP: Block %p is outside heap (last valid block %p)\n", b, prev); + goto done; + } + prev = b; + + if (is_free(b)) { + if (expected_free != NULL && expected_free != b) { + FAIL_PRINT("CORRUPT HEAP: Prev free block %p pointed to next free %p but this free block is %p\n", + prev_free, expected_free, b); + } + prev_free = b; + expected_free = b->next_free; + if (b != &heap->first_block) { + total_free_bytes += block_data_size(b); + } + } + } + + if (prev != heap->last_block) { + FAIL_PRINT("CORRUPT HEAP: Ended at %p not %p\n", prev, heap->last_block); + } + if (!is_free(heap->last_block)) { + FAIL_PRINT("CORRUPT HEAP: Expected prev block %p to be free\n", heap->last_block); + } + + if (heap->free_bytes != total_free_bytes) { + FAIL_PRINT("CORRUPT HEAP: Expected %u free bytes counted %u\n", (unsigned)heap->free_bytes, (unsigned)total_free_bytes); + } + + done: + MULTI_HEAP_UNLOCK(heap->lock); + + return valid; +} + +void multi_heap_dump(multi_heap_handle_t heap) +{ + assert(heap != NULL); + + MULTI_HEAP_LOCK(heap->lock); + printf("Heap start %p end %p\nFirst free block %p\n", &heap->first_block, heap->last_block, heap->first_block.next_free); + for(heap_block_t *b = &heap->first_block; b != NULL; b = get_next_block(b)) { + printf("Block %p data size 0x%08zx bytes next block %p", b, block_data_size(b), get_next_block(b)); + if (is_free(b)) { + printf(" FREE. 
Next free %p\n", b->next_free); + } else { + printf("\n"); + } + } + MULTI_HEAP_UNLOCK(heap->lock); +} + +size_t multi_heap_free_size(multi_heap_handle_t heap) +{ + if (heap == NULL) { + return 0; + } + return heap->free_bytes; +} + +size_t multi_heap_minimum_free_size(multi_heap_handle_t heap) +{ + if (heap == NULL) { + return 0; + } + return heap->minimum_free_bytes; +} + +void multi_heap_get_info(multi_heap_handle_t heap, multi_heap_info_t *info) +{ + memset(info, 0, sizeof(multi_heap_info_t)); + + if (heap == NULL) { + return; + } + + MULTI_HEAP_LOCK(heap->lock); + for(heap_block_t *b = get_next_block(&heap->first_block); !is_last_block(b); b = get_next_block(b)) { + info->total_blocks++; + if (is_free(b)) { + size_t s = block_data_size(b); + info->total_free_bytes += s; + if (s > info->largest_free_block) { + info->largest_free_block = s; + } + info->free_blocks++; + } else { + info->total_allocated_bytes += block_data_size(b); + info->allocated_blocks++; + } + } + + info->minimum_free_bytes = heap->minimum_free_bytes; + assert(info->total_free_bytes == heap->free_bytes); + + MULTI_HEAP_UNLOCK(heap->lock); + +} diff --git a/components/heap/multi_heap_platform.h b/components/heap/multi_heap_platform.h new file mode 100644 index 0000000000..2c4d9f96f6 --- /dev/null +++ b/components/heap/multi_heap_platform.h @@ -0,0 +1,50 @@ +// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once + +#ifdef ESP_PLATFORM + +#include +#include +#include + +/* Because malloc/free can happen inside an ISR context, + we need to use portmux spinlocks here not RTOS mutexes */ +#define MULTI_HEAP_LOCK(PLOCK) do { \ + if((PLOCK) != NULL) { \ + taskENTER_CRITICAL((portMUX_TYPE *)(PLOCK)); \ + } \ + } while(0) + + +#define MULTI_HEAP_UNLOCK(PLOCK) do { \ + if ((PLOCK) != NULL) { \ + taskEXIT_CRITICAL((portMUX_TYPE *)(PLOCK)); \ + } \ + } while(0) + +/* Not safe to use std i/o while in a portmux critical section, + can deadlock, so we use the ROM equivalent functions. */ + +#define MULTI_HEAP_PRINTF ets_printf +#define MULTI_HEAP_STDERR_PRINTF(MSG, ...) ets_printf(MSG, __VA_ARGS__) + +#else + +#define MULTI_HEAP_PRINTF printf +#define MULTI_HEAP_STDERR_PRINTF(MSG, ...) 
fprintf(stderr, MSG, __VA_ARGS__) +#define MULTI_HEAP_LOCK(PLOCK) +#define MULTI_HEAP_UNLOCK(PLOCK) + +#endif diff --git a/components/heap/test/component.mk b/components/heap/test/component.mk new file mode 100644 index 0000000000..5dd172bdb7 --- /dev/null +++ b/components/heap/test/component.mk @@ -0,0 +1,5 @@ +# +#Component Makefile +# + +COMPONENT_ADD_LDFLAGS = -Wl,--whole-archive -l$(COMPONENT_NAME) -Wl,--no-whole-archive diff --git a/components/freertos/test/test_malloc.c b/components/heap/test/test_malloc.c similarity index 88% rename from components/freertos/test/test_malloc.c rename to components/heap/test/test_malloc.c index 7bcc291e30..2ef74965a1 100644 --- a/components/freertos/test/test_malloc.c +++ b/components/heap/test/test_malloc.c @@ -16,17 +16,23 @@ #include "soc/dport_reg.h" #include "soc/io_mux_reg.h" +#include "esp_panic.h" + static int tryAllocMem() { int **mem; int i, noAllocated, j; - mem=malloc(sizeof(int)*1024); + + mem=malloc(sizeof(int *)*1024); if (!mem) return 0; + for (i=0; i<1024; i++) { mem[i]=malloc(1024); if (mem[i]==NULL) break; for (j=0; j<1024/4; j++) mem[i][j]=(0xdeadbeef); } + noAllocated=i; + for (i=0; i +#include +#include "unity.h" +#include "esp_attr.h" +#include "esp_heap_caps.h" +#include "esp_spi_flash.h" +#include + +TEST_CASE("Capabilities allocator test", "[heap]") +{ + char *m1, *m2[10]; + int x; + size_t free8start, free32start, free8, free32; + + /* It's important we printf() something before we take the empty heap sizes, + as the first printf() in a task allocates heap resources... */ + printf("Testing capabilities allocator...\n"); + + free8start = heap_caps_get_free_size(MALLOC_CAP_8BIT); + free32start = heap_caps_get_free_size(MALLOC_CAP_32BIT); + printf("Free 8bit-capable memory (start): %dK, 32-bit capable memory %dK\n", free8start, free32start); + TEST_ASSERT(free32start>free8start); + + printf("Allocating 10K of 8-bit capable RAM\n"); + m1= heap_caps_malloc(10*1024, MALLOC_CAP_8BIT); + printf("--> %p\n", m1); + free8 = heap_caps_get_free_size(MALLOC_CAP_8BIT); + free32 = heap_caps_get_free_size(MALLOC_CAP_32BIT); + printf("Free 8bit-capable memory (both reduced): %dK, 32-bit capable memory %dK\n", free8, free32); + //Both should have gone down by 10K; 8bit capable ram is also 32-bit capable + TEST_ASSERT(free8<(free8start-10*1024)); + TEST_ASSERT(free32<(free32start-10*1024)); + //Assume we got DRAM back + TEST_ASSERT((((int)m1)&0xFF000000)==0x3F000000); + free(m1); + + printf("Freeing; allocating 10K of 32K-capable RAM\n"); + m1 = heap_caps_malloc(10*1024, MALLOC_CAP_32BIT); + printf("--> %p\n", m1); + free8 = heap_caps_get_free_size(MALLOC_CAP_8BIT); + free32 = heap_caps_get_free_size(MALLOC_CAP_32BIT); + printf("Free 8bit-capable memory (after 32-bit): %dK, 32-bit capable memory %dK\n", free8, free32); + //Only 32-bit should have gone down by 10K: 32-bit isn't necessarily 8bit capable + TEST_ASSERT(free32<(free32start-10*1024)); + TEST_ASSERT(free8==free8start); + //Assume we got IRAM back + TEST_ASSERT((((int)m1)&0xFF000000)==0x40000000); + free(m1); + printf("Allocating impossible caps\n"); + m1= heap_caps_malloc(10*1024, MALLOC_CAP_8BIT|MALLOC_CAP_EXEC); + printf("--> %p\n", m1); + TEST_ASSERT(m1==NULL); + printf("Testing changeover iram -> dram"); + // priorities will exhaust IRAM first, then start allocating from DRAM + for (x=0; x<10; x++) { + m2[x]= heap_caps_malloc(10*1024, MALLOC_CAP_32BIT); + printf("--> %p\n", m2[x]); + } + TEST_ASSERT((((int)m2[0])&0xFF000000)==0x40000000); + 
TEST_ASSERT((((int)m2[9])&0xFF000000)==0x3F000000); + printf("Test if allocating executable code still gives IRAM, even with dedicated IRAM region depleted\n"); + // (the allocation should come from D/IRAM) + m1= heap_caps_malloc(10*1024, MALLOC_CAP_EXEC); + printf("--> %p\n", m1); + TEST_ASSERT((((int)m1)&0xFF000000)==0x40000000); + free(m1); + for (x=0; x<10; x++) free(m2[x]); + printf("Done.\n"); +} + +TEST_CASE("heap_caps metadata test", "[heap]") +{ + /* need to print something as first printf allocates some heap */ + printf("heap_caps metadata test\n"); + heap_caps_print_heap_info(MALLOC_CAP_8BIT); + heap_caps_print_heap_info(MALLOC_CAP_32BIT); + + multi_heap_info_t original; + heap_caps_get_info(&original, MALLOC_CAP_8BIT); + + void *b = heap_caps_malloc(original.largest_free_block, MALLOC_CAP_8BIT); + TEST_ASSERT_NOT_NULL(b); + + printf("After allocating %d bytes:\n", original.largest_free_block); + heap_caps_print_heap_info(MALLOC_CAP_8BIT); + + multi_heap_info_t after; + heap_caps_get_info(&after, MALLOC_CAP_8BIT); + TEST_ASSERT(after.largest_free_block < original.largest_free_block); + TEST_ASSERT(after.total_free_bytes < original.total_free_bytes); + + free(b); + heap_caps_get_info(&after, MALLOC_CAP_8BIT); + TEST_ASSERT_EQUAL(after.total_free_bytes, original.total_free_bytes); + TEST_ASSERT_EQUAL(after.largest_free_block, original.largest_free_block); + TEST_ASSERT(after.minimum_free_bytes < original.total_free_bytes); +} + +/* Small function runs from IRAM to check that malloc/free/realloc + all work OK when cache is disabled... +*/ +static IRAM_ATTR __attribute__((noinline)) bool iram_malloc_test() +{ + g_flash_guard_default_ops.start(); // Disables flash cache + + bool result = true; + void *x = heap_caps_malloc(64, MALLOC_CAP_32BIT); + result = result && (x != NULL); + void *y = heap_caps_realloc(x, 32, MALLOC_CAP_32BIT); + result = result && (y != NULL); + heap_caps_free(y); + + g_flash_guard_default_ops.end(); // Re-enables flash cache + + return result; +} + +TEST_CASE("heap_caps_xxx functions work with flash cache disabled", "[heap]") +{ + TEST_ASSERT( iram_malloc_test() ); +} diff --git a/components/heap/test_multi_heap_host/Makefile b/components/heap/test_multi_heap_host/Makefile new file mode 100644 index 0000000000..03f050d849 --- /dev/null +++ b/components/heap/test_multi_heap_host/Makefile @@ -0,0 +1,48 @@ +TEST_PROGRAM=test_multi_heap +all: $(TEST_PROGRAM) + +SOURCE_FILES = $(abspath \ + ../multi_heap.c \ + test_multi_heap.cpp \ + main.cpp \ + ) + +INCLUDE_FLAGS = -I../include -I../../../tools/catch + +GCOV ?= gcov + +CPPFLAGS += $(INCLUDE_FLAGS) -D CONFIG_LOG_DEFAULT_LEVEL -g -fstack-protector-all -m32 +CFLAGS += -fprofile-arcs -ftest-coverage +CXXFLAGS += -std=c++11 -Wall -Werror -fprofile-arcs -ftest-coverage +LDFLAGS += -lstdc++ -fprofile-arcs -ftest-coverage -m32 + +OBJ_FILES = $(filter %.o, $(SOURCE_FILES:.cpp=.o) $(SOURCE_FILES:.c=.o)) + +COVERAGE_FILES = $(OBJ_FILES:.o=.gc*) + +$(TEST_PROGRAM): $(OBJ_FILES) + g++ $(LDFLAGS) -o $(TEST_PROGRAM) $(OBJ_FILES) + +$(OUTPUT_DIR): + mkdir -p $(OUTPUT_DIR) + +test: $(TEST_PROGRAM) + ./$(TEST_PROGRAM) + +$(COVERAGE_FILES): $(TEST_PROGRAM) test + +coverage.info: $(COVERAGE_FILES) + find ../ -name "*.gcno" -exec $(GCOV) -r -pb {} + + lcov --capture --directory $(abspath ../) --no-external --output-file coverage.info --gcov-tool $(GCOV) + +coverage_report: coverage.info + genhtml coverage.info --output-directory coverage_report + @echo "Coverage report is in coverage_report/index.html" + +clean: + rm -f $(OBJ_FILES) 
$(TEST_PROGRAM) + rm -f $(COVERAGE_FILES) *.gcov + rm -rf coverage_report/ + rm -f coverage.info + +.PHONY: clean all test diff --git a/components/heap/test_multi_heap_host/main.cpp b/components/heap/test_multi_heap_host/main.cpp new file mode 100644 index 0000000000..0c7c351f43 --- /dev/null +++ b/components/heap/test_multi_heap_host/main.cpp @@ -0,0 +1,2 @@ +#define CATCH_CONFIG_MAIN +#include "catch.hpp" diff --git a/components/heap/test_multi_heap_host/test_multi_heap.cpp b/components/heap/test_multi_heap_host/test_multi_heap.cpp new file mode 100644 index 0000000000..79848b07bf --- /dev/null +++ b/components/heap/test_multi_heap_host/test_multi_heap.cpp @@ -0,0 +1,347 @@ +#include "catch.hpp" +#include "multi_heap.h" + +#include + +/* Insurance against accidentally using libc heap functions in tests */ +#undef free +#define free #error +#undef malloc +#define malloc #error +#undef calloc +#define calloc #error +#undef realloc +#define realloc #error + +TEST_CASE("multi_heap simple allocations", "[multi_heap]") +{ + uint8_t small_heap[128]; + + multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap)); + + size_t test_alloc_size = (multi_heap_free_size(heap) + 4) / 2; + + printf("New heap:\n"); + multi_heap_dump(heap); + printf("*********************\n"); + + void *buf = multi_heap_malloc(heap, test_alloc_size); + + printf("First malloc:\n"); + multi_heap_dump(heap); + printf("*********************\n"); + + printf("small_heap %p buf %p\n", small_heap, buf); + REQUIRE( buf != NULL ); + REQUIRE((intptr_t)buf >= (intptr_t)small_heap); + REQUIRE( (intptr_t)buf < (intptr_t)(small_heap + sizeof(small_heap))); + + REQUIRE( multi_heap_get_allocated_size(heap, buf) >= test_alloc_size ); + REQUIRE( multi_heap_get_allocated_size(heap, buf) < test_alloc_size + 16); + + memset(buf, 0xEE, test_alloc_size); + + REQUIRE( multi_heap_malloc(heap, test_alloc_size) == NULL ); + + multi_heap_free(heap, buf); + + printf("Empty?\n"); + multi_heap_dump(heap); + printf("*********************\n"); + + /* Now there should be space for another allocation */ + buf = multi_heap_malloc(heap, test_alloc_size); + REQUIRE( buf != NULL ); + multi_heap_free(heap, buf); + + REQUIRE( multi_heap_free_size(heap) > multi_heap_minimum_free_size(heap) ); +} + + +TEST_CASE("multi_heap fragmentation", "[multi_heap]") +{ + uint8_t small_heap[200]; + multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap)); + + /* allocate enough that we can't fit 6 alloc_size blocks in the heap (due to + per-allocation block overhead. This calculation works for 32-bit pointers, + probably needs tweaking for 64-bit. 
*/ + size_t alloc_size = ((multi_heap_free_size(heap)) / 6) & ~(sizeof(void *) - 1); + + printf("alloc_size %zu\n", alloc_size); + + void *p[4]; + for (int i = 0; i < 4; i++) { + multi_heap_dump(heap); + REQUIRE( multi_heap_check(heap, true) ); + p[i] = multi_heap_malloc(heap, alloc_size); + printf("%d = %p ****->\n", i, p[i]); + multi_heap_dump(heap); + REQUIRE( p[i] != NULL ); + } + + printf("allocated %p %p %p %p\n", p[0], p[1], p[2], p[3]); + + REQUIRE( multi_heap_malloc(heap, alloc_size * 3) == NULL ); /* no room to allocate 3*alloc_size now */ + + printf("4 allocations:\n"); + multi_heap_dump(heap); + printf("****************\n"); + + multi_heap_free(heap, p[0]); + multi_heap_free(heap, p[1]); + multi_heap_free(heap, p[3]); + + printf("1 allocations:\n"); + multi_heap_dump(heap); + printf("****************\n"); + + void *big = multi_heap_malloc(heap, alloc_size * 3); + REQUIRE( p[3] == big ); /* big should go where p[3] was freed from */ + multi_heap_free(heap, big); + + multi_heap_free(heap, p[2]); + + printf("0 allocations:\n"); + multi_heap_dump(heap); + printf("****************\n"); + + big = multi_heap_malloc(heap, alloc_size * 2); + REQUIRE( p[0] == big ); /* big should now go where p[0] was freed from */ + multi_heap_free(heap, big); +} + +TEST_CASE("multi_heap many random allocations", "[multi_heap]") +{ + uint8_t big_heap[1024]; + const int NUM_POINTERS = 64; + + void *p[NUM_POINTERS] = { 0 }; + size_t s[NUM_POINTERS] = { 0 }; + multi_heap_handle_t heap = multi_heap_register(big_heap, sizeof(big_heap)); + + const size_t initial_free = multi_heap_free_size(heap); + + const int ITERATIONS = 100000; + + for (int i = 0; i < ITERATIONS; i++) { + /* check all pointers allocated so far are valid inside big_heap */ + for (int j = 0; j < NUM_POINTERS; j++) { + if (p[j] != NULL) { + } + } + + uint8_t n = rand() % NUM_POINTERS; + + if (rand() % 4 == 0) { + /* 1 in 4 iterations, try to realloc the buffer instead + of using malloc/free + */ + size_t new_size = rand() % 1024; + void *new_p = multi_heap_realloc(heap, p[n], new_size); + if (new_size == 0 || new_p != NULL) { + p[n] = new_p; + if (new_size > 0) { + REQUIRE( p[n] >= big_heap ); + REQUIRE( p[n] < big_heap + sizeof(big_heap) ); + } + s[n] = new_size; + memset(p[n], n, s[n]); + } + REQUIRE( multi_heap_check(heap, true) ); + continue; + } + + if (p[n] != NULL) { + if (s[n] > 0) { + /* Verify pre-existing contents of p[n] */ + uint8_t compare[s[n]]; + memset(compare, n, s[n]); + REQUIRE( memcmp(compare, p[n], s[n]) == 0 ); + } + //printf("free %zu bytes %p\n", s[n], p[n]); + multi_heap_free(heap, p[n]); + if (!multi_heap_check(heap, true)) { + printf("FAILED iteration %d after freeing %p\n", i, p[n]); + multi_heap_dump(heap); + REQUIRE(0); + } + } + + s[n] = rand() % 1024; + p[n] = multi_heap_malloc(heap, s[n]); + if (p[n] != NULL) { + REQUIRE( p[n] >= big_heap ); + REQUIRE( p[n] < big_heap + sizeof(big_heap) ); + } + if (!multi_heap_check(heap, true)) { + printf("FAILED iteration %d after mallocing %p (%zu bytes)\n", i, p[n], s[n]); + multi_heap_dump(heap); + REQUIRE(0); + } + + if (p[n] != NULL) { + memset(p[n], n, s[n]); + } + } + + for (int i = 0; i < NUM_POINTERS; i++) { + multi_heap_free(heap, p[i]); + if (!multi_heap_check(heap, true)) { + printf("FAILED during cleanup after freeing %p\n", p[i]); + multi_heap_dump(heap); + REQUIRE(0); + } + } + + REQUIRE( initial_free == multi_heap_free_size(heap) ); +} + +TEST_CASE("multi_heap_get_info() function", "[multi_heap]") +{ + uint8_t heapdata[256]; + multi_heap_handle_t heap = 
multi_heap_register(heapdata, sizeof(heapdata)); + multi_heap_info_t before, after, freed; + + multi_heap_get_info(heap, &before); + printf("before: total_free_bytes %zu\ntotal_allocated_bytes %zu\nlargest_free_block %zu\nminimum_free_bytes %zu\nallocated_blocks %zu\nfree_blocks %zu\ntotal_blocks %zu\n", + before.total_free_bytes, + before.total_allocated_bytes, + before.largest_free_block, + before.minimum_free_bytes, + before.allocated_blocks, + before.free_blocks, + before.total_blocks); + + REQUIRE( 0 == before.allocated_blocks ); + REQUIRE( 0 == before.total_allocated_bytes ); + REQUIRE( before.total_free_bytes == before.minimum_free_bytes ); + + void *x = multi_heap_malloc(heap, 32); + multi_heap_get_info(heap, &after); + printf("after: total_free_bytes %zu\ntotal_allocated_bytes %zu\nlargest_free_block %zu\nminimum_free_bytes %zu\nallocated_blocks %zu\nfree_blocks %zu\ntotal_blocks %zu\n", + after.total_free_bytes, + after.total_allocated_bytes, + after.largest_free_block, + after.minimum_free_bytes, + after.allocated_blocks, + after.free_blocks, + after.total_blocks); + + REQUIRE( 1 == after.allocated_blocks ); + REQUIRE( 32 == after.total_allocated_bytes ); + REQUIRE( after.minimum_free_bytes < before.minimum_free_bytes); + REQUIRE( after.minimum_free_bytes > 0 ); + + multi_heap_free(heap, x); + multi_heap_get_info(heap, &freed); + printf("freed: total_free_bytes %zu\ntotal_allocated_bytes %zu\nlargest_free_block %zu\nminimum_free_bytes %zu\nallocated_blocks %zu\nfree_blocks %zu\ntotal_blocks %zu\n", + freed.total_free_bytes, + freed.total_allocated_bytes, + freed.largest_free_block, + freed.minimum_free_bytes, + freed.allocated_blocks, + freed.free_blocks, + freed.total_blocks); + + REQUIRE( 0 == freed.allocated_blocks ); + REQUIRE( 0 == freed.total_allocated_bytes ); + REQUIRE( before.total_free_bytes == freed.total_free_bytes ); + REQUIRE( after.minimum_free_bytes == freed.minimum_free_bytes ); +} + +TEST_CASE("multi_heap minimum-size allocations", "[multi_heap]") +{ + uint8_t heapdata[16384]; + void *p[sizeof(heapdata) / sizeof(void *)]; + const size_t NUM_P = sizeof(p) / sizeof(void *); + multi_heap_handle_t heap = multi_heap_register(heapdata, sizeof(heapdata)); + + size_t before_free = multi_heap_free_size(heap); + + size_t i; + for (i = 0; i < NUM_P; i++) { + p[i] = multi_heap_malloc(heap, 1); + if (p[i] == NULL) { + break; + } + } + + REQUIRE( i < NUM_P); // Should have run out of heap before we ran out of pointers + printf("Allocated %zu minimum size chunks\n", i); + + REQUIRE( 0 == multi_heap_free_size(heap) ); + multi_heap_check(heap, true); + + /* Free in random order */ + bool has_allocations = true; + while (has_allocations) { + i = rand() % NUM_P; + multi_heap_free(heap, p[i]); + p[i] = NULL; + multi_heap_check(heap, true); + + has_allocations = false; + for (i = 0; i < NUM_P && !has_allocations; i++) { + has_allocations = (p[i] != NULL); + } + } + + /* all freed! 
*/ + REQUIRE( before_free == multi_heap_free_size(heap) ); +} + +TEST_CASE("multi_heap_realloc()", "[multi_heap]") +{ + const uint32_t PATTERN = 0xABABDADA; + uint8_t small_heap[256]; + multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap)); + + uint32_t *a = (uint32_t *)multi_heap_malloc(heap, 64); + uint32_t *b = (uint32_t *)multi_heap_malloc(heap, 32); + REQUIRE( a != NULL ); + REQUIRE( b != NULL ); + REQUIRE( b > a); /* 'b' takes the block after 'a' */ + + *a = PATTERN; + + uint32_t *c = (uint32_t *)multi_heap_realloc(heap, a, 72); + REQUIRE( multi_heap_check(heap, true)); + REQUIRE( c != NULL ); + REQUIRE( c > b ); /* 'a' moves, 'c' takes the block after 'b' */ + REQUIRE( *c == PATTERN ); + + uint32_t *d = (uint32_t *)multi_heap_realloc(heap, c, 36); + REQUIRE( multi_heap_check(heap, true) ); + REQUIRE( c == d ); /* 'c' block should be shrunk in-place */ + REQUIRE( *d == PATTERN); + + uint32_t *e = (uint32_t *)multi_heap_malloc(heap, 64); + REQUIRE( multi_heap_check(heap, true)); + REQUIRE( a == e ); /* 'e' takes the block formerly occupied by 'a' */ + + multi_heap_free(heap, d); + uint32_t *f = (uint32_t *)multi_heap_realloc(heap, b, 64); + REQUIRE( multi_heap_check(heap, true) ); + REQUIRE( f == b ); /* 'b' should be extended in-place, over space formerly occupied by 'd' */ + + uint32_t *g = (uint32_t *)multi_heap_realloc(heap, e, 128); /* not enough contiguous space left in the heap */ + REQUIRE( g == NULL ); + + multi_heap_free(heap, f); + /* try again */ + g = (uint32_t *)multi_heap_realloc(heap, e, 128); + REQUIRE( multi_heap_check(heap, true) ); + REQUIRE( e == g ); /* 'g' extends 'e' in place, into the space formerly held by 'f' */ +} + +TEST_CASE("corrupt heap block", "[multi_heap]") +{ + uint8_t small_heap[256]; + multi_heap_handle_t heap = multi_heap_register(small_heap, sizeof(small_heap)); + + void *a = multi_heap_malloc(heap, 32); + REQUIRE( multi_heap_check(heap, true) ); + memset(a, 0xEE, 64); + REQUIRE( !multi_heap_check(heap, true) ); +} diff --git a/components/newlib/syscalls.c b/components/newlib/syscalls.c index 74182d07f2..f32e910b76 100644 --- a/components/newlib/syscalls.c +++ b/components/newlib/syscalls.c @@ -21,42 +21,28 @@ #include #include "esp_attr.h" #include "freertos/FreeRTOS.h" +#include "esp_heap_caps.h" void* IRAM_ATTR _malloc_r(struct _reent *r, size_t size) { - return pvPortMalloc(size); + return heap_caps_malloc( size, MALLOC_CAP_8BIT ); } void IRAM_ATTR _free_r(struct _reent *r, void* ptr) { - vPortFree(ptr); + heap_caps_free( ptr ); } void* IRAM_ATTR _realloc_r(struct _reent *r, void* ptr, size_t size) { - void* new_chunk; - if (size == 0) { - if (ptr) { - vPortFree(ptr); - } - return NULL; - } - - new_chunk = pvPortMalloc(size); - if (new_chunk && ptr) { - memcpy(new_chunk, ptr, size); - vPortFree(ptr); - } - // realloc behaviour: don't free original chunk if alloc failed - return new_chunk; + return heap_caps_realloc( ptr, size, MALLOC_CAP_8BIT ); } void* IRAM_ATTR _calloc_r(struct _reent *r, size_t count, size_t size) { - void* result = pvPortMalloc(count * size); - if (result) - { - memset(result, 0, count * size); + void* result = heap_caps_malloc(count * size, MALLOC_CAP_8BIT); + if (result) { + bzero(result, count * size); } return result; } diff --git a/components/sdmmc/sdmmc_cmd.c b/components/sdmmc/sdmmc_cmd.c index 659ff5dd6c..e0474c80f9 100644 --- a/components/sdmmc/sdmmc_cmd.c +++ b/components/sdmmc/sdmmc_cmd.c @@ -17,7 +17,7 @@ #include #include "esp_log.h" -#include "esp_heap_alloc_caps.h" +#include 
"esp_heap_caps.h" #include "freertos/FreeRTOS.h" #include "freertos/task.h" #include "driver/sdmmc_defs.h" @@ -413,7 +413,7 @@ static esp_err_t sdmmc_decode_scr(uint32_t *raw_scr, sdmmc_scr_t* out_scr) static esp_err_t sdmmc_send_cmd_send_scr(sdmmc_card_t* card, sdmmc_scr_t *out_scr) { size_t datalen = 8; - uint32_t* buf = (uint32_t*) pvPortMallocCaps(datalen, MALLOC_CAP_DMA); + uint32_t* buf = (uint32_t*) heap_caps_malloc(datalen, MALLOC_CAP_DMA); if (buf == NULL) { return ESP_ERR_NO_MEM; } diff --git a/components/soc/component.mk b/components/soc/component.mk index 1d5b621a9a..5b3f81a465 100755 --- a/components/soc/component.mk +++ b/components/soc/component.mk @@ -2,4 +2,4 @@ SOC_NAME := esp32 COMPONENT_SRCDIRS := $(SOC_NAME) -COMPONENT_ADD_INCLUDEDIRS := $(SOC_NAME)/include +COMPONENT_ADD_INCLUDEDIRS := $(SOC_NAME)/include include diff --git a/components/soc/esp32/include/soc/soc.h b/components/soc/esp32/include/soc/soc.h index 70e54ded25..fdcf92cd3b 100644 --- a/components/soc/esp32/include/soc/soc.h +++ b/components/soc/esp32/include/soc/soc.h @@ -269,6 +269,28 @@ #define TICKS_PER_US_ROM 26 // CPU is 80MHz //}} +/* Overall memory map */ +#define SOC_IROM_LOW 0x400D0000 +#define SOC_IROM_HIGH 0x40400000 +#define SOC_IRAM_LOW 0x40080000 +#define SOC_IRAM_HIGH 0x400A0000 +#define SOC_DROM_LOW 0x3F400000 +#define SOC_DROM_HIGH 0x3F800000 +#define SOC_RTC_IRAM_LOW 0x400C0000 +#define SOC_RTC_IRAM_HIGH 0x400C2000 +#define SOC_RTC_DATA_LOW 0x50000000 +#define SOC_RTC_DATA_HIGH 0x50002000 + +//First and last words of the D/IRAM region, for both the DRAM address as well as the IRAM alias. +#define SOC_DIRAM_IRAM_LOW 0x400A0000 +#define SOC_DIRAM_IRAM_HIGH 0x400BFFFC +#define SOC_DIRAM_DRAM_LOW 0x3FFE0000 +#define SOC_DIRAM_DRAM_HIGH 0x3FFFFFFC + +// Region of memory accessible via DMA. See esp_ptr_dma_capable(). +#define SOC_DMA_LOW 0x3FFAE000 +#define SOC_DMA_HIGH 0x40000000 + //Interrupt hardware source table //This table is decided by hardware, don't touch this. #define ETS_WIFI_MAC_INTR_SOURCE 0/**< interrupt of WiFi MAC, level*/ diff --git a/components/soc/esp32/soc_memory_layout.c b/components/soc/esp32/soc_memory_layout.c new file mode 100644 index 0000000000..19d2b8a95f --- /dev/null +++ b/components/soc/esp32/soc_memory_layout.c @@ -0,0 +1,180 @@ +// Copyright 2010-2016 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#ifndef BOOTLOADER_BUILD + +#include +#include + +#include "soc/soc.h" +#include "soc/soc_memory_layout.h" +#include "esp_heap_caps.h" +#include "sdkconfig.h" + +/* Memory layout for ESP32 SoC */ + +/* +Memory type descriptors. These describe the capabilities of a type of memory in the SoC. Each type of memory +map consist of one or more regions in the address space. + +Each type contains an array of prioritised capabilities; types with later entries are only taken if earlier +ones can't fulfill the memory request. 
+ +The prioritised capabilities work roughly like this: +- For a normal malloc (MALLOC_CAP_8BIT), give away the DRAM-only memory first, then pass off any dual-use IRAM regions, + finally eat into the application memory. +- For a malloc where 32-bit-aligned-only access is okay, first allocate IRAM, then DRAM, finally application IRAM. +- Application mallocs (PIDx) will allocate IRAM first, if possible, then DRAM. +- Most other malloc caps only fit in one region anyway. + +*/ +const soc_memory_type_desc_t soc_memory_types[] = { + //Type 0: Plain ole D-port RAM + { "DRAM", { MALLOC_CAP_DMA|MALLOC_CAP_8BIT, MALLOC_CAP_32BIT, 0 }, false, false}, + //Type 1: Plain ole D-port RAM which has an alias on the I-port + //(This DRAM is also the region used by ROM during startup) + { "D/IRAM", { 0, MALLOC_CAP_DMA|MALLOC_CAP_8BIT, MALLOC_CAP_32BIT|MALLOC_CAP_EXEC }, true, true}, + //Type 2: IRAM + { "IRAM", { MALLOC_CAP_EXEC|MALLOC_CAP_32BIT, 0, 0 }, false, false}, + //Type 3-8: PID 2-7 IRAM + { "PID2IRAM", { MALLOC_CAP_PID2, 0, MALLOC_CAP_EXEC|MALLOC_CAP_32BIT }, false, false}, + { "PID3IRAM", { MALLOC_CAP_PID3, 0, MALLOC_CAP_EXEC|MALLOC_CAP_32BIT }, false, false}, + { "PID4IRAM", { MALLOC_CAP_PID4, 0, MALLOC_CAP_EXEC|MALLOC_CAP_32BIT }, false, false}, + { "PID5IRAM", { MALLOC_CAP_PID5, 0, MALLOC_CAP_EXEC|MALLOC_CAP_32BIT }, false, false}, + { "PID6IRAM", { MALLOC_CAP_PID6, 0, MALLOC_CAP_EXEC|MALLOC_CAP_32BIT }, false, false}, + { "PID7IRAM", { MALLOC_CAP_PID7, 0, MALLOC_CAP_EXEC|MALLOC_CAP_32BIT }, false, false}, + //Type 9-14: PID 2-7 DRAM + { "PID2DRAM", { MALLOC_CAP_PID2, MALLOC_CAP_8BIT, MALLOC_CAP_32BIT }, false, false}, + { "PID3DRAM", { MALLOC_CAP_PID3, MALLOC_CAP_8BIT, MALLOC_CAP_32BIT }, false, false}, + { "PID4DRAM", { MALLOC_CAP_PID4, MALLOC_CAP_8BIT, MALLOC_CAP_32BIT }, false, false}, + { "PID5DRAM", { MALLOC_CAP_PID5, MALLOC_CAP_8BIT, MALLOC_CAP_32BIT }, false, false}, + { "PID6DRAM", { MALLOC_CAP_PID6, MALLOC_CAP_8BIT, MALLOC_CAP_32BIT }, false, false}, + { "PID7DRAM", { MALLOC_CAP_PID7, MALLOC_CAP_8BIT, MALLOC_CAP_32BIT }, false, false}, + //Type 15: SPI SRAM data + { "SPISRAM", { MALLOC_CAP_SPISRAM, 0, MALLOC_CAP_DMA|MALLOC_CAP_8BIT|MALLOC_CAP_32BIT}, false, false}, +}; + +const size_t soc_memory_type_count = sizeof(soc_memory_types)/sizeof(soc_memory_type_desc_t); + +/* +Region descriptors. These describe all regions of memory available, and map them to a type in the above type. + +Because of requirements in the coalescing code which merges adjacent regions, this list should always be sorted +from low to high start address. +*/ +const soc_memory_region_t soc_memory_regions[] = { + { 0x3F800000, 0x20000, 15, 0}, //SPI SRAM, if available + { 0x3FFAE000, 0x2000, 0, 0}, //pool 16 <- used for rom code + { 0x3FFB0000, 0x8000, 0, 0}, //pool 15 <- if BT is enabled, used as BT HW shared memory + { 0x3FFB8000, 0x8000, 0, 0}, //pool 14 <- if BT is enabled, used data memory for BT ROM functions. 
+ { 0x3FFC0000, 0x2000, 0, 0}, //pool 10-13, mmu page 0 + { 0x3FFC2000, 0x2000, 0, 0}, //pool 10-13, mmu page 1 + { 0x3FFC4000, 0x2000, 0, 0}, //pool 10-13, mmu page 2 + { 0x3FFC6000, 0x2000, 0, 0}, //pool 10-13, mmu page 3 + { 0x3FFC8000, 0x2000, 0, 0}, //pool 10-13, mmu page 4 + { 0x3FFCA000, 0x2000, 0, 0}, //pool 10-13, mmu page 5 + { 0x3FFCC000, 0x2000, 0, 0}, //pool 10-13, mmu page 6 + { 0x3FFCE000, 0x2000, 0, 0}, //pool 10-13, mmu page 7 + { 0x3FFD0000, 0x2000, 0, 0}, //pool 10-13, mmu page 8 + { 0x3FFD2000, 0x2000, 0, 0}, //pool 10-13, mmu page 9 + { 0x3FFD4000, 0x2000, 0, 0}, //pool 10-13, mmu page 10 + { 0x3FFD6000, 0x2000, 0, 0}, //pool 10-13, mmu page 11 + { 0x3FFD8000, 0x2000, 0, 0}, //pool 10-13, mmu page 12 + { 0x3FFDA000, 0x2000, 0, 0}, //pool 10-13, mmu page 13 + { 0x3FFDC000, 0x2000, 0, 0}, //pool 10-13, mmu page 14 + { 0x3FFDE000, 0x2000, 0, 0}, //pool 10-13, mmu page 15 + { 0x3FFE0000, 0x4000, 1, 0x400BC000}, //pool 9 blk 1 + { 0x3FFE4000, 0x4000, 1, 0x400B8000}, //pool 9 blk 0 + { 0x3FFE8000, 0x8000, 1, 0x400B0000}, //pool 8 <- can be remapped to ROM, used for MAC dump + { 0x3FFF0000, 0x8000, 1, 0x400A8000}, //pool 7 <- can be used for MAC dump + { 0x3FFF8000, 0x4000, 1, 0x400A4000}, //pool 6 blk 1 <- can be used as trace memory + { 0x3FFFC000, 0x4000, 1, 0x400A0000}, //pool 6 blk 0 <- can be used as trace memory + { 0x40070000, 0x8000, 2, 0}, //pool 0 + { 0x40078000, 0x8000, 2, 0}, //pool 1 + { 0x40080000, 0x2000, 2, 0}, //pool 2-5, mmu page 0 + { 0x40082000, 0x2000, 2, 0}, //pool 2-5, mmu page 1 + { 0x40084000, 0x2000, 2, 0}, //pool 2-5, mmu page 2 + { 0x40086000, 0x2000, 2, 0}, //pool 2-5, mmu page 3 + { 0x40088000, 0x2000, 2, 0}, //pool 2-5, mmu page 4 + { 0x4008A000, 0x2000, 2, 0}, //pool 2-5, mmu page 5 + { 0x4008C000, 0x2000, 2, 0}, //pool 2-5, mmu page 6 + { 0x4008E000, 0x2000, 2, 0}, //pool 2-5, mmu page 7 + { 0x40090000, 0x2000, 2, 0}, //pool 2-5, mmu page 8 + { 0x40092000, 0x2000, 2, 0}, //pool 2-5, mmu page 9 + { 0x40094000, 0x2000, 2, 0}, //pool 2-5, mmu page 10 + { 0x40096000, 0x2000, 2, 0}, //pool 2-5, mmu page 11 + { 0x40098000, 0x2000, 2, 0}, //pool 2-5, mmu page 12 + { 0x4009A000, 0x2000, 2, 0}, //pool 2-5, mmu page 13 + { 0x4009C000, 0x2000, 2, 0}, //pool 2-5, mmu page 14 + { 0x4009E000, 0x2000, 2, 0}, //pool 2-5, mmu page 15 +}; + +const size_t soc_memory_region_count = sizeof(soc_memory_regions)/sizeof(soc_memory_region_t); + + +/* Reserved memory regions + + These are removed from the soc_memory_regions array when heaps are created. + */ +const soc_reserved_region_t soc_reserved_regions[] = { + { 0x40070000, 0x40078000 }, //CPU0 cache region + { 0x40078000, 0x40080000 }, //CPU1 cache region + + /* Warning: The ROM stack is located in the 0x3ffe0000 area. We do not specifically disable that area here because + after the scheduler has started, the ROM stack is not used anymore by anything. We handle it instead by not allowing + any mallocs memory regions with the startup_stack flag set (these are the IRAM/DRAM region) until the + scheduler has started. + + The 0x3ffe0000 region also contains static RAM for various ROM functions. The following lines + reserve the regions for UART and ETSC, so these functions are usable. Libraries like xtos, which are + not usable in FreeRTOS anyway, are commented out in the linker script so they cannot be used; we + do not disable their memory regions here and they will be used as general purpose heap memory. 
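A simplified editor's sketch of how the prioritised capability lists declared above in
soc_memory_types could drive selection. This is an illustration only, not the actual
heap_caps implementation: the real allocator walks every registered heap at each
priority level and falls back to later priorities when those heaps are full, which is
omitted here. pick_memory_type() and its return convention are invented names.

static int pick_memory_type(uint32_t requested_caps)
{
    /* A lower priority index wins: a type offering the capabilities in caps[0] is
       preferred over one that only offers them in caps[1] or caps[2]. */
    for (size_t prio = 0; prio < SOC_MEMORY_TYPE_NO_PRIOS; prio++) {
        for (size_t t = 0; t < soc_memory_type_count; t++) {
            if ((soc_memory_types[t].caps[prio] & requested_caps) == requested_caps) {
                return (int)t;
            }
        }
    }
    return -1; /* no memory type can satisfy this capability mask */
}
/* e.g. MALLOC_CAP_8BIT resolves to "DRAM" (priority 0) before "D/IRAM" (priority 1),
   while MALLOC_CAP_32BIT resolves to "IRAM" first - matching the ordering described
   in the comment above soc_memory_types. */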
+ + Enabling the heap allocator for this region but disabling allocation here until FreeRTOS is started up + is a somewhat risky action in theory, because on initializing the allocator, the multi_heap implementation + will go and write metadata at the start and end of all regions. For the ESP32, these linked + list entries happen to end up in a region that is not touched by the stack; they can be placed safely there. + */ + + { 0x3ffe0000, 0x3ffe0440 }, //Reserve ROM PRO data region + { 0x3ffe4000, 0x3ffe4350 }, //Reserve ROM APP data region + +#if CONFIG_BT_ENABLED +#if CONFIG_BT_DRAM_RELEASE + { 0x3ffb0000, 0x3ffb3000 }, //Reserve BT data region + { 0x3ffb8000, 0x3ffbbb28 }, //Reserve BT data region + { 0x3ffbdb28, 0x3ffc0000 }, //Reserve BT data region +#else + { 0x3ffb0000, 0x3ffc0000 }, //Reserve BT hardware shared memory & BT data region +#endif + { 0x3ffae000, 0x3ffaff10 }, //Reserve ROM data region, inc region needed for BT ROM routines +#else + { 0x3ffae000, 0x3ffae2a0 }, //Reserve ROM data region +#endif + +#if CONFIG_MEMMAP_TRACEMEM +#if CONFIG_MEMMAP_TRACEMEM_TWOBANKS + { 0x3fff8000, 0x40000000 }, //Reserve trace mem region +#else + { 0x3fff8000, 0x3fffc000 }, //Reserve trace mem region +#endif +#endif + +#if 1 // SPI ram not supported yet + { 0x3f800000, 0x3f820000 }, //SPI SRAM not installed +#endif +}; + +const size_t soc_reserved_region_count = sizeof(soc_reserved_regions)/sizeof(soc_reserved_region_t); + +#endif diff --git a/components/soc/include/soc/soc_memory_layout.h b/components/soc/include/soc/soc_memory_layout.h new file mode 100644 index 0000000000..f35004e4f0 --- /dev/null +++ b/components/soc/include/soc/soc_memory_layout.h @@ -0,0 +1,64 @@ +// Copyright 2010-2016 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at + +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once +#include +#include +#include + +#include "soc/soc.h" + +#define SOC_MEMORY_TYPE_NO_PRIOS 3 + +/* Type descriptor holds a description for a particular type of memory on a particular SoC. + */ +typedef struct { + const char *name; ///< Name of this memory type + uint32_t caps[SOC_MEMORY_TYPE_NO_PRIOS]; ///< Capabilities for this memory type (as a prioritised set) + bool aliased_iram; ///< If true, this is data memory that is is also mapped in IRAM + bool startup_stack; ///< If true, memory of this type is used for ROM stack during startup +} soc_memory_type_desc_t; + +/* Constant table of tag descriptors for all this SoC's tags */ +extern const soc_memory_type_desc_t soc_memory_types[]; +extern const size_t soc_memory_type_count; + +/* Region descriptor holds a description for a particular region of memory on a particular SoC. 
+ */ +typedef struct +{ + intptr_t start; ///< Start address of the region + size_t size; ///< Size of the region in bytes + size_t type; ///< Type of the region (index into soc_memory_types array) + intptr_t iram_address; ///< If non-zero, is equivalent address in IRAM +} soc_memory_region_t; + +extern const soc_memory_region_t soc_memory_regions[]; +extern const size_t soc_memory_region_count; + +/* Region descriptor holds a description for a particular region of + memory reserved on this SoC for a particular use (ie not available + for stack/heap usage.) */ +typedef struct +{ + intptr_t start; + intptr_t end; +} soc_reserved_region_t; + +extern const soc_reserved_region_t soc_reserved_regions[]; +extern const size_t soc_reserved_region_count; + +inline bool esp_ptr_dma_capable(const void *p) +{ + return (intptr_t)p >= SOC_DMA_LOW && (intptr_t)p < SOC_DMA_HIGH; +} diff --git a/docs/Doxyfile b/docs/Doxyfile index 9cd987fb58..e6af25c901 100644 --- a/docs/Doxyfile +++ b/docs/Doxyfile @@ -104,8 +104,8 @@ INPUT = \ ## System - API Reference ## ## Memory Allocation # - ../components/esp32/include/esp_heap_alloc_caps.h \ - ../components/freertos/include/freertos/heap_regions.h \ + ../components/heap/include/esp_heap_caps.h \ + ../components/heap/include/multi_heap.h \ ## Interrupt Allocation ../components/esp32/include/esp_intr_alloc.h \ ## Watchdogs diff --git a/docs/api-reference/system/mem_alloc.rst b/docs/api-reference/system/mem_alloc.rst index cba326ad18..ce231ad088 100644 --- a/docs/api-reference/system/mem_alloc.rst +++ b/docs/api-reference/system/mem_alloc.rst @@ -9,19 +9,15 @@ possible to connect external SPI flash to the ESP32; it's memory can be integrat the flash cache. In order to make use of all this memory, esp-idf has a capabilities-based memory allocator. Basically, if you want to have -memory with certain properties (for example, DMA-capable, accessible by a certain PID, or capable of executing code), you +memory with certain properties (for example, DMA-capable, or capable of executing code), you can create an OR-mask of the required capabilities and pass that to pvPortMallocCaps. For instance, the normal malloc -code internally allocates memory with ```pvPortMallocCaps(size, MALLOC_CAP_8BIT)``` in order to get data memory that is +code internally allocates memory with ```heap_caps_malloc(size, MALLOC_CAP_8BIT)``` in order to get data memory that is byte-addressable. -Because malloc uses this allocation system as well, memory allocated using pvPortMallocCaps can be freed by calling +Because malloc uses this allocation system as well, memory allocated using ```heap_caps_malloc()``` can be freed by calling the standard ```free()``` function. -Internally, this allocator is split in two pieces. The allocator in the FreeRTOS directory can allocate memory from -tagged regions: a tag is an integer value and every region of free memory has one of these tags. The esp32-specific -code initializes these regions with specific tags, and contains the logic to select applicable tags from the -capabilities given by the user. While shown in the public API, tags are used in the communication between the two parts -and should not be used directly. +The "soc" component contains a list of memory regions for the chip, along with the type of each memory (aka its tag) and the associated capabilities for that memory type. On startup, a separate heap is initialised for each contiguous memory region. 
The capabilities-based allocator chooses the best heap for each allocation, based on the requested capabilities. Special Uses ------------ @@ -39,4 +35,3 @@ API Reference - Heap Regions ---------------------------- .. include:: /_build/inc/heap_regions.inc -
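To close, a minimal usage sketch of the API this patch documents, assuming an ESP-IDF
application context: heap_caps_malloc()/heap_caps_free() and the MALLOC_CAP_* flags come
from the new esp_heap_caps.h, esp_ptr_dma_capable() from soc/soc_memory_layout.h, and the
free() interoperability is the behaviour stated in the mem_alloc.rst hunk above. The
function name example_dma_alloc() is illustrative only.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include "esp_heap_caps.h"
#include "soc/soc_memory_layout.h"

void example_dma_alloc(void)
{
    /* Request byte-addressable, DMA-capable memory for a driver buffer. */
    uint8_t *buf = heap_caps_malloc(512, MALLOC_CAP_DMA | MALLOC_CAP_8BIT);
    assert(buf != NULL);
    assert(esp_ptr_dma_capable(buf));   /* falls inside the SOC_DMA_LOW..SOC_DMA_HIGH window */

    /* ... fill the buffer, hand it to a DMA-capable peripheral ... */

    /* malloc()/free() route through the same allocator (see the syscalls.c hunk above),
       so either free() or heap_caps_free() releases capability-allocated memory. */
    free(buf);
}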