diff --git a/components/bootloader_support/bootloader_flash/src/bootloader_flash.c b/components/bootloader_support/bootloader_flash/src/bootloader_flash.c index fb2afe66db..c75f0d0eeb 100644 --- a/components/bootloader_support/bootloader_flash/src/bootloader_flash.c +++ b/components/bootloader_support/bootloader_flash/src/bootloader_flash.c @@ -12,10 +12,6 @@ #include "soc/soc_caps.h" #include "hal/efuse_ll.h" #include "hal/efuse_hal.h" -#if CONFIG_IDF_TARGET_ESP32P4 -//TODO: IDF-7516 -#include "esp32p4/rom/cache.h" -#endif #if CONFIG_IDF_TARGET_ESP32 # include "soc/spi_struct.h" @@ -128,6 +124,7 @@ esp_err_t bootloader_flash_erase_range(uint32_t start_addr, uint32_t size) #include "hal/mmu_hal.h" #include "hal/mmu_ll.h" #include "hal/cache_hal.h" +#include "hal/cache_ll.h" #if CONFIG_IDF_TARGET_ESP32S3 #include "esp32s3/rom/opi_flash.h" @@ -205,7 +202,7 @@ const void *bootloader_mmap(uint32_t src_paddr, uint32_t size) Cache_Read_Disable(0); Cache_Flush(0); #else - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); #endif //---------------Do mapping------------------------ @@ -238,15 +235,10 @@ const void *bootloader_mmap(uint32_t src_paddr, uint32_t size) #if CONFIG_IDF_TARGET_ESP32 Cache_Read_Enable(0); #else -#if CONFIG_IDF_TARGET_ESP32P4 - /** - * TODO: IDF-7516 - * we need to invalidate l1 dcache to make each mmap clean - * to that vaddr - */ - Cache_Invalidate_Addr(CACHE_MAP_L1_DCACHE, MMU_BLOCK0_VADDR, actual_mapped_len); +#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE + cache_ll_invalidate_addr(CACHE_LL_LEVEL_ALL, CACHE_TYPE_ALL, CACHE_LL_ID_ALL, MMU_BLOCK0_VADDR, actual_mapped_len); #endif - cache_hal_enable(CACHE_TYPE_ALL); + cache_hal_enable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); #endif mapped = true; @@ -263,7 +255,7 @@ void bootloader_munmap(const void *mapping) Cache_Flush(0); mmu_init(0); #else - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); mmu_hal_unmap_all(); 
#endif mapped = false; @@ -291,7 +283,7 @@ static esp_err_t bootloader_flash_read_no_decrypt(size_t src_addr, void *dest, s Cache_Read_Disable(0); Cache_Flush(0); #else - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); #endif esp_rom_spiflash_result_t r = esp_rom_spiflash_read(src_addr, dest, size); @@ -299,7 +291,7 @@ static esp_err_t bootloader_flash_read_no_decrypt(size_t src_addr, void *dest, s #if CONFIG_IDF_TARGET_ESP32 Cache_Read_Enable(0); #else - cache_hal_enable(CACHE_TYPE_ALL); + cache_hal_enable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); #endif return spi_to_esp_err(r); @@ -322,7 +314,7 @@ static esp_err_t bootloader_flash_read_allow_decrypt(size_t src_addr, void *dest Cache_Read_Disable(0); Cache_Flush(0); #else - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); #endif //---------------Do mapping------------------------ @@ -341,11 +333,10 @@ static esp_err_t bootloader_flash_read_allow_decrypt(size_t src_addr, void *dest #if CONFIG_IDF_TARGET_ESP32 Cache_Read_Enable(0); #else -#if CONFIG_IDF_TARGET_ESP32P4 - //TODO: IDF-7516 - Cache_Invalidate_Addr(CACHE_MAP_L1_DCACHE, FLASH_READ_VADDR, actual_mapped_len); +#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE + cache_ll_invalidate_addr(CACHE_LL_LEVEL_ALL, CACHE_TYPE_ALL, CACHE_LL_ID_ALL, FLASH_READ_VADDR, actual_mapped_len); #endif - cache_hal_enable(CACHE_TYPE_ALL); + cache_hal_enable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); #endif } map_ptr = (uint32_t *)(FLASH_READ_VADDR + (word_src - map_at)); @@ -468,9 +459,9 @@ void bootloader_flash_32bits_address_map_enable(esp_rom_spiflash_read_mode_t fla assert(false); break; } - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); esp_rom_opiflash_cache_mode_config(flash_mode, &cache_rd); - cache_hal_enable(CACHE_TYPE_ALL); + cache_hal_enable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); } #endif diff --git 
a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32c2.c b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32c2.c index 7a3c1f3b50..85f7c16654 100644 --- a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32c2.c +++ b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32c2.c @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD + * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Apache-2.0 */ @@ -23,8 +23,9 @@ #include "bootloader_flash_priv.h" #include "bootloader_init.h" #include "hal/mmu_hal.h" -#include "hal/cache_hal.h" #include "hal/mmu_ll.h" +#include "hal/cache_hal.h" +#include "hal/cache_ll.h" #define FLASH_IO_MATRIX_DUMMY_40M 0 #define FLASH_IO_MATRIX_DUMMY_80M 0 @@ -126,10 +127,10 @@ static void update_flash_config(const esp_image_header_t *bootloader_hdr) default: size = 2; } - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); // Set flash chip size esp_rom_spiflash_config_param(rom_spiflash_legacy_data->chip.device_id, size * 0x100000, 0x10000, 0x1000, 0x100, 0xffff); // TODO: set mode - cache_hal_enable(CACHE_TYPE_ALL); + cache_hal_enable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); } static void print_flash_info(const esp_image_header_t *bootloader_hdr) diff --git a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32c3.c b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32c3.c index cbbb81189a..fb1f2329d1 100644 --- a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32c3.c +++ b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32c3.c @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: 2020-2022 Espressif Systems (Shanghai) CO LTD + * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) 
CO LTD * * SPDX-License-Identifier: Apache-2.0 */ @@ -24,8 +24,9 @@ #include "bootloader_flash_priv.h" #include "bootloader_init.h" #include "hal/mmu_hal.h" -#include "hal/cache_hal.h" #include "hal/mmu_ll.h" +#include "hal/cache_hal.h" +#include "hal/cache_ll.h" #define FLASH_IO_MATRIX_DUMMY_40M 0 #define FLASH_IO_MATRIX_DUMMY_80M 0 @@ -137,10 +138,10 @@ static void update_flash_config(const esp_image_header_t *bootloader_hdr) default: size = 2; } - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); // Set flash chip size esp_rom_spiflash_config_param(rom_spiflash_legacy_data->chip.device_id, size * 0x100000, 0x10000, 0x1000, 0x100, 0xffff); // TODO: set mode - cache_hal_enable(CACHE_TYPE_ALL); + cache_hal_enable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); } static void print_flash_info(const esp_image_header_t *bootloader_hdr) diff --git a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32c6.c b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32c6.c index 7b725a4862..6a1e62459c 100644 --- a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32c6.c +++ b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32c6.c @@ -24,8 +24,9 @@ #include "bootloader_flash_priv.h" #include "bootloader_init.h" #include "hal/mmu_hal.h" -#include "hal/cache_hal.h" #include "hal/mmu_ll.h" +#include "hal/cache_hal.h" +#include "hal/cache_ll.h" void bootloader_flash_update_id() { @@ -102,10 +103,10 @@ static void update_flash_config(const esp_image_header_t *bootloader_hdr) default: size = 2; } - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); // Set flash chip size esp_rom_spiflash_config_param(rom_spiflash_legacy_data->chip.device_id, size * 0x100000, 0x10000, 0x1000, 0x100, 0xffff); // TODO: set mode - cache_hal_enable(CACHE_TYPE_ALL); + cache_hal_enable(CACHE_LL_LEVEL_EXT_MEM, 
CACHE_TYPE_ALL); } static void print_flash_info(const esp_image_header_t *bootloader_hdr) diff --git a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32h2.c b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32h2.c index ef83e6d870..b4b1bb94c3 100644 --- a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32h2.c +++ b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32h2.c @@ -24,8 +24,9 @@ #include "bootloader_flash_priv.h" #include "bootloader_init.h" #include "hal/mmu_hal.h" -#include "hal/cache_hal.h" #include "hal/mmu_ll.h" +#include "hal/cache_hal.h" +#include "hal/cache_ll.h" #include "soc/pcr_reg.h" void bootloader_flash_update_id() @@ -109,10 +110,10 @@ static void update_flash_config(const esp_image_header_t *bootloader_hdr) default: size = 2; } - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); // Set flash chip size esp_rom_spiflash_config_param(rom_spiflash_legacy_data->chip.device_id, size * 0x100000, 0x10000, 0x1000, 0x100, 0xffff); // TODO: set mode - cache_hal_enable(CACHE_TYPE_ALL); + cache_hal_enable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); } static void print_flash_info(const esp_image_header_t *bootloader_hdr) diff --git a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32p4.c b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32p4.c index b2fb0e385d..14887abe94 100644 --- a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32p4.c +++ b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32p4.c @@ -18,8 +18,9 @@ #include "bootloader_flash_priv.h" #include "bootloader_init.h" #include "hal/mmu_hal.h" -#include "hal/cache_hal.h" #include "hal/mmu_ll.h" +#include "hal/cache_hal.h" +#include "hal/cache_ll.h" void bootloader_flash_update_id() { @@ -96,10 +97,10 @@ static void 
update_flash_config(const esp_image_header_t *bootloader_hdr) default: size = 2; } - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); // Set flash chip size esp_rom_spiflash_config_param(rom_spiflash_legacy_data->chip.device_id, size * 0x100000, 0x10000, 0x1000, 0x100, 0xffff); // TODO: set mode - cache_hal_enable(CACHE_TYPE_ALL); + cache_hal_enable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); } static void print_flash_info(const esp_image_header_t *bootloader_hdr) diff --git a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32s2.c b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32s2.c index d6479a1a18..a1855bb4c4 100644 --- a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32s2.c +++ b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32s2.c @@ -1,5 +1,5 @@ /* - * SPDX-FileCopyrightText: 2019-2022 Espressif Systems (Shanghai) CO LTD + * SPDX-FileCopyrightText: 2019-2023 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Apache-2.0 */ @@ -23,7 +23,9 @@ #include "bootloader_common.h" #include "bootloader_init.h" #include "hal/mmu_hal.h" +#include "hal/mmu_ll.h" #include "hal/cache_hal.h" +#include "hal/cache_ll.h" #define FLASH_IO_MATRIX_DUMMY_40M 0 #define FLASH_IO_MATRIX_DUMMY_80M 0 @@ -150,12 +152,12 @@ static void update_flash_config(const esp_image_header_t *bootloader_hdr) default: size = 2; } - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); // Set flash chip size esp_rom_spiflash_config_param(g_rom_flashchip.device_id, size * 0x100000, 0x10000, 0x1000, 0x100, 0xffff); // TODO: set mode // TODO: set frequency - cache_hal_enable(CACHE_TYPE_ALL); + cache_hal_enable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); } static void print_flash_info(const esp_image_header_t *bootloader_hdr) diff --git 
a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32s3.c b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32s3.c index f995cb41b8..1b0caa3020 100644 --- a/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32s3.c +++ b/components/bootloader_support/bootloader_flash/src/bootloader_flash_config_esp32s3.c @@ -24,7 +24,9 @@ #include "bootloader_flash.h" #include "bootloader_init.h" #include "hal/mmu_hal.h" +#include "hal/mmu_ll.h" #include "hal/cache_hal.h" +#include "hal/cache_ll.h" #define FLASH_IO_MATRIX_DUMMY_40M 0 #define FLASH_IO_MATRIX_DUMMY_80M 0 @@ -157,12 +159,12 @@ static void update_flash_config(const esp_image_header_t *bootloader_hdr) size = 2; } - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); // Set flash chip size esp_rom_spiflash_config_param(g_rom_flashchip.device_id, size * 0x100000, 0x10000, 0x1000, 0x100, 0xffff); // TODO: set mode // TODO: set frequency - cache_hal_enable(CACHE_TYPE_ALL); + cache_hal_enable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); } static void print_flash_info(const esp_image_header_t *bootloader_hdr) diff --git a/components/bootloader_support/src/bootloader_utility.c b/components/bootloader_support/src/bootloader_utility.c index e591189fa3..ea3f37531b 100644 --- a/components/bootloader_support/src/bootloader_utility.c +++ b/components/bootloader_support/src/bootloader_utility.c @@ -836,7 +836,7 @@ static void set_cache_and_start_app( Cache_Read_Disable(0); Cache_Flush(0); #else - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); #endif //reset MMU table first mmu_hal_unmap_all(); @@ -896,7 +896,7 @@ static void set_cache_and_start_app( // Application will need to do Cache_Flush(1) and Cache_Read_Enable(1) Cache_Read_Enable(0); #else - cache_hal_enable(CACHE_TYPE_ALL); + cache_hal_enable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); #endif ESP_LOGD(TAG, 
"start: 0x%08"PRIx32, entry_addr); diff --git a/components/bootloader_support/src/esp_image_format.c b/components/bootloader_support/src/esp_image_format.c index 26a1e67edf..e45c6c2d7e 100644 --- a/components/bootloader_support/src/esp_image_format.c +++ b/components/bootloader_support/src/esp_image_format.c @@ -21,6 +21,7 @@ #include "esp_rom_sys.h" #include "bootloader_memory_utils.h" #include "soc/soc_caps.h" +#include "hal/cache_ll.h" #if CONFIG_IDF_TARGET_ESP32 #include "esp32/rom/secure_boot.h" #elif CONFIG_IDF_TARGET_ESP32S2 @@ -41,7 +42,6 @@ #elif CONFIG_IDF_TARGET_ESP32P4 #include "esp32p4/rom/rtc.h" #include "esp32p4/rom/secure_boot.h" -#include "esp32p4/rom/cache.h" #endif #define ALIGN_UP(num, align) (((num) + ((align) - 1)) & ~((align) - 1)) @@ -236,9 +236,8 @@ static esp_err_t image_load(esp_image_load_mode_t mode, const esp_partition_pos_ } } } -#if CONFIG_IDF_TARGET_ESP32P4 - //TODO: IDF-7516 - Cache_WriteBack_All(CACHE_MAP_L1_DCACHE); +#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE + cache_ll_writeback_all(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA, CACHE_LL_ID_ALL); #endif } #endif // BOOTLOADER_BUILD @@ -675,10 +674,9 @@ static esp_err_t process_segment_data(intptr_t load_addr, uint32_t data_addr, ui MIN(SHA_CHUNK, data_len - i)); } } -#if CONFIG_IDF_TARGET_ESP32P4 - //TODO: IDF-7516 +#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE if (do_load && esp_ptr_in_diram_iram((uint32_t *)load_addr)) { - Cache_WriteBack_All(CACHE_MAP_L1_DCACHE); + cache_ll_writeback_all(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA, CACHE_LL_ID_ALL); } #endif diff --git a/components/driver/rmt/rmt_rx.c b/components/driver/rmt/rmt_rx.c index 9ee7d1ad16..eb490d17f8 100644 --- a/components/driver/rmt/rmt_rx.c +++ b/components/driver/rmt/rmt_rx.c @@ -353,7 +353,7 @@ esp_err_t rmt_receive(rmt_channel_handle_t channel, void *buffer, size_t buffer_ ESP_RETURN_ON_FALSE(esp_ptr_internal(buffer), ESP_ERR_INVALID_ARG, TAG, "buffer must locate in internal RAM for DMA use"); #if CONFIG_IDF_TARGET_ESP32P4 - 
uint32_t data_cache_line_mask = cache_hal_get_cache_line_size(CACHE_TYPE_DATA) - 1; + uint32_t data_cache_line_mask = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA) - 1; ESP_RETURN_ON_FALSE(((uintptr_t)buffer & data_cache_line_mask) == 0, ESP_ERR_INVALID_ARG, TAG, "buffer must be aligned to cache line size"); ESP_RETURN_ON_FALSE((buffer_size & data_cache_line_mask) == 0, ESP_ERR_INVALID_ARG, TAG, "buffer size must be aligned to cache line size"); #endif diff --git a/components/esp_hw_support/CMakeLists.txt b/components/esp_hw_support/CMakeLists.txt index c269eba0f3..342e54281e 100644 --- a/components/esp_hw_support/CMakeLists.txt +++ b/components/esp_hw_support/CMakeLists.txt @@ -34,7 +34,8 @@ if(NOT BOOTLOADER_BUILD) "sar_periph_ctrl_common.c" "port/${target}/io_mux.c" "port/${target}/esp_clk_tree.c" - "port/esp_clk_tree_common.c") + "port/esp_clk_tree_common.c" + "dma/esp_dma_utils.c") if(CONFIG_SOC_ADC_SUPPORTED) list(APPEND srcs "adc_share_hw_ctrl.c") @@ -51,6 +52,8 @@ if(NOT BOOTLOADER_BUILD) # [refactor-todo]: requires "driver" for GPIO and RTC (by sleep_gpio and sleep_modes) list(APPEND priv_requires driver esp_timer) + list(APPEND priv_requires esp_mm) + if(CONFIG_IDF_TARGET_ESP32 OR CONFIG_IDF_TARGET_ESP32S2) list(APPEND srcs "rtc_wdt.c") endif() diff --git a/components/esp_hw_support/dma/async_memcpy_gdma.c b/components/esp_hw_support/dma/async_memcpy_gdma.c index a19f61cefb..d460ca23d0 100644 --- a/components/esp_hw_support/dma/async_memcpy_gdma.c +++ b/components/esp_hw_support/dma/async_memcpy_gdma.c @@ -21,6 +21,7 @@ #include "esp_async_memcpy_priv.h" #include "hal/dma_types.h" #include "hal/cache_hal.h" +#include "hal/cache_ll.h" #include "rom/cache.h" static const char *TAG = "async_mcp.gdma"; @@ -161,13 +162,20 @@ static esp_err_t esp_async_memcpy_install_gdma_template(const async_memcpy_confi portMUX_INITIALIZE(&mcp_gdma->spin_lock); atomic_init(&mcp_gdma->fsm, MCP_FSM_IDLE); mcp_gdma->gdma_bus_id = gdma_bus_id; + + 
uint32_t psram_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_DATA); + uint32_t sram_cache_line_size = 0; +#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE + sram_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_INT_MEM, CACHE_TYPE_DATA); +#endif + // if the psram_trans_align is configured to zero, we should fall back to use the data cache line size - uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_TYPE_DATA); - size_t psram_trans_align = MAX(data_cache_line_size, config->psram_trans_align); - size_t trans_align = MAX(config->sram_trans_align, psram_trans_align); + size_t psram_trans_align = MAX(psram_cache_line_size, config->psram_trans_align); + size_t sram_trans_align = MAX(sram_cache_line_size, config->sram_trans_align); + size_t trans_align = MAX(sram_trans_align, psram_trans_align); mcp_gdma->max_single_dma_buffer = ALIGN_DOWN(DMA_DESCRIPTOR_BUFFER_MAX_SIZE, trans_align); mcp_gdma->psram_trans_align = psram_trans_align; - mcp_gdma->sram_trans_align = config->sram_trans_align; + mcp_gdma->sram_trans_align = sram_trans_align; mcp_gdma->parent.del = mcp_gdma_del; mcp_gdma->parent.memcpy = mcp_gdma_memcpy; #if SOC_GDMA_SUPPORT_ETM @@ -322,6 +330,7 @@ static bool check_buffer_aligned(async_memcpy_gdma_context_t *mcp_gdma, void *sr { bool valid = true; uint32_t align_mask = 0; + if (esp_ptr_external_ram(dst)) { if (mcp_gdma->psram_trans_align) { align_mask = mcp_gdma->psram_trans_align - 1; @@ -331,12 +340,7 @@ static bool check_buffer_aligned(async_memcpy_gdma_context_t *mcp_gdma, void *sr align_mask = mcp_gdma->sram_trans_align - 1; } } -#if CONFIG_IDF_TARGET_ESP32P4 - uint32_t data_cache_line_mask = cache_hal_get_cache_line_size(CACHE_TYPE_DATA) - 1; - if (data_cache_line_mask > align_mask) { - align_mask = data_cache_line_mask; - } -#endif + // destination address must be cache line aligned valid = valid && (((uint32_t)dst & align_mask) == 0); valid = valid && ((n & align_mask) == 0); diff --git 
a/components/esp_hw_support/dma/esp_dma_utils.c b/components/esp_hw_support/dma/esp_dma_utils.c new file mode 100644 index 0000000000..4309a69cc4 --- /dev/null +++ b/components/esp_hw_support/dma/esp_dma_utils.c @@ -0,0 +1,69 @@ +/* + * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include "sdkconfig.h" +#include "esp_check.h" +#include "esp_log.h" +#include "esp_heap_caps.h" +#include "esp_dma_utils.h" +#include "esp_private/esp_cache_private.h" +#include "soc/soc_caps.h" + +static const char *TAG = "dma_utils"; +_Static_assert(ESP_DMA_MALLOC_FLAG_PSRAM == ESP_CACHE_MALLOC_FLAG_PSRAM); + +#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1)) + + +esp_err_t esp_dma_malloc(size_t size, uint32_t flags, void **out_ptr, size_t *actual_size) +{ + ESP_RETURN_ON_FALSE_ISR(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer"); + + esp_err_t ret = ESP_OK; + +#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE + ret = esp_cache_aligned_malloc(size, flags | ESP_CACHE_MALLOC_FLAG_DMA, out_ptr, actual_size); +#else + if (flags & ESP_DMA_MALLOC_FLAG_PSRAM) { + ret = esp_cache_aligned_malloc(size, flags | ESP_CACHE_MALLOC_FLAG_DMA, out_ptr, actual_size); + } else { + size = ALIGN_UP_BY(size, 4); + void *ptr = heap_caps_aligned_alloc(4, size, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + ESP_RETURN_ON_FALSE_ISR(ptr, ESP_ERR_NO_MEM, TAG, "no enough heap memory"); + *out_ptr = ptr; + if (actual_size) { + *actual_size = size; + } + } +#endif + + return ret; +} + + +esp_err_t esp_dma_calloc(size_t n, size_t size, uint32_t flags, void **out_ptr, size_t *actual_size) +{ + ESP_RETURN_ON_FALSE_ISR(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer"); + + esp_err_t ret = ESP_FAIL; + size_t size_bytes = 0; + bool ovf = false; + + ovf = __builtin_mul_overflow(n, size, &size_bytes); + ESP_RETURN_ON_FALSE_ISR(!ovf, ESP_ERR_INVALID_ARG, TAG, "wrong size, total size overflow"); + + void *ptr = 
NULL; + ret = esp_dma_malloc(size_bytes, flags, &ptr, actual_size); + if (ret == ESP_OK) { + memset(ptr, 0, size_bytes); + *out_ptr = ptr; + } + + return ret; +} diff --git a/components/esp_hw_support/dma/gdma.c b/components/esp_hw_support/dma/gdma.c index fd7aa1686d..feca5dfe53 100644 --- a/components/esp_hw_support/dma/gdma.c +++ b/components/esp_hw_support/dma/gdma.c @@ -45,6 +45,7 @@ #include "esp_private/periph_ctrl.h" #include "gdma_priv.h" #include "hal/cache_hal.h" +#include "hal/cache_ll.h" static const char *TAG = "gdma"; @@ -357,7 +358,7 @@ esp_err_t gdma_set_transfer_ability(gdma_channel_handle_t dma_chan, const gdma_t ESP_RETURN_ON_FALSE((sram_alignment & (sram_alignment - 1)) == 0, ESP_ERR_INVALID_ARG, TAG, "invalid sram alignment: %zu", sram_alignment); - uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_TYPE_DATA); + uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_DATA); if (psram_alignment == 0) { // fall back to use the same size of the psram data cache line size psram_alignment = data_cache_line_size; diff --git a/components/esp_hw_support/include/esp_dma_utils.h b/components/esp_hw_support/include/esp_dma_utils.h new file mode 100644 index 0000000000..3d4435b7cc --- /dev/null +++ b/components/esp_hw_support/include/esp_dma_utils.h @@ -0,0 +1,57 @@ +/* + * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ +#pragma once + +#include +#include +#include "esp_err.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * DMA malloc flags + */ +/** + * @brief Memory is in PSRAM + */ +#define ESP_DMA_MALLOC_FLAG_PSRAM BIT(0) + +/** + * @brief Helper function for malloc a DMA capable memory buffer + * + * @param[in] size Size in bytes, the amount of memory to allocate + * @param[in] flags Flags, see `ESP_DMA_MALLOC_FLAG_x` + * @param[out] out_ptr A pointer to the memory allocated successfully + * @param[out] actual_size 
Actual size for allocation in bytes, when the size you specified doesn't meet the DMA alignment requirements, this value might be bigger than the size you specified. Set null if you don't care this value. + * + * @return + * - ESP_OK: + * - ESP_ERR_INVALID_ARG: Invalid argument + * - ESP_ERR_NO_MEM: No enough memory for allocation + */ +esp_err_t esp_dma_malloc(size_t size, uint32_t flags, void **out_ptr, size_t *actual_size); + +/** + * @brief Helper function for calloc a DMA capable memory buffer + * + * @param[in] n Number of continuing chunks of memory to allocate + * @param[in] size Size of one chunk, in bytes + * @param[in] flags Flags, see `ESP_DMA_MALLOC_FLAG_x` + * @param[out] out_ptr A pointer to the memory allocated successfully + * @param[out] actual_size Actual size for allocation in bytes, when the size you specified doesn't meet the cache alignment requirements, this value might be bigger than the size you specified. Set null if you don't care this value. + * + * @return + * - ESP_OK: + * - ESP_ERR_INVALID_ARG: Invalid argument + * - ESP_ERR_NO_MEM: No enough memory for allocation + */ +esp_err_t esp_dma_calloc(size_t n, size_t size, uint32_t flags, void **out_ptr, size_t *actual_size); + +#ifdef __cplusplus +} +#endif diff --git a/components/esp_hw_support/mspi_timing_tuning.c b/components/esp_hw_support/mspi_timing_tuning.c index 2b524fb7e3..fd05a33000 100644 --- a/components/esp_hw_support/mspi_timing_tuning.c +++ b/components/esp_hw_support/mspi_timing_tuning.c @@ -15,6 +15,7 @@ #include "soc/soc.h" #include "hal/spi_flash_hal.h" #include "hal/cache_hal.h" +#include "hal/cache_ll.h" #include "esp_private/mspi_timing_tuning.h" #include "mspi_timing_config.h" #include "mspi_timing_by_mspi_delay.h" @@ -473,7 +474,7 @@ void mspi_timing_change_speed_mode_cache_safe(bool switch_down) * for preventing concurrent from MSPI to external memory */ #if SOC_CACHE_FREEZE_SUPPORTED - cache_hal_freeze(CACHE_TYPE_ALL); + cache_hal_freeze(CACHE_LL_LEVEL_EXT_MEM, 
CACHE_TYPE_ALL); #endif //#if SOC_CACHE_FREEZE_SUPPORTED if (switch_down) { @@ -485,7 +486,7 @@ void mspi_timing_change_speed_mode_cache_safe(bool switch_down) } #if SOC_CACHE_FREEZE_SUPPORTED - cache_hal_unfreeze(CACHE_TYPE_ALL); + cache_hal_unfreeze(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); #endif //#if SOC_CACHE_FREEZE_SUPPORTED } diff --git a/components/esp_hw_support/sleep_modes.c b/components/esp_hw_support/sleep_modes.c index 6a7a12bb13..23e625bf95 100644 --- a/components/esp_hw_support/sleep_modes.c +++ b/components/esp_hw_support/sleep_modes.c @@ -43,6 +43,7 @@ #include "regi2c_ctrl.h" //For `REGI2C_ANA_CALI_PD_WORKAROUND`, temp #include "hal/cache_hal.h" +#include "hal/cache_ll.h" #include "hal/wdt_hal.h" #include "hal/uart_hal.h" #if SOC_TOUCH_SENSOR_SUPPORTED @@ -403,7 +404,7 @@ static int s_cache_suspend_cnt = 0; static void IRAM_ATTR suspend_cache(void) { s_cache_suspend_cnt++; if (s_cache_suspend_cnt == 1) { - cache_hal_suspend(CACHE_TYPE_ALL); + cache_hal_suspend(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); } } @@ -412,7 +413,7 @@ static void IRAM_ATTR resume_cache(void) { s_cache_suspend_cnt--; assert(s_cache_suspend_cnt >= 0 && DRAM_STR("cache resume doesn't match suspend ops")); if (s_cache_suspend_cnt == 0) { - cache_hal_resume(CACHE_TYPE_ALL); + cache_hal_resume(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); } } diff --git a/components/esp_hw_support/test_apps/dma/main/CMakeLists.txt b/components/esp_hw_support/test_apps/dma/main/CMakeLists.txt index 4088442971..54b2398209 100644 --- a/components/esp_hw_support/test_apps/dma/main/CMakeLists.txt +++ b/components/esp_hw_support/test_apps/dma/main/CMakeLists.txt @@ -11,5 +11,5 @@ endif() # In order for the cases defined by `TEST_CASE` to be linked into the final elf, # the component can be registered as WHOLE_ARCHIVE idf_component_register(SRCS ${srcs} - PRIV_REQUIRES unity + PRIV_REQUIRES unity esp_mm WHOLE_ARCHIVE) diff --git a/components/esp_hw_support/test_apps/dma/main/test_gdma.c 
b/components/esp_hw_support/test_apps/dma/main/test_gdma.c index ba9d2eb333..6d3d909d1f 100644 --- a/components/esp_hw_support/test_apps/dma/main/test_gdma.c +++ b/components/esp_hw_support/test_apps/dma/main/test_gdma.c @@ -16,7 +16,7 @@ #include "soc/soc_caps.h" #include "hal/gdma_ll.h" #include "hal/cache_ll.h" -#include "rom/cache.h" +#include "esp_cache.h" TEST_CASE("GDMA channel allocation", "[GDMA]") { @@ -187,7 +187,7 @@ static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handl uint8_t *dst_data = dst_buf + 64; // prepare the source data - for (int i = 0; i < 100; i++) { + for (int i = 0; i < 128; i++) { src_data[i] = i; } @@ -198,41 +198,41 @@ static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handl dma_descriptor_align8_t *rx_descs_noncache = (dma_descriptor_align8_t *)(CACHE_LL_L2MEM_NON_CACHE_ADDR(rx_descs)); tx_descs_noncache[0].buffer = src_data; - tx_descs_noncache[0].dw0.size = 50; - tx_descs_noncache[0].dw0.length = 50; + tx_descs_noncache[0].dw0.size = 64; + tx_descs_noncache[0].dw0.length = 64; tx_descs_noncache[0].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA; tx_descs_noncache[0].dw0.suc_eof = 0; tx_descs_noncache[0].next = &tx_descs[1]; // Note, the DMA doesn't recognize a non-cacheable address, here must be the cached address - tx_descs_noncache[1].buffer = src_data + 50; - tx_descs_noncache[1].dw0.size = 50; - tx_descs_noncache[1].dw0.length = 50; + tx_descs_noncache[1].buffer = src_data + 64; + tx_descs_noncache[1].dw0.size = 64; + tx_descs_noncache[1].dw0.length = 64; tx_descs_noncache[1].dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA; tx_descs_noncache[1].dw0.suc_eof = 1; tx_descs_noncache[1].next = NULL; rx_descs_noncache->buffer = dst_data; - rx_descs_noncache->dw0.size = 100; + rx_descs_noncache->dw0.size = 128; rx_descs_noncache->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA; rx_descs_noncache->dw0.suc_eof = 1; rx_descs_noncache->next = NULL; #else tx_descs->buffer = src_data; - 
tx_descs->dw0.size = 100; - tx_descs->dw0.length = 100; + tx_descs->dw0.size = 128; + tx_descs->dw0.length = 128; tx_descs->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA; tx_descs->dw0.suc_eof = 1; tx_descs->next = NULL; rx_descs->buffer = dst_data; - rx_descs->dw0.size = 100; + rx_descs->dw0.size = 128; rx_descs->dw0.owner = DMA_DESCRIPTOR_BUFFER_OWNER_DMA; rx_descs->next = NULL; #endif #if CONFIG_IDF_TARGET_ESP32P4 // do write-back for the source data because it's in the cache - Cache_WriteBack_Addr(CACHE_MAP_L1_DCACHE, (uint32_t)src_data, 100); + TEST_ESP_OK(esp_cache_msync((void *)src_data, 128, ESP_CACHE_MSYNC_FLAG_DIR_C2M)); #endif TEST_ESP_OK(gdma_start(rx_chan, (intptr_t)rx_descs)); @@ -242,14 +242,14 @@ static void test_gdma_m2m_mode(gdma_channel_handle_t tx_chan, gdma_channel_handl #if CONFIG_IDF_TARGET_ESP32P4 // the destination data are not reflected to the cache, so do an invalidate to ask the cache load new data - Cache_Invalidate_Addr(CACHE_MAP_L1_DCACHE, (uint32_t)dst_data, 100); + TEST_ESP_OK(esp_cache_msync((void *)dst_data, 128, ESP_CACHE_MSYNC_FLAG_DIR_M2C)); #endif // check the DMA descriptor write-back feature TEST_ASSERT_EQUAL(DMA_DESCRIPTOR_BUFFER_OWNER_CPU, tx_descs[0].dw0.owner); TEST_ASSERT_EQUAL(DMA_DESCRIPTOR_BUFFER_OWNER_CPU, rx_descs[0].dw0.owner); - for (int i = 0; i < 100; i++) { + for (int i = 0; i < 128; i++) { TEST_ASSERT_EQUAL(i, dst_data[i]); } free((void *)src_buf); diff --git a/components/esp_lcd/src/esp_lcd_panel_io_i80.c b/components/esp_lcd/src/esp_lcd_panel_io_i80.c index 39cd7fd65a..db86c75f03 100644 --- a/components/esp_lcd/src/esp_lcd_panel_io_i80.c +++ b/components/esp_lcd/src/esp_lcd_panel_io_i80.c @@ -30,6 +30,7 @@ #include "hal/dma_types.h" #include "hal/gpio_hal.h" #include "hal/cache_hal.h" +#include "hal/cache_ll.h" #include "esp_private/gdma.h" #include "driver/gpio.h" #include "esp_private/periph_ctrl.h" @@ -491,7 +492,7 @@ static esp_err_t panel_io_i80_tx_color(esp_lcd_panel_io_t *io, int lcd_cmd, cons 
trans_desc->user_ctx = i80_device->user_ctx; if (esp_ptr_external_ram(color)) { - uint32_t dcache_line_size = cache_hal_get_cache_line_size(CACHE_TYPE_DATA); + uint32_t dcache_line_size = cache_hal_get_cache_line_size(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_DATA); // flush frame buffer from cache to the physical PSRAM // note the esp_cache_msync function will check the alignment of the address and size, make sure they're aligned to current cache line size esp_cache_msync((void *)ALIGN_DOWN((intptr_t)color, dcache_line_size), ALIGN_UP(color_size, dcache_line_size), 0); diff --git a/components/esp_mm/esp_cache.c b/components/esp_mm/esp_cache.c index 4784bfad90..38ee4ca687 100644 --- a/components/esp_mm/esp_cache.c +++ b/components/esp_mm/esp_cache.c @@ -6,57 +6,150 @@ #include #include +#include #include "sdkconfig.h" #include "esp_check.h" #include "esp_log.h" +#include "esp_heap_caps.h" #include "esp_rom_caps.h" #include "soc/soc_caps.h" #include "hal/mmu_hal.h" #include "hal/cache_hal.h" +#include "hal/cache_ll.h" #include "esp_cache.h" +#include "esp_private/esp_cache_private.h" #include "esp_private/critical_section.h" static const char *TAG = "cache"; + +#define ALIGN_UP_BY(num, align) (((num) + ((align) - 1)) & ~((align) - 1)) + DEFINE_CRIT_SECTION_LOCK_STATIC(s_spinlock); esp_err_t esp_cache_msync(void *addr, size_t size, int flags) { ESP_RETURN_ON_FALSE_ISR(addr, ESP_ERR_INVALID_ARG, TAG, "null pointer"); - ESP_RETURN_ON_FALSE_ISR(mmu_hal_check_valid_ext_vaddr_region(0, (uint32_t)addr, size, MMU_VADDR_DATA), ESP_ERR_INVALID_ARG, TAG, "invalid address"); + + uint32_t addr_end = 0; + bool ovf = __builtin_add_overflow((uint32_t)addr, size, &addr_end); + ESP_EARLY_LOGV(TAG, "addr_end: 0x%x\n", addr_end); + ESP_RETURN_ON_FALSE_ISR(!ovf, ESP_ERR_INVALID_ARG, TAG, "wrong size, total size overflow"); + bool both_dir = (flags & ESP_CACHE_MSYNC_FLAG_DIR_C2M) && (flags & ESP_CACHE_MSYNC_FLAG_DIR_M2C); - ESP_RETURN_ON_FALSE_ISR(!both_dir, ESP_ERR_INVALID_ARG, TAG, "both C2M 
and M2C directions are selected, you should only select one"); - uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_TYPE_DATA); - if ((flags & ESP_CACHE_MSYNC_FLAG_UNALIGNED) == 0) { - bool aligned_addr = (((uint32_t)addr % data_cache_line_size) == 0) && ((size % data_cache_line_size) == 0); - ESP_RETURN_ON_FALSE_ISR(aligned_addr, ESP_ERR_INVALID_ARG, TAG, "start address, end address or the size is(are) not aligned with the data cache line size (%d)B", data_cache_line_size); - } + bool both_type = (flags & ESP_CACHE_MSYNC_FLAG_TYPE_DATA) && (flags & ESP_CACHE_MSYNC_FLAG_TYPE_INST); + ESP_RETURN_ON_FALSE_ISR(!both_dir && !both_type, ESP_ERR_INVALID_ARG, TAG, "both C2M and M2C directions, or both data and instruction type are selected, you should only select one direction or one type"); uint32_t vaddr = (uint32_t)addr; + bool valid = false; + uint32_t cache_level = 0; + uint32_t cache_id = 0; + valid = cache_hal_vaddr_to_cache_level_id(vaddr, size, &cache_level, &cache_id); + ESP_RETURN_ON_FALSE_ISR(valid, ESP_ERR_INVALID_ARG, TAG, "invalid addr or null pointer"); + + cache_type_t cache_type = CACHE_TYPE_DATA; + if (flags & ESP_CACHE_MSYNC_FLAG_TYPE_INST) { + cache_type = CACHE_TYPE_INSTRUCTION; + } + uint32_t cache_line_size = cache_hal_get_cache_line_size(cache_level, cache_type); + if ((flags & ESP_CACHE_MSYNC_FLAG_UNALIGNED) == 0) { + bool aligned_addr = (((uint32_t)addr % cache_line_size) == 0) && ((size % cache_line_size) == 0); + ESP_RETURN_ON_FALSE_ISR(aligned_addr, ESP_ERR_INVALID_ARG, TAG, "start address: 0x%x, or the size: 0x%x is(are) not aligned with cache line size (0x%x)B", (uint32_t)addr, size, cache_line_size); + } + if (flags & ESP_CACHE_MSYNC_FLAG_DIR_M2C) { - ESP_EARLY_LOGD(TAG, "M2C DIR"); + ESP_EARLY_LOGV(TAG, "M2C DIR"); esp_os_enter_critical_safe(&s_spinlock); //Add preload feature / flag here, IDF-7800 - cache_hal_invalidate_addr(vaddr, size); + valid = cache_hal_invalidate_addr(vaddr, size); 
esp_os_exit_critical_safe(&s_spinlock); - + assert(valid); } else { - ESP_EARLY_LOGD(TAG, "C2M DIR"); + ESP_EARLY_LOGV(TAG, "C2M DIR"); + if (flags & ESP_CACHE_MSYNC_FLAG_TYPE_INST) { + ESP_RETURN_ON_FALSE_ISR(false, ESP_ERR_INVALID_ARG, TAG, "C2M direction doesn't support instruction type"); + } #if SOC_CACHE_WRITEBACK_SUPPORTED esp_os_enter_critical_safe(&s_spinlock); - cache_hal_writeback_addr(vaddr, size); + valid = cache_hal_writeback_addr(vaddr, size); esp_os_exit_critical_safe(&s_spinlock); + assert(valid); if (flags & ESP_CACHE_MSYNC_FLAG_INVALIDATE) { esp_os_enter_critical_safe(&s_spinlock); - cache_hal_invalidate_addr(vaddr, size); + valid &= cache_hal_invalidate_addr(vaddr, size); esp_os_exit_critical_safe(&s_spinlock); } + assert(valid); #endif } return ESP_OK; } + + +esp_err_t esp_cache_aligned_malloc(size_t size, uint32_t flags, void **out_ptr, size_t *actual_size) +{ + ESP_RETURN_ON_FALSE_ISR(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer"); + + uint32_t cache_level = CACHE_LL_LEVEL_INT_MEM; + uint32_t heap_caps = 0; + uint32_t data_cache_line_size = 0; + void *ptr = NULL; + + if (flags & ESP_CACHE_MALLOC_FLAG_PSRAM) { + cache_level = CACHE_LL_LEVEL_EXT_MEM; + heap_caps |= MALLOC_CAP_SPIRAM; + } else { + heap_caps |= MALLOC_CAP_INTERNAL; + if (flags & ESP_CACHE_MALLOC_FLAG_DMA) { + heap_caps |= MALLOC_CAP_DMA; + } + } + +#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE + data_cache_line_size = cache_hal_get_cache_line_size(cache_level, CACHE_TYPE_DATA); +#else + if (cache_level == CACHE_LL_LEVEL_EXT_MEM) { + data_cache_line_size = cache_hal_get_cache_line_size(cache_level, CACHE_TYPE_DATA); + } else { + data_cache_line_size = 4; + } +#endif + + size = ALIGN_UP_BY(size, data_cache_line_size); + ptr = heap_caps_aligned_alloc(data_cache_line_size, size, heap_caps); + ESP_RETURN_ON_FALSE_ISR(ptr, ESP_ERR_NO_MEM, TAG, "no enough heap memory for (%"PRId32")B alignment", data_cache_line_size); + + *out_ptr = ptr; + if (actual_size) { + *actual_size = size; + } + + 
return ESP_OK; +} + + +esp_err_t esp_cache_aligned_calloc(size_t n, size_t size, uint32_t flags, void **out_ptr, size_t *actual_size) +{ + ESP_RETURN_ON_FALSE_ISR(out_ptr, ESP_ERR_INVALID_ARG, TAG, "null pointer"); + + esp_err_t ret = ESP_FAIL; + size_t size_bytes = 0; + bool ovf = false; + + ovf = __builtin_mul_overflow(n, size, &size_bytes); + ESP_RETURN_ON_FALSE_ISR(!ovf, ESP_ERR_INVALID_ARG, TAG, "wrong size, total size overflow"); + + void *ptr = NULL; + ret = esp_cache_aligned_malloc(size_bytes, flags, &ptr, actual_size); + if (ret == ESP_OK) { + memset(ptr, 0, size_bytes); + *out_ptr = ptr; + } + + return ret; +} diff --git a/components/esp_mm/esp_mmu_map.c b/components/esp_mm/esp_mmu_map.c index 9fbc9d50b5..ecf0ad56a6 100644 --- a/components/esp_mm/esp_mmu_map.c +++ b/components/esp_mm/esp_mmu_map.c @@ -118,6 +118,16 @@ static bool s_is_overlapped(uint32_t block_start, uint32_t block_end, uint32_t n #if CONFIG_APP_BUILD_USE_FLASH_SECTIONS + +static cache_bus_mask_t s_get_bus_mask(uint32_t vaddr_start, uint32_t len) +{ +#if CACHE_LL_EXT_MEM_VIA_L2CACHE + return cache_ll_l2_get_bus(0, vaddr_start, len); +#else + return cache_ll_l1_get_bus(0, vaddr_start, len); +#endif +} + static void s_reserve_irom_region(mem_region_t *hw_mem_regions, int region_nums) { /** @@ -133,7 +143,7 @@ static void s_reserve_irom_region(mem_region_t *hw_mem_regions, int region_nums) irom_len_to_reserve += (uint32_t)&_instruction_reserved_start - ALIGN_DOWN_BY((uint32_t)&_instruction_reserved_start, CONFIG_MMU_PAGE_SIZE); irom_len_to_reserve = ALIGN_UP_BY(irom_len_to_reserve, CONFIG_MMU_PAGE_SIZE); - cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_instruction_reserved_start, irom_len_to_reserve); + cache_bus_mask_t bus_mask = s_get_bus_mask((uint32_t)&_instruction_reserved_start, irom_len_to_reserve); for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) { if (bus_mask & hw_mem_regions[i].bus_id) { @@ -161,7 +171,7 @@ static void s_reserve_drom_region(mem_region_t 
*hw_mem_regions, int region_nums) drom_len_to_reserve += (uint32_t)&_rodata_reserved_start - ALIGN_DOWN_BY((uint32_t)&_rodata_reserved_start, CONFIG_MMU_PAGE_SIZE); drom_len_to_reserve = ALIGN_UP_BY(drom_len_to_reserve, CONFIG_MMU_PAGE_SIZE); - cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)&_rodata_reserved_start, drom_len_to_reserve); + cache_bus_mask_t bus_mask = s_get_bus_mask((uint32_t)&_rodata_reserved_start, drom_len_to_reserve); for (int i = 0; i < SOC_MMU_LINEAR_ADDRESS_REGION_NUM; i++) { if (bus_mask & hw_mem_regions[i].bus_id) { diff --git a/components/esp_mm/include/esp_cache.h b/components/esp_mm/include/esp_cache.h index 848bee251e..f8efcb979c 100644 --- a/components/esp_mm/include/esp_cache.h +++ b/components/esp_mm/include/esp_cache.h @@ -36,6 +36,15 @@ extern "C" { * @brief Cache msync direction: from memory to Cache */ #define ESP_CACHE_MSYNC_FLAG_DIR_M2C BIT(3) +/** + * @brief Cache msync type: data + * @note If you don't set type (ESP_CACHE_MSYNC_FLAG_TYPE_x flags), it is by default data type + */ +#define ESP_CACHE_MSYNC_FLAG_TYPE_DATA BIT(4) +/** + * @brief Cache msync type: instruction + */ +#define ESP_CACHE_MSYNC_FLAG_TYPE_INST BIT(5) /** * @brief Memory sync between Cache and storage memory @@ -54,6 +63,7 @@ extern "C" { * This API is cache-safe and thread-safe * * @note If you don't set direction (ESP_CACHE_MSYNC_FLAG_DIR_x flags), this API is by default C2M direction + * @note If you don't set type (ESP_CACHE_MSYNC_FLAG_TYPE_x flags), this API is by default doing msync for data * @note You should not call this during any Flash operations (e.g. 
esp_flash APIs, nvs and some other APIs that are based on esp_flash APIs) * @note If XIP_From_PSRAM is enabled (by enabling both CONFIG_SPIRAM_FETCH_INSTRUCTIONS and CONFIG_SPIRAM_RODATA), you can call this API during Flash operations * diff --git a/components/esp_mm/include/esp_private/esp_cache_private.h b/components/esp_mm/include/esp_private/esp_cache_private.h new file mode 100644 index 0000000000..7735d298b9 --- /dev/null +++ b/components/esp_mm/include/esp_private/esp_cache_private.h @@ -0,0 +1,64 @@ +/* + * SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ +#pragma once + +#include +#include +#include "esp_err.h" +#include "esp_bit_defs.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Cache malloc flags + */ +/** + * @brief Memory is in PSRAM + */ +#define ESP_CACHE_MALLOC_FLAG_PSRAM BIT(0) + +/** + * @brief Memory is capable to be accessed by DMA + */ +#define ESP_CACHE_MALLOC_FLAG_DMA BIT(1) + +/** + * @brief Helper function for malloc a cache aligned memory buffer + * + * @param[in] size Size in bytes, the amount of memory to allocate + * @param[in] flags Flags, see `ESP_CACHE_MALLOC_FLAG_x` + * @param[out] out_ptr A pointer to the memory allocated successfully + * @param[out] actual_size Actual size for allocation in bytes, when the size you specified doesn't meet the cache alignment requirements, this value might be bigger than the size you specified. Set null if you don't care this value. 
+ * + * @return + * - ESP_OK: + * - ESP_ERR_INVALID_ARG: Invalid argument + * - ESP_ERR_NO_MEM: No enough memory for allocation + */ +esp_err_t esp_cache_aligned_malloc(size_t size, uint32_t flags, void **out_ptr, size_t *actual_size); + +/** + * @brief Helper function for calloc a cache aligned memory buffer + * + * @param[in] n Number of continuing chunks of memory to allocate + * @param[in] size Size of one chunk, in bytes + * @param[in] flags Flags, see `ESP_CACHE_MALLOC_FLAG_x` + * @param[out] out_ptr A pointer to the memory allocated successfully + * @param[out] actual_size Actual size for allocation in bytes, when the size you specified doesn't meet the cache alignment requirements, this value might be bigger than the size you specified. Set null if you don't care this value. + * + * @return + * - ESP_OK: + * - ESP_ERR_INVALID_ARG: Invalid argument + * - ESP_ERR_NO_MEM: No enough memory for allocation + */ +esp_err_t esp_cache_aligned_calloc(size_t n, size_t size, uint32_t flags, void **out_ptr, size_t *actual_size); + + +#ifdef __cplusplus +} +#endif diff --git a/components/esp_mm/test_apps/mm/main/test_cache_msync.c b/components/esp_mm/test_apps/mm/main/test_cache_msync.c index 993edc7ff7..21ad6e5f00 100644 --- a/components/esp_mm/test_apps/mm/main/test_cache_msync.c +++ b/components/esp_mm/test_apps/mm/main/test_cache_msync.c @@ -23,6 +23,7 @@ #include "esp_partition.h" #include "esp_flash.h" #include "test_mm_utils.h" +#include "soc/ext_mem_defs.h" const static char *TAG = "CACHE_TEST"; @@ -32,9 +33,11 @@ const static char *TAG = "CACHE_TEST"; #define TEST_OFFSET 0x100000 #if CONFIG_IDF_TARGET_ESP32S2 -#define TEST_SYNC_START (0x3F500000 + TEST_OFFSET) +#define TEST_SYNC_START (SOC_DPORT_CACHE_ADDRESS_LOW + TEST_OFFSET) #elif CONFIG_IDF_TARGET_ESP32S3 -#define TEST_SYNC_START (0x3C000000 + TEST_OFFSET) +#define TEST_SYNC_START (SOC_DRAM0_CACHE_ADDRESS_LOW + TEST_OFFSET) +#elif CONFIG_IDF_TARGET_ESP32P4 +#define TEST_SYNC_START (SOC_DRAM_PSRAM_ADDRESS_LOW 
+ TEST_OFFSET) #endif #define TEST_SYNC_SIZE 0x8000 diff --git a/components/esp_system/port/cpu_start.c b/components/esp_system/port/cpu_start.c index 3c720d122a..4846099596 100644 --- a/components/esp_system/port/cpu_start.c +++ b/components/esp_system/port/cpu_start.c @@ -341,7 +341,7 @@ void IRAM_ATTR do_multicore_settings(void) cache_bus_mask_t cache_bus_mask_core0 = cache_ll_l1_get_enabled_bus(0); #ifndef CONFIG_IDF_TARGET_ESP32 // 1. disable the cache before changing its settings. - cache_hal_disable(CACHE_TYPE_ALL); + cache_hal_disable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); #endif for (unsigned core = 1; core < SOC_CPU_CORES_NUM; core++) { // 2. change cache settings. All cores must have the same settings. @@ -349,7 +349,7 @@ void IRAM_ATTR do_multicore_settings(void) } #ifndef CONFIG_IDF_TARGET_ESP32 // 3. enable the cache after changing its settings. - cache_hal_enable(CACHE_TYPE_ALL); + cache_hal_enable(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); #endif } #endif //#if !CONFIG_IDF_TARGET_ESP32P4 @@ -496,7 +496,7 @@ void IRAM_ATTR call_start_cpu0(void) #endif // CONFIG_IDF_TARGET_ESP32S3 #if CONFIG_IDF_TARGET_ESP32P4 - //TODO: IDF-7516, add cache init API + //TODO: IDF-5670, add cache init API extern void esp_config_l2_cache_mode(void); esp_config_l2_cache_mode(); #endif diff --git a/components/esp_system/port/soc/esp32p4/Kconfig.cache b/components/esp_system/port/soc/esp32p4/Kconfig.cache index 46b88d7309..90bc34a829 100644 --- a/components/esp_system/port/soc/esp32p4/Kconfig.cache +++ b/components/esp_system/port/soc/esp32p4/Kconfig.cache @@ -1,43 +1,47 @@ menu "Cache config" - choice ESP32P4_L2_CACHE_SIZE + choice CACHE_L2_CACHE_SIZE prompt "L2 cache size" - default ESP32P4_L2_CACHE_128KB + default CACHE_L2_CACHE_128KB help L2 cache size to be set on application startup. 
- config ESP32P4_L2_CACHE_128KB + config CACHE_L2_CACHE_128KB bool "128KB" - config ESP32P4_L2_CACHE_256KB + config CACHE_L2_CACHE_256KB bool "256KB" - config ESP32P4_L2_CACHE_512KB + config CACHE_L2_CACHE_512KB bool "512KB" endchoice - config ESP32P4_L2_CACHE_SIZE + config CACHE_L2_CACHE_SIZE hex - default 0x20000 if ESP32P4_L2_CACHE_128KB - default 0x40000 if ESP32P4_L2_CACHE_256KB - default 0x80000 if ESP32P4_L2_CACHE_512KB + default 0x20000 if CACHE_L2_CACHE_128KB + default 0x40000 if CACHE_L2_CACHE_256KB + default 0x80000 if CACHE_L2_CACHE_512KB - choice ESP32P4_L2_CACHE_LINE_SIZE + choice CACHE_L2_CACHE_LINE_SIZE prompt "L2 cache line size" - default ESP32P4_L2_CACHE_LINE_64B if ESP32P4_L2_CACHE_128KB - default ESP32P4_L2_CACHE_LINE_64B if ESP32P4_L2_CACHE_256KB - default ESP32P4_L2_CACHE_LINE_128B if ESP32P4_L2_CACHE_512KB + default CACHE_L2_CACHE_LINE_64B if CACHE_L2_CACHE_128KB + default CACHE_L2_CACHE_LINE_64B if CACHE_L2_CACHE_256KB + default CACHE_L2_CACHE_LINE_128B if CACHE_L2_CACHE_512KB help L2 cache line size to be set on application startup. 
- config ESP32P4_L2_CACHE_LINE_64B + config CACHE_L2_CACHE_LINE_64B bool "64 Bytes" - depends on ESP32P4_L2_CACHE_128KB || ESP32P4_L2_CACHE_256KB - config ESP32P4_L2_CACHE_LINE_128B + depends on CACHE_L2_CACHE_128KB || CACHE_L2_CACHE_256KB + config CACHE_L2_CACHE_LINE_128B bool "128 Bytes" endchoice - config ESP32P4_L2_CACHE_LINE_SIZE + config CACHE_L2_CACHE_LINE_SIZE int - default 64 if ESP32P4_L2_CACHE_LINE_64B - default 128 if ESP32P4_L2_CACHE_LINE_128B + default 64 if CACHE_L2_CACHE_LINE_64B + default 128 if CACHE_L2_CACHE_LINE_128B + + config CACHE_L1_CACHE_LINE_SIZE + int + default 64 endmenu # Cache config diff --git a/components/hal/cache_hal.c b/components/hal/cache_hal.c index c2ab573dcd..6252970e43 100644 --- a/components/hal/cache_hal.c +++ b/components/hal/cache_hal.c @@ -5,6 +5,7 @@ */ #include #include +#include #include "sdkconfig.h" #include "esp_err.h" #include "esp_attr.h" @@ -24,47 +25,45 @@ * Now this file doesn't compile on ESP32 *----------------------------------------------------------------------------*/ -/** - * To know if autoload is enabled or not. - * - * We should have a unified flag for this aim, then we don't need to call following 2 functions - * to know the flag. - * - * Suggest ROM keeping this flag value to BIT(2). 
Then we can replace following lines to: - * #define DATA_AUTOLOAD_FLAG BIT(2) - * #define INST_AUTOLOAD_FLAG BIT(2) - */ -#if CONFIG_IDF_TARGET_ESP32P4 //TODO: IDF-7516 -#define DATA_AUTOLOAD_ENABLE Cache_Disable_L2_Cache() -#define INST_AUTOLOAD_ENABLE Cache_Disable_L2_Cache() -#else -#define DATA_AUTOLOAD_ENABLE cache_ll_is_cache_autoload_enabled(CACHE_TYPE_DATA) -#define INST_AUTOLOAD_ENABLE cache_ll_is_cache_autoload_enabled(CACHE_TYPE_INSTRUCTION) -#endif - /** * Necessary hal contexts, could be maintained by upper layer in the future */ typedef struct { - bool data_autoload_en; - bool inst_autoload_en; + bool i_autoload_en; + bool d_autoload_en; #if CACHE_LL_ENABLE_DISABLE_STATE_SW // There's no register indicating if cache is enabled on these chips, use sw flag to save this state. - volatile bool cache_enabled; + bool i_cache_enabled; + bool d_cache_enabled; #endif +} cache_hal_state_t; + + +typedef struct { + cache_hal_state_t l1; + cache_hal_state_t l2; } cache_hal_context_t; static cache_hal_context_t ctx; + +void s_cache_hal_init_ctx(void) +{ + ctx.l1.d_autoload_en = cache_ll_is_cache_autoload_enabled(1, CACHE_TYPE_DATA, CACHE_LL_ID_ALL); + ctx.l1.i_autoload_en = cache_ll_is_cache_autoload_enabled(1, CACHE_TYPE_INSTRUCTION, CACHE_LL_ID_ALL); + ctx.l2.d_autoload_en = cache_ll_is_cache_autoload_enabled(2, CACHE_TYPE_DATA, CACHE_LL_ID_ALL); + ctx.l2.i_autoload_en = cache_ll_is_cache_autoload_enabled(2, CACHE_TYPE_INSTRUCTION, CACHE_LL_ID_ALL); +} + void cache_hal_init(void) { - ctx.data_autoload_en = DATA_AUTOLOAD_ENABLE; - ctx.inst_autoload_en = INST_AUTOLOAD_ENABLE; -#if SOC_CACHE_L2_SUPPORTED - Cache_Enable_L2_Cache(ctx.inst_autoload_en); -#else - cache_ll_enable_cache(CACHE_TYPE_ALL, ctx.inst_autoload_en, ctx.data_autoload_en); -#endif //SOC_CACHE_L2_SUPPORTED + s_cache_hal_init_ctx(); + + if (CACHE_LL_LEVEL_EXT_MEM == 1) { + cache_ll_enable_cache(1, CACHE_TYPE_ALL, CACHE_LL_ID_ALL, ctx.l1.i_autoload_en, ctx.l1.d_autoload_en); + } else if 
(CACHE_LL_LEVEL_EXT_MEM == 2) { + cache_ll_enable_cache(2, CACHE_TYPE_ALL, CACHE_LL_ID_ALL, ctx.l2.i_autoload_en, ctx.l2.d_autoload_en); + } cache_ll_l1_enable_bus(0, CACHE_LL_DEFAULT_DBUS_MASK); cache_ll_l1_enable_bus(0, CACHE_LL_DEFAULT_IBUS_MASK); @@ -74,125 +73,222 @@ void cache_hal_init(void) #endif #if CACHE_LL_ENABLE_DISABLE_STATE_SW - ctx.cache_enabled = 1; + ctx.l1.i_cache_enabled = 1; + ctx.l1.d_cache_enabled = 1; + ctx.l2.i_cache_enabled = 1; + ctx.l2.d_cache_enabled = 1; #endif } -void cache_hal_disable(cache_type_t type) +#if CACHE_LL_ENABLE_DISABLE_STATE_SW +void s_update_cache_state(uint32_t cache_level, cache_type_t type, bool en) { -#if SOC_CACHE_L2_SUPPORTED - Cache_Disable_L2_Cache(); -#else - cache_ll_disable_cache(type); -#endif //SOC_CACHE_L2_SUPPORTED + HAL_ASSERT(cache_level && (cache_level <= CACHE_LL_LEVEL_NUMS)); + + switch (cache_level) { + case 1: + if (type == CACHE_TYPE_INSTRUCTION) { + ctx.l1.i_cache_enabled = en; + break; + } else if (type == CACHE_TYPE_DATA) { + ctx.l1.d_cache_enabled = en; + break; + } else if (type == CACHE_TYPE_ALL) { + ctx.l1.i_cache_enabled = en; + ctx.l1.d_cache_enabled = en; + break; + } else { + HAL_ASSERT(false); + break; + } + case 2: + if (type == CACHE_TYPE_INSTRUCTION) { + ctx.l2.i_cache_enabled = en; + break; + } else if (type == CACHE_TYPE_DATA) { + ctx.l2.d_cache_enabled = en; + break; + } else if (type == CACHE_TYPE_ALL) { + ctx.l2.i_cache_enabled = en; + ctx.l2.d_cache_enabled = en; + break; + } else { + HAL_ASSERT(false); + break; + } + default: + HAL_ASSERT(false); + break; + } +} + +bool s_get_cache_state(uint32_t cache_level, cache_type_t type) +{ + HAL_ASSERT(cache_level && (cache_level <= CACHE_LL_LEVEL_NUMS)); + bool enabled = false; + + switch (cache_level) { + case 1: + if (type == CACHE_TYPE_INSTRUCTION) { + enabled = ctx.l1.i_cache_enabled; + break; + } else if (type == CACHE_TYPE_DATA) { + enabled = ctx.l1.d_cache_enabled; + break; + } else if (type == CACHE_TYPE_ALL) { + enabled = 
ctx.l1.i_cache_enabled; + enabled &= ctx.l1.d_cache_enabled; + break; + } else { + HAL_ASSERT(false); + break; + } + case 2: + if (type == CACHE_TYPE_INSTRUCTION) { + enabled = ctx.l2.i_cache_enabled; + break; + } else if (type == CACHE_TYPE_DATA) { + enabled = ctx.l2.d_cache_enabled; + break; + } else if (type == CACHE_TYPE_ALL) { + enabled = ctx.l2.i_cache_enabled; + enabled &= ctx.l2.d_cache_enabled; + break; + } else { + HAL_ASSERT(false); + break; + } + default: + HAL_ASSERT(false); + break; + } + + return enabled; +} +#endif //#if CACHE_LL_ENABLE_DISABLE_STATE_SW + +void cache_hal_disable(uint32_t cache_level, cache_type_t type) +{ + HAL_ASSERT(cache_level && (cache_level <= CACHE_LL_LEVEL_NUMS)); + + cache_ll_disable_cache(cache_level, type, CACHE_LL_ID_ALL); #if CACHE_LL_ENABLE_DISABLE_STATE_SW - ctx.cache_enabled = 0; + s_update_cache_state(cache_level, type, false); #endif } -void cache_hal_enable(cache_type_t type) +void cache_hal_enable(uint32_t cache_level, cache_type_t type) { -#if SOC_CACHE_L2_SUPPORTED - Cache_Enable_L2_Cache(ctx.inst_autoload_en); -#else - cache_ll_enable_cache(type, ctx.inst_autoload_en, ctx.data_autoload_en); -#endif //SOC_CACHE_L2_SUPPORTED + HAL_ASSERT(cache_level && (cache_level <= CACHE_LL_LEVEL_NUMS)); + + if (cache_level == 1) { + cache_ll_enable_cache(1, type, CACHE_LL_ID_ALL, ctx.l1.i_autoload_en, ctx.l1.d_autoload_en); + } else if (cache_level == 2) { + cache_ll_enable_cache(2, type, CACHE_LL_ID_ALL, ctx.l2.i_autoload_en, ctx.l2.d_autoload_en); + } #if CACHE_LL_ENABLE_DISABLE_STATE_SW - ctx.cache_enabled = 1; + s_update_cache_state(cache_level, type, true); #endif } -void cache_hal_suspend(cache_type_t type) +void cache_hal_suspend(uint32_t cache_level, cache_type_t type) { -#if SOC_CACHE_L2_SUPPORTED - Cache_Suspend_L2_Cache(); -#else - cache_ll_suspend_cache(type); -#endif //SOC_CACHE_L2_SUPPORTED + HAL_ASSERT(cache_level && (cache_level <= CACHE_LL_LEVEL_NUMS)); + + cache_ll_suspend_cache(cache_level, type, 
CACHE_LL_ID_ALL); #if CACHE_LL_ENABLE_DISABLE_STATE_SW - ctx.cache_enabled = 0; + s_update_cache_state(cache_level, type, false); #endif } -void cache_hal_resume(cache_type_t type) +void cache_hal_resume(uint32_t cache_level, cache_type_t type) { -#if SOC_CACHE_L2_SUPPORTED - Cache_Resume_L2_Cache(ctx.inst_autoload_en); -#else - cache_ll_resume_cache(type, ctx.inst_autoload_en, ctx.data_autoload_en); -#endif + HAL_ASSERT(cache_level && (cache_level <= CACHE_LL_LEVEL_NUMS)); + + if (cache_level == 1) { + cache_ll_resume_cache(1, type, CACHE_LL_ID_ALL, ctx.l1.i_autoload_en, ctx.l1.d_autoload_en); + } else if (cache_level == 2) { + cache_ll_resume_cache(2, type, CACHE_LL_ID_ALL, ctx.l2.i_autoload_en, ctx.l2.d_autoload_en); + } #if CACHE_LL_ENABLE_DISABLE_STATE_SW - ctx.cache_enabled = 1; + s_update_cache_state(cache_level, type, true); #endif } -bool cache_hal_is_cache_enabled(cache_type_t type) +bool cache_hal_is_cache_enabled(uint32_t cache_level, cache_type_t type) { - bool enabled; + bool enabled = false; #if CACHE_LL_ENABLE_DISABLE_STATE_SW - enabled = ctx.cache_enabled; + enabled = s_get_cache_state(cache_level, type); #else enabled = cache_ll_is_cache_enabled(type); #endif //CACHE_LL_ENABLE_DISABLE_STATE_SW return enabled; } -void cache_hal_invalidate_addr(uint32_t vaddr, uint32_t size) +bool cache_hal_vaddr_to_cache_level_id(uint32_t vaddr_start, uint32_t len, uint32_t *out_level, uint32_t *out_id) { - //Now only esp32 has 2 MMUs, this file doesn't build on esp32 - HAL_ASSERT(mmu_hal_check_valid_ext_vaddr_region(0, vaddr, size, MMU_VADDR_DATA | MMU_VADDR_INSTRUCTION)); -#if CONFIG_IDF_TARGET_ESP32P4 - Cache_Invalidate_Addr(CACHE_MAP_L1_DCACHE | CACHE_MAP_L2_CACHE, vaddr, size); -#else - cache_ll_invalidate_addr(vaddr, size); -#endif + if (!out_level || !out_id) { + return false; + } + return cache_ll_vaddr_to_cache_level_id(vaddr_start, len, out_level, out_id); +} + +bool cache_hal_invalidate_addr(uint32_t vaddr, uint32_t size) +{ + bool valid = false; + 
uint32_t cache_level = 0; + uint32_t cache_id = 0; + + valid = cache_hal_vaddr_to_cache_level_id(vaddr, size, &cache_level, &cache_id); + if (valid) { + cache_ll_invalidate_addr(cache_level, CACHE_TYPE_ALL, cache_id, vaddr, size); + } + + return valid; } #if SOC_CACHE_WRITEBACK_SUPPORTED -void cache_hal_writeback_addr(uint32_t vaddr, uint32_t size) +bool cache_hal_writeback_addr(uint32_t vaddr, uint32_t size) { - HAL_ASSERT(mmu_hal_check_valid_ext_vaddr_region(0, vaddr, size, MMU_VADDR_DATA)); -#if CONFIG_IDF_TARGET_ESP32P4 - Cache_WriteBack_Addr(CACHE_MAP_L1_DCACHE, vaddr, size); - Cache_WriteBack_Addr(CACHE_MAP_L2_CACHE, vaddr, size); -#else - cache_ll_writeback_addr(vaddr, size); -#endif + bool valid = false; + uint32_t cache_level = 0; + uint32_t cache_id = 0; + + valid = cache_hal_vaddr_to_cache_level_id(vaddr, size, &cache_level, &cache_id); + if (valid) { + cache_ll_writeback_addr(cache_level, CACHE_TYPE_DATA, cache_id, vaddr, size); + } + + return valid; } #endif //#if SOC_CACHE_WRITEBACK_SUPPORTED - #if SOC_CACHE_FREEZE_SUPPORTED -void cache_hal_freeze(cache_type_t type) +void cache_hal_freeze(uint32_t cache_level, cache_type_t type) { -#if SOC_CACHE_L2_SUPPORTED - Cache_Freeze_L2_Cache_Enable(CACHE_FREEZE_ACK_BUSY); -#else - cache_ll_freeze_cache(type); -#endif //SOC_CACHE_L2_SUPPORTED + HAL_ASSERT(cache_level && (cache_level <= CACHE_LL_LEVEL_NUMS)); + + cache_ll_freeze_cache(cache_level, type, CACHE_LL_ID_ALL); } -void cache_hal_unfreeze(cache_type_t type) +void cache_hal_unfreeze(uint32_t cache_level, cache_type_t type) { -#if SOC_CACHE_L2_SUPPORTED - Cache_Freeze_L2_Cache_Disable(); -#else - cache_ll_unfreeze_cache(type); -#endif //SOC_CACHE_L2_SUPPORTED + HAL_ASSERT(cache_level && (cache_level <= CACHE_LL_LEVEL_NUMS)); + + cache_ll_unfreeze_cache(cache_level, type, CACHE_LL_ID_ALL); } #endif //#if SOC_CACHE_FREEZE_SUPPORTED -uint32_t cache_hal_get_cache_line_size(cache_type_t type) +uint32_t cache_hal_get_cache_line_size(uint32_t cache_level, 
cache_type_t type) { - uint32_t line_size = 0; -#if SOC_CACHE_L2_SUPPORTED - line_size = Cache_Get_L2_Cache_Line_Size(); -#else - line_size = cache_ll_get_line_size(type); -#endif //SOC_CACHE_L2_SUPPORTED + HAL_ASSERT(cache_level && (cache_level <= CACHE_LL_LEVEL_NUMS)); + + uint32_t line_size = cache_ll_get_line_size(cache_level, type, CACHE_LL_ID_ALL); + return line_size; } diff --git a/components/hal/esp32/cache_hal_esp32.c b/components/hal/esp32/cache_hal_esp32.c index 2cd500775f..6b4fd2037d 100644 --- a/components/hal/esp32/cache_hal_esp32.c +++ b/components/hal/esp32/cache_hal_esp32.c @@ -14,7 +14,7 @@ static uint32_t s_cache_status[2]; * There's a bug that Cache_Read_Disable requires a call to Cache_Flush * before Cache_Read_Enable, even if cached data was not modified. */ -void cache_hal_suspend(cache_type_t type) +void cache_hal_suspend(uint32_t cache_level, cache_type_t type) { s_cache_status[0] = cache_ll_l1_get_enabled_bus(0); cache_ll_l1_disable_cache(0); @@ -25,7 +25,7 @@ void cache_hal_suspend(cache_type_t type) } -void cache_hal_resume(cache_type_t type) +void cache_hal_resume(uint32_t cache_level, cache_type_t type) { cache_ll_l1_enable_cache(0); cache_ll_l1_enable_bus(0, s_cache_status[0]); @@ -36,7 +36,7 @@ void cache_hal_resume(cache_type_t type) } -bool cache_hal_is_cache_enabled(cache_type_t type) +bool cache_hal_is_cache_enabled(uint32_t cache_level, cache_type_t type) { bool result = cache_ll_l1_is_cache_enabled(0, CACHE_TYPE_ALL); #if !CONFIG_FREERTOS_UNICORE @@ -44,3 +44,23 @@ bool cache_hal_is_cache_enabled(cache_type_t type) #endif return result; } + +bool cache_hal_vaddr_to_cache_level_id(uint32_t vaddr_start, uint32_t len, uint32_t *out_level, uint32_t *out_id) +{ + if (!out_level || !out_id) { + return false; + } + return cache_ll_vaddr_to_cache_level_id(vaddr_start, len, out_level, out_id); +} + +uint32_t cache_hal_get_cache_line_size(uint32_t cache_level, cache_type_t type) +{ + HAL_ASSERT(cache_level && (cache_level <= 
CACHE_LL_LEVEL_NUMS)); + return 4; +} + +bool cache_hal_invalidate_addr(uint32_t vaddr, uint32_t size) +{ + //esp32 doesn't support invalidate certain addr + abort(); +} diff --git a/components/hal/esp32/include/hal/cache_ll.h b/components/hal/esp32/include/hal/cache_ll.h index b0256d099d..773202ad27 100644 --- a/components/hal/esp32/include/hal/cache_ll.h +++ b/components/hal/esp32/include/hal/cache_ll.h @@ -19,6 +19,12 @@ extern "C" { #endif +#define CACHE_LL_ID_ALL 2 //All of the caches in a type and level, make this value greater than any ID +#define CACHE_LL_LEVEL_INT_MEM 0 //Cache level for accessing internal mem +#define CACHE_LL_LEVEL_EXT_MEM 1 //Cache level for accessing external mem +#define CACHE_LL_LEVEL_ALL 2 //All of the cache levels, make this value greater than any level +#define CACHE_LL_LEVEL_NUMS 1 //Number of cache levels + /** * @brief enable a cache unit * @@ -27,7 +33,7 @@ extern "C" { __attribute__((always_inline)) static inline void cache_ll_l1_enable_cache(uint32_t cache_id) { - HAL_ASSERT(cache_id == 0 || cache_id == 1); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); if (cache_id == 0) { DPORT_REG_SET_BIT(DPORT_PRO_CACHE_CTRL_REG, DPORT_PRO_CACHE_ENABLE); @@ -67,7 +73,7 @@ static inline void cache_ll_l1_disable_cache(uint32_t cache_id) __attribute__((always_inline)) static inline bool cache_ll_l1_is_cache_enabled(uint32_t cache_id, cache_type_t type) { - HAL_ASSERT(cache_id == 0 || cache_id == 1); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); (void) type; //On 32 it shares between I and D cache bool enabled; @@ -94,7 +100,7 @@ __attribute__((always_inline)) #endif static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t vaddr_start, uint32_t len) { - HAL_ASSERT(cache_id == 0 || cache_id == 1); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); cache_bus_mask_t mask = (cache_bus_mask_t)0; uint32_t vaddr_end = vaddr_start + len - 1; @@ -135,7 +141,7 @@ __attribute__((always_inline)) static inline void cache_ll_l1_enable_bus(uint32_t 
cache_id, cache_bus_mask_t mask) { (void) mask; - HAL_ASSERT(cache_id == 0 || cache_id == 1); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); uint32_t bus_mask = 0; if (cache_id == 0) { @@ -170,7 +176,7 @@ __attribute__((always_inline)) static inline cache_bus_mask_t cache_ll_l1_get_enabled_bus(uint32_t cache_id) { cache_bus_mask_t mask = (cache_bus_mask_t)0; - HAL_ASSERT(cache_id == 0 || cache_id == 1); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); if (cache_id == 0) { uint32_t bus_mask= DPORT_REG_READ(DPORT_PRO_CACHE_CTRL1_REG); mask = (cache_bus_mask_t)(mask | ((!(bus_mask & DPORT_PRO_CACHE_MASK_IRAM0)) ? CACHE_BUS_IBUS0 : 0)); @@ -202,7 +208,7 @@ __attribute__((always_inline)) static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t mask) { (void) mask; - HAL_ASSERT(cache_id == 0 || cache_id == 1); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); uint32_t bus_mask = 0; if (cache_id == 0) { @@ -226,6 +232,33 @@ static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t m } } +/** + * @brief Get Cache level and the ID of the vaddr + * + * @param vaddr_start virtual address start + * @param len vaddr length + * @param out_level cache level + * @param out_id cache id + * + * @return true for valid + */ +__attribute__((always_inline)) +static inline bool cache_ll_vaddr_to_cache_level_id(uint32_t vaddr_start, uint32_t len, uint32_t *out_level, uint32_t *out_id) +{ + bool valid = false; + uint32_t vaddr_end = vaddr_start + len - 1; + + valid |= ((vaddr_start >= SOC_DROM0_CACHE_ADDRESS_LOW) && (vaddr_end < SOC_DROM0_CACHE_ADDRESS_HIGH)) || ((vaddr_start >= SOC_DRAM1_CACHE_ADDRESS_LOW) && (vaddr_end < SOC_DRAM1_CACHE_ADDRESS_HIGH)); + valid |= ((vaddr_start >= SOC_IRAM0_CACHE_ADDRESS_LOW) && (vaddr_end < SOC_IRAM0_CACHE_ADDRESS_HIGH)); + + if (valid) { + *out_level = 1; + *out_id = 0; + } + + return valid; +} + #ifdef __cplusplus } #endif diff --git a/components/hal/esp32c2/include/hal/cache_ll.h 
b/components/hal/esp32c2/include/hal/cache_ll.h index b4820799df..ba192c98b6 100644 --- a/components/hal/esp32c2/include/hal/cache_ll.h +++ b/components/hal/esp32c2/include/hal/cache_ll.h @@ -36,72 +36,83 @@ extern "C" { #define CACHE_LL_L1_ILG_EVENT_PRELOAD_OP_FAULT (1<<1) #define CACHE_LL_L1_ILG_EVENT_SYNC_OP_FAULT (1<<0) +#define CACHE_LL_ID_ALL 1 //All of the caches in a type and level, make this value greater than any ID +#define CACHE_LL_LEVEL_INT_MEM 0 //Cache level for accessing internal mem +#define CACHE_LL_LEVEL_EXT_MEM 1 //Cache level for accessing external mem +#define CACHE_LL_LEVEL_ALL 2 //All of the cache levels, make this value greater than any level +#define CACHE_LL_LEVEL_NUMS 1 //Number of cache levels //On ESP32C2, the auto preload flag is always 0 #define CACHE_LL_L1_ICACHE_AUTOLOAD 0 /** - * @brief Check if Cache auto preload is enabled or not. On ESP32C2, instructions and data share Cache + * @brief Check if Cache auto preload is enabled or not. * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level * - * @return false (On ESP32C2, it's always false) + * @return true: enabled; false: disabled */ __attribute__((always_inline)) -static inline bool cache_ll_is_cache_autoload_enabled(cache_type_t type) +static inline bool cache_ll_is_cache_autoload_enabled(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { bool enabled = false; return enabled; } /** - * @brief Disable Cache. 
On ESP32C2, instructions and data share Cache + * @brief Disable Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_disable_cache(cache_type_t type) +static inline void cache_ll_disable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { (void) type; Cache_Disable_ICache(); } /** - * @brief Enable Cache. On ESP32C2, instructions and data share Cache + * @brief Enable Cache * - * @param type see `cache_type_t` - * - * @param data_autoload_en Dcache auto preload enabled - * - * @param inst_autoload_en Icache auto preload enabled + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not */ __attribute__((always_inline)) -static inline void cache_ll_enable_cache(cache_type_t type, bool inst_autoload_en, bool data_autoload_en) +static inline void cache_ll_enable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) { Cache_Enable_ICache(CACHE_LL_L1_ICACHE_AUTOLOAD); } /** - * @brief Suspend Cache. On ESP32C2, instructions and data share Cache + * @brief Suspend Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_suspend_cache(cache_type_t type) +static inline void cache_ll_suspend_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { Cache_Suspend_ICache(); } /** - * @brief Resume Cache. 
On ESP32C2, instructions and data share Cache + * @brief Resume Cache * - * @param type see `cache_type_t` - * - * @param data_autoload_en Dcache auto preload enabled - * - * @param inst_autoload_en Icache auto preload enabled + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not */ __attribute__((always_inline)) -static inline void cache_ll_resume_cache(cache_type_t type, bool inst_autoload_en, bool data_autoload_en) +static inline void cache_ll_resume_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) { Cache_Resume_ICache(CACHE_LL_L1_ICACHE_AUTOLOAD); } @@ -124,13 +135,16 @@ static inline bool cache_ll_is_cache_enabled(cache_type_t type) /** * @brief Invalidate cache supported addr * - * Invalidate a Cache item + * Invalidate a cache item * - * @param vaddr Start address of the region to be invalidated - * @param size Size of the region to be invalidated + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be invalidated + * @param size size of the region to be invalidated */ __attribute__((always_inline)) -static inline void cache_ll_invalidate_addr(uint32_t vaddr, uint32_t size) +static inline void cache_ll_invalidate_addr(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size) { Cache_Invalidate_Addr(vaddr, size); } @@ -138,12 +152,14 @@ static inline void cache_ll_invalidate_addr(uint32_t vaddr, uint32_t size) /** * @brief Get Cache line size, in bytes * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level * * @return Cache 
line size, in bytes */ __attribute__((always_inline)) -static inline uint32_t cache_ll_get_line_size(cache_type_t type) +static inline uint32_t cache_ll_get_line_size(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { uint32_t size = 0; size = Cache_Get_ICache_Line_Size(); @@ -165,7 +181,7 @@ __attribute__((always_inline)) #endif static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t vaddr_start, uint32_t len) { - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); cache_bus_mask_t mask = (cache_bus_mask_t)0; uint32_t vaddr_end = vaddr_start + len - 1; @@ -191,7 +207,7 @@ __attribute__((always_inline)) #endif static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t mask) { - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); //On esp32c2, only `CACHE_BUS_IBUS0` and `CACHE_BUS_DBUS0` are supported. Use `cache_ll_l1_get_bus()` to get your bus first HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2| CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0); @@ -213,7 +229,7 @@ static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t ma __attribute__((always_inline)) static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t mask) { - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); //On esp32c2, only `CACHE_BUS_IBUS0` and `CACHE_BUS_DBUS0` are supported. 
Use `cache_ll_l1_get_bus()` to get your bus first HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2| CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0); @@ -226,6 +242,33 @@ static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t m REG_SET_BIT(EXTMEM_ICACHE_CTRL1_REG, dbus_mask); } +/** + * @brief Get Cache level and the ID of the vaddr + * + * @param vaddr_start virtual address start + * @param len vaddr length + * @param out_level cache level + * @param out_id cache id + * + * @return true for valid + */ +__attribute__((always_inline)) +static inline bool cache_ll_vaddr_to_cache_level_id(uint32_t vaddr_start, uint32_t len, uint32_t *out_level, uint32_t *out_id) +{ + bool valid = false; + uint32_t vaddr_end = vaddr_start + len - 1; + + valid |= (SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_end)); + valid |= (SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_end)); + + if (valid) { + *out_level = 1; + *out_id = 0; + } + + return valid; +} + /*------------------------------------------------------------------------------ * Interrupt *----------------------------------------------------------------------------*/ diff --git a/components/hal/esp32c3/include/hal/cache_ll.h b/components/hal/esp32c3/include/hal/cache_ll.h index 7ea850b02d..71022d0121 100644 --- a/components/hal/esp32c3/include/hal/cache_ll.h +++ b/components/hal/esp32c3/include/hal/cache_ll.h @@ -36,18 +36,26 @@ extern "C" { #define CACHE_LL_L1_ILG_EVENT_PRELOAD_OP_FAULT (1<<1) #define CACHE_LL_L1_ILG_EVENT_SYNC_OP_FAULT (1<<0) +#define CACHE_LL_ID_ALL 1 //All of the caches in a type and level, make this value greater than any ID +#define CACHE_LL_LEVEL_INT_MEM 0 //Cache level for accessing internal mem +#define CACHE_LL_LEVEL_EXT_MEM 1 //Cache level for accessing external mem +#define CACHE_LL_LEVEL_ALL 2 //All of the cache levels, make this value greater than any level +#define CACHE_LL_LEVEL_NUMS 1 //Number of cache levels 
#define CACHE_LL_L1_ICACHE_AUTOLOAD (1<<2) /** - * @brief Check if Cache auto preload is enabled or not. On ESP32C3, instructions and data share Cache + * @brief Check if Cache auto preload is enabled or not. * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level * * @return true: enabled; false: disabled */ __attribute__((always_inline)) -static inline bool cache_ll_is_cache_autoload_enabled(cache_type_t type) +static inline bool cache_ll_is_cache_autoload_enabled(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); bool enabled = false; if (REG_GET_BIT(EXTMEM_ICACHE_AUTOLOAD_CTRL_REG, EXTMEM_ICACHE_AUTOLOAD_ENA)) { enabled = true; @@ -56,54 +64,58 @@ static inline bool cache_ll_is_cache_autoload_enabled(cache_type_t type) } /** - * @brief Disable Cache. On ESP32C3, instructions and data share Cache + * @brief Disable Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_disable_cache(cache_type_t type) +static inline void cache_ll_disable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { (void) type; Cache_Disable_ICache(); } /** - * @brief Enable Cache. 
On ESP32C3, instructions and data share Cache + * @brief Enable Cache * - * @param type see `cache_type_t` - * - * @param data_autoload_en Dcache auto preload enabled - * - * @param inst_autoload_en Icache auto preload enabled + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not */ __attribute__((always_inline)) -static inline void cache_ll_enable_cache(cache_type_t type, bool inst_autoload_en, bool data_autoload_en) +static inline void cache_ll_enable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) { Cache_Enable_ICache(inst_autoload_en ? CACHE_LL_L1_ICACHE_AUTOLOAD : 0); } /** - * @brief Suspend Cache. On ESP32C3, instructions and data share Cache + * @brief Suspend Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_suspend_cache(cache_type_t type) +static inline void cache_ll_suspend_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { Cache_Suspend_ICache(); } /** - * @brief Resume Cache. 
On ESP32C3, instructions and data share Cache + * @brief Resume Cache * - * @param type see `cache_type_t` - * - * @param data_autoload_en Dcache auto preload enabled - * - * @param inst_autoload_en Icache auto preload enabled + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not */ __attribute__((always_inline)) -static inline void cache_ll_resume_cache(cache_type_t type, bool inst_autoload_en, bool data_autoload_en) +static inline void cache_ll_resume_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) { Cache_Resume_ICache(inst_autoload_en ? CACHE_LL_L1_ICACHE_AUTOLOAD : 0); } @@ -126,13 +138,16 @@ static inline bool cache_ll_is_cache_enabled(cache_type_t type) /** * @brief Invalidate cache supported addr * - * Invalidate a Cache + * Invalidate a cache item * - * @param vaddr Start address of the region to be invalidated - * @param size Size of the region to be invalidated + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be invalidated + * @param size size of the region to be invalidated */ __attribute__((always_inline)) -static inline void cache_ll_invalidate_addr(uint32_t vaddr, uint32_t size) +static inline void cache_ll_invalidate_addr(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size) { Cache_Invalidate_Addr(vaddr, size); } @@ -140,12 +155,14 @@ static inline void cache_ll_invalidate_addr(uint32_t vaddr, uint32_t size) /** * @brief Get Cache line size, in bytes * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level * * 
@return Cache line size, in bytes */ __attribute__((always_inline)) -static inline uint32_t cache_ll_get_line_size(cache_type_t type) +static inline uint32_t cache_ll_get_line_size(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { uint32_t size = 0; size = Cache_Get_ICache_Line_Size(); @@ -167,7 +184,7 @@ __attribute__((always_inline)) #endif static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t vaddr_start, uint32_t len) { - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); cache_bus_mask_t mask = (cache_bus_mask_t)0; uint32_t vaddr_end = vaddr_start + len - 1; @@ -193,7 +210,7 @@ __attribute__((always_inline)) #endif static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t mask) { - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); //On esp32c3, only `CACHE_BUS_IBUS0` and `CACHE_BUS_DBUS0` are supported. Use `cache_ll_l1_get_bus()` to get your bus first HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2| CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0); @@ -215,7 +232,7 @@ static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t ma __attribute__((always_inline)) static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t mask) { - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); //On esp32c3, only `CACHE_BUS_IBUS0` and `CACHE_BUS_DBUS0` are supported. 
Use `cache_ll_l1_get_bus()` to get your bus first HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2| CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0); @@ -228,6 +245,33 @@ static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t m REG_SET_BIT(EXTMEM_ICACHE_CTRL1_REG, dbus_mask); } +/** + * @brief Get Cache level and the ID of the vaddr + * + * @param vaddr_start virtual address start + * @param len vaddr length + * @param out_level cache level + * @param out_id cache id + * + * @return true for valid + */ +__attribute__((always_inline)) +static inline bool cache_ll_vaddr_to_cache_level_id(uint32_t vaddr_start, uint32_t len, uint32_t *out_level, uint32_t *out_id) +{ + bool valid = false; + uint32_t vaddr_end = vaddr_start + len - 1; + + valid |= (SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_end)); + valid |= (SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_end)); + + if (valid) { + *out_level = 1; + *out_id = 0; + } + + return valid; +} + /*------------------------------------------------------------------------------ * Interrupt *----------------------------------------------------------------------------*/ diff --git a/components/hal/esp32c6/include/hal/cache_ll.h b/components/hal/esp32c6/include/hal/cache_ll.h index 9cf70595d0..6aa203eb45 100644 --- a/components/hal/esp32c6/include/hal/cache_ll.h +++ b/components/hal/esp32c6/include/hal/cache_ll.h @@ -26,18 +26,26 @@ extern "C" { #define CACHE_LL_L1_ACCESS_EVENT_MASK (1<<4) #define CACHE_LL_L1_ACCESS_EVENT_CACHE_FAIL (1<<4) +#define CACHE_LL_ID_ALL 1 //All of the caches in a type and level, make this value greater than any ID +#define CACHE_LL_LEVEL_INT_MEM 0 //Cache level for accessing internal mem +#define CACHE_LL_LEVEL_EXT_MEM 1 //Cache level for accessing external mem +#define CACHE_LL_LEVEL_ALL 2 //All of the cache levels, make this value greater than any level +#define CACHE_LL_LEVEL_NUMS 1 //Number of cache levels #define 
CACHE_LL_L1_ICACHE_AUTOLOAD (1<<0) /** - * @brief Check if Cache auto preload is enabled or not. On ESP32C6, instructions and data share Cache + * @brief Check if Cache auto preload is enabled or not. * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level * * @return true: enabled; false: disabled */ __attribute__((always_inline)) -static inline bool cache_ll_is_cache_autoload_enabled(cache_type_t type) +static inline bool cache_ll_is_cache_autoload_enabled(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); bool enabled = false; if (REG_GET_BIT(EXTMEM_L1_CACHE_AUTOLOAD_CTRL_REG, EXTMEM_L1_CACHE_AUTOLOAD_ENA)) { enabled = true; @@ -46,54 +54,58 @@ static inline bool cache_ll_is_cache_autoload_enabled(cache_type_t type) } /** - * @brief Disable Cache. On ESP32C6, instructions and data share Cache + * @brief Disable Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_disable_cache(cache_type_t type) +static inline void cache_ll_disable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { (void) type; Cache_Disable_ICache(); } /** - * @brief Enable Cache. 
On ESP32C6, instructions and data share Cache + * @brief Enable Cache * - * @param type see `cache_type_t` - * - * @param data_autoload_en Dcache auto preload enabled - * - * @param inst_autoload_en Icache auto preload enabled + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not */ __attribute__((always_inline)) -static inline void cache_ll_enable_cache(cache_type_t type, bool inst_autoload_en, bool data_autoload_en) +static inline void cache_ll_enable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) { Cache_Enable_ICache(inst_autoload_en ? CACHE_LL_L1_ICACHE_AUTOLOAD : 0); } /** - * @brief Suspend Cache. On ESP32C6, instructions and data share Cache + * @brief Suspend Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_suspend_cache(cache_type_t type) +static inline void cache_ll_suspend_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { Cache_Suspend_ICache(); } /** - * @brief Resume Cache. 
On ESP326, instructions and data share Cache + * @brief Resume Cache * - * @param type see `cache_type_t` - * - * @param data_autoload_en Dcache auto preload enabled - * - * @param inst_autoload_en Icache auto preload enabled + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not */ __attribute__((always_inline)) -static inline void cache_ll_resume_cache(cache_type_t type, bool inst_autoload_en, bool data_autoload_en) +static inline void cache_ll_resume_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) { Cache_Resume_ICache(inst_autoload_en ? CACHE_LL_L1_ICACHE_AUTOLOAD : 0); } @@ -101,48 +113,57 @@ static inline void cache_ll_resume_cache(cache_type_t type, bool inst_autoload_e /** * @brief Invalidate cache supported addr * - * Invalidate a Cache item + * Invalidate a cache item * - * @param vaddr Start address of the region to be invalidated - * @param size Size of the region to be invalidated + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be invalidated + * @param size size of the region to be invalidated */ __attribute__((always_inline)) -static inline void cache_ll_invalidate_addr(uint32_t vaddr, uint32_t size) +static inline void cache_ll_invalidate_addr(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size) { Cache_Invalidate_Addr(vaddr, size); } /** - * @brief Freeze Cache. 
On ESP32C6, instructions and data share Cache + * @brief Freeze Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_freeze_cache(cache_type_t type) +static inline void cache_ll_freeze_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { Cache_Freeze_ICache_Enable(CACHE_FREEZE_ACK_BUSY); } /** - * @brief Unfreeze Cache. On ESP32C6, instructions and data share Cache + * @brief Unfreeze Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_unfreeze_cache(cache_type_t type) +static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { Cache_Freeze_ICache_Disable(); } /** - * @brief Get cache line size, in bytes + * @brief Get Cache line size, in bytes * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level * - * @return cache line size, in bytes + * @return Cache line size, in bytes */ __attribute__((always_inline)) -static inline uint32_t cache_ll_get_line_size(cache_type_t type) +static inline uint32_t cache_ll_get_line_size(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { uint32_t size = 0; size = Cache_Get_ICache_Line_Size(); @@ -164,7 +185,7 @@ __attribute__((always_inline)) #endif static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t vaddr_start, uint32_t len) { - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); cache_bus_mask_t mask = (cache_bus_mask_t)0; uint32_t vaddr_end = vaddr_start + len - 1; @@ -189,7 +210,7 @@ __attribute__((always_inline)) #endif 
static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t mask) { - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); //On esp32c6, only `CACHE_BUS_IBUS0` and `CACHE_BUS_DBUS0` are supported. Use `cache_ll_l1_get_bus()` to get your bus first HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2 | CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0); @@ -211,7 +232,7 @@ static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t ma __attribute__((always_inline)) static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t mask) { - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); //On esp32c6, only `CACHE_BUS_IBUS0` and `CACHE_BUS_DBUS0` are supported. Use `cache_ll_l1_get_bus()` to get your bus first HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2 | CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0); @@ -224,6 +245,33 @@ static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t m REG_SET_BIT(EXTMEM_L1_CACHE_CTRL_REG, dbus_mask); } +/** + * @brief Get Cache level and the ID of the vaddr + * + * @param vaddr_start virtual address start + * @param len vaddr length + * @param out_level cache level + * @param out_id cache id + * + * @return true for valid + */ +__attribute__((always_inline)) +static inline bool cache_ll_vaddr_to_cache_level_id(uint32_t vaddr_start, uint32_t len, uint32_t *out_level, uint32_t *out_id) +{ + bool valid = false; + uint32_t vaddr_end = vaddr_start + len - 1; + + valid |= (SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_end)); + valid |= (SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_end)); + + if (valid) { + *out_level = 1; + *out_id = 0; + } + + return valid; +} + /*------------------------------------------------------------------------------ * Interrupt *----------------------------------------------------------------------------*/ diff --git 
a/components/hal/esp32h2/include/hal/cache_ll.h b/components/hal/esp32h2/include/hal/cache_ll.h index 8f60f3e635..280e4a489a 100644 --- a/components/hal/esp32h2/include/hal/cache_ll.h +++ b/components/hal/esp32h2/include/hal/cache_ll.h @@ -26,18 +26,26 @@ extern "C" { #define CACHE_LL_L1_ACCESS_EVENT_MASK (1<<4) #define CACHE_LL_L1_ACCESS_EVENT_CACHE_FAIL (1<<4) +#define CACHE_LL_ID_ALL 1 //All of the caches in a type and level, make this value greater than any ID +#define CACHE_LL_LEVEL_INT_MEM 0 //Cache level for accessing internal mem +#define CACHE_LL_LEVEL_EXT_MEM 1 //Cache level for accessing external mem +#define CACHE_LL_LEVEL_ALL 2 //All of the cache levels, make this value greater than any level +#define CACHE_LL_LEVEL_NUMS 1 //Number of cache levels #define CACHE_LL_L1_ICACHE_AUTOLOAD (1<<0) /** - * @brief Check if Cache auto preload is enabled or not. On ESP32h2, instructions and data share Cache + * @brief Check if Cache auto preload is enabled or not. * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level * * @return true: enabled; false: disabled */ __attribute__((always_inline)) -static inline bool cache_ll_is_cache_autoload_enabled(cache_type_t type) +static inline bool cache_ll_is_cache_autoload_enabled(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); bool enabled = false; if (REG_GET_BIT(CACHE_L1_CACHE_AUTOLOAD_CTRL_REG, CACHE_L1_CACHE_AUTOLOAD_ENA)) { enabled = true; @@ -46,54 +54,58 @@ static inline bool cache_ll_is_cache_autoload_enabled(cache_type_t type) } /** - * @brief Disable Cache. 
On ESP32H2, instructions and data share Cache + * @brief Disable Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_disable_cache(cache_type_t type) +static inline void cache_ll_disable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { (void) type; Cache_Disable_ICache(); } /** - * @brief Enable Cache. On ESP32H2, instructions and data share Cache + * @brief Enable Cache * - * @param type see `cache_type_t` - * - * @param data_autoload_en Dcache auto preload enabled - * - * @param inst_autoload_en Icache auto preload enabled + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not */ __attribute__((always_inline)) -static inline void cache_ll_enable_cache(cache_type_t type, bool inst_autoload_en, bool data_autoload_en) +static inline void cache_ll_enable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) { Cache_Enable_ICache(inst_autoload_en ? CACHE_LL_L1_ICACHE_AUTOLOAD : 0); } /** - * @brief Suspend Cache. On ESP32H2, instructions and data share Cache + * @brief Suspend Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_suspend_cache(cache_type_t type) +static inline void cache_ll_suspend_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { Cache_Suspend_ICache(); } /** - * @brief Resume Cache. 
On ESP326, instructions and data share Cache + * @brief Resume Cache * - * @param type see `cache_type_t` - * - * @param data_autoload_en Dcache auto preload enabled - * - * @param inst_autoload_en Icache auto preload enabled + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not */ __attribute__((always_inline)) -static inline void cache_ll_resume_cache(cache_type_t type, bool inst_autoload_en, bool data_autoload_en) +static inline void cache_ll_resume_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) { Cache_Resume_ICache(inst_autoload_en ? CACHE_LL_L1_ICACHE_AUTOLOAD : 0); } @@ -101,48 +113,57 @@ static inline void cache_ll_resume_cache(cache_type_t type, bool inst_autoload_e /** * @brief Invalidate cache supported addr * - * Invalidate a Cache item + * Invalidate a cache item * - * @param vaddr Start address of the region to be invalidated - * @param size Size of the region to be invalidated + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be invalidated + * @param size size of the region to be invalidated */ __attribute__((always_inline)) -static inline void cache_ll_invalidate_addr(uint32_t vaddr, uint32_t size) +static inline void cache_ll_invalidate_addr(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size) { Cache_Invalidate_Addr(vaddr, size); } /** - * @brief Freeze Cache. 
On ESP32H2, instructions and data share Cache + * @brief Freeze Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_freeze_cache(cache_type_t type) +static inline void cache_ll_freeze_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { Cache_Freeze_ICache_Enable(CACHE_FREEZE_ACK_BUSY); } /** - * @brief Unfreeze Cache. On ESP32H2, instructions and data share Cache + * @brief Unfreeze Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_unfreeze_cache(cache_type_t type) +static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { Cache_Freeze_ICache_Disable(); } /** - * @brief Get cache line size, in bytes + * @brief Get Cache line size, in bytes * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level * - * @return cache line size, in bytes + * @return Cache line size, in bytes */ __attribute__((always_inline)) -static inline uint32_t cache_ll_get_line_size(cache_type_t type) +static inline uint32_t cache_ll_get_line_size(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { uint32_t size = 0; size = Cache_Get_ICache_Line_Size(); @@ -164,7 +185,7 @@ __attribute__((always_inline)) #endif static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t vaddr_start, uint32_t len) { - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); cache_bus_mask_t mask = (cache_bus_mask_t)0; uint32_t vaddr_end = vaddr_start + len - 1; @@ -189,7 +210,7 @@ __attribute__((always_inline)) #endif 
static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t mask) { - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); //On esp32h2, only `CACHE_BUS_IBUS0` and `CACHE_BUS_DBUS0` are supported. Use `cache_ll_l1_get_bus()` to get your bus first HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2 | CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0); @@ -211,7 +232,7 @@ static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t ma __attribute__((always_inline)) static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t mask) { - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); //On esp32h2, only `CACHE_BUS_IBUS0` and `CACHE_BUS_DBUS0` are supported. Use `cache_ll_l1_get_bus()` to get your bus first HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2 | CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0); @@ -224,6 +245,33 @@ static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t m REG_SET_BIT(CACHE_L1_CACHE_CTRL_REG, dbus_mask); } +/** + * @brief Get Cache level and the ID of the vaddr + * + * @param vaddr_start virtual address start + * @param len vaddr length + * @param out_level cache level + * @param out_id cache id + * + * @return true for valid + */ +__attribute__((always_inline)) +static inline bool cache_ll_vaddr_to_cache_level_id(uint32_t vaddr_start, uint32_t len, uint32_t *out_level, uint32_t *out_id) +{ + bool valid = false; + uint32_t vaddr_end = vaddr_start + len - 1; + + valid |= (SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_end)); + valid |= (SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_end)); + + if (valid) { + *out_level = 1; + *out_id = 0; + } + + return valid; +} + /*------------------------------------------------------------------------------ * Interrupt *----------------------------------------------------------------------------*/ diff --git 
a/components/hal/esp32p4/include/hal/cache_ll.h b/components/hal/esp32p4/include/hal/cache_ll.h index 9dd1109c17..2054bebf5d 100644 --- a/components/hal/esp32p4/include/hal/cache_ll.h +++ b/components/hal/esp32p4/include/hal/cache_ll.h @@ -8,10 +8,12 @@ #pragma once +#include <stdbool.h> #include "soc/cache_reg.h" #include "soc/ext_mem_defs.h" #include "hal/cache_types.h" #include "hal/assert.h" +#include "esp32p4/rom/cache.h" #ifdef __cplusplus extern "C" { @@ -23,43 +25,945 @@ extern "C" { */ #define CACHE_LL_L2MEM_NON_CACHE_ADDR(addr) ((intptr_t)(addr) + 0x40000000) +/** + * Cache capabilities + */ #define CACHE_LL_ENABLE_DISABLE_STATE_SW 1 //There's no register indicating cache enable/disable state, we need to use software way for this state. +#define CACHE_LL_EXT_MEM_VIA_L2CACHE 1 -#define CACHE_LL_DEFAULT_IBUS_MASK CACHE_BUS_IBUS0 -#define CACHE_LL_DEFAULT_DBUS_MASK CACHE_BUS_DBUS0 +#define CACHE_LL_ID_ALL 3 //All of the caches in a type and level, make this value greater than any ID +#define CACHE_LL_LEVEL_INT_MEM 1 //Cache level for accessing internal mem +#define CACHE_LL_LEVEL_EXT_MEM 2 //Cache level for accessing external mem +#define CACHE_LL_LEVEL_ALL 3 //All of the cache levels, make this value greater than any level +#define CACHE_LL_LEVEL_NUMS 2 //Number of cache levels +#define CACHE_LL_CACHE_AUTOLOAD (1<<0) -//TODO: IDF-7516 +#define CACHE_LL_DEFAULT_IBUS_MASK (CACHE_BUS_IBUS0 | CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2) +#define CACHE_LL_DEFAULT_DBUS_MASK (CACHE_BUS_DBUS0 | CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2) + +//TODO: IDF-7515 #define CACHE_LL_L1_ACCESS_EVENT_MASK (0x3f) -// #define CACHE_LL_L1_ACCESS_EVENT_DBUS_WR_IC (1<<5) -// #define CACHE_LL_L1_ACCESS_EVENT_DBUS_REJECT (1<<4) -// #define CACHE_LL_L1_ACCESS_EVENT_DBUS_ACS_MSK_IC (1<<3) -// #define CACHE_LL_L1_ACCESS_EVENT_IBUS_REJECT (1<<2) -// #define CACHE_LL_L1_ACCESS_EVENT_IBUS_WR_IC (1<<1) -// #define CACHE_LL_L1_ACCESS_EVENT_IBUS_ACS_MSK_IC (1<<0) -// #define CACHE_LL_L1_ILG_EVENT_MASK (0x23) -// #define 
CACHE_LL_L1_ILG_EVENT_MMU_ENTRY_FAULT (1<<5) -// #define CACHE_LL_L1_ILG_EVENT_PRELOAD_OP_FAULT (1<<1) -// #define CACHE_LL_L1_ILG_EVENT_SYNC_OP_FAULT (1<<0) + +/*------------------------------------------------------------------------------ + * Autoload + *----------------------------------------------------------------------------*/ +/** + * @brief Check if L1 ICache autoload is enabled or not + * + * @param cache_id id of the cache in this type and level + * + * @return true: enabled; false: disabled + */ +__attribute__((always_inline)) +static inline bool cache_ll_l1_is_icache_autoload_enabled(uint32_t cache_id) +{ + bool enabled = false; + + if (cache_id == 0) { + enabled = REG_GET_BIT(CACHE_L1_ICACHE0_AUTOLOAD_CTRL_REG, CACHE_L1_ICACHE0_AUTOLOAD_ENA); + } else if (cache_id == 1) { + enabled = REG_GET_BIT(CACHE_L1_ICACHE1_AUTOLOAD_CTRL_REG, CACHE_L1_ICACHE1_AUTOLOAD_ENA); + } else if (cache_id == CACHE_LL_ID_ALL) { + enabled = REG_GET_BIT(CACHE_L1_ICACHE0_AUTOLOAD_CTRL_REG, CACHE_L1_ICACHE0_AUTOLOAD_ENA) && REG_GET_BIT(CACHE_L1_ICACHE1_AUTOLOAD_CTRL_REG, CACHE_L1_ICACHE1_AUTOLOAD_ENA); + } + + return enabled; +} /** - * @brief Get the buses of a particular cache that are mapped to a virtual address range + * @brief Check if L1 DCache autoload is enabled or not + * + * @param cache_id id of the cache in this type and level + * + * @return true: enabled; false: disabled + */ +__attribute__((always_inline)) +static inline bool cache_ll_l1_is_dcache_autoload_enabled(uint32_t cache_id) +{ + bool enabled = false; + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + enabled = REG_GET_BIT(CACHE_L1_DCACHE_AUTOLOAD_CTRL_REG, CACHE_L1_DCACHE_AUTOLOAD_ENA); + } + return enabled; +} + +/** + * @brief Check if L2 Cache auto preload is enabled or not + * + * @param cache_id id of the cache in this type and level + * + * @return true: enabled; false: disabled + */ +__attribute__((always_inline)) +static inline bool cache_ll_l2_is_cache_autoload_enabled(uint32_t cache_id) 
+{ + bool enabled = false; + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + enabled = REG_GET_BIT(CACHE_L2_CACHE_AUTOLOAD_CTRL_REG, CACHE_L2_CACHE_AUTOLOAD_ENA); + } + return enabled; +} + +/** + * @brief Check if Cache auto preload is enabled or not. + * + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * + * @return true: enabled; false: disabled + */ +__attribute__((always_inline)) +static inline bool cache_ll_is_cache_autoload_enabled(uint32_t cache_level, cache_type_t type, uint32_t cache_id) +{ + bool enabled = false; + + if (cache_level == 2) { + enabled = cache_ll_l2_is_cache_autoload_enabled(cache_id); + } else if (cache_level == 1) { + switch (type) { + case CACHE_TYPE_INSTRUCTION: + enabled = cache_ll_l1_is_icache_autoload_enabled(cache_id); + break; + case CACHE_TYPE_DATA: + enabled = cache_ll_l1_is_dcache_autoload_enabled(cache_id); + break; + case CACHE_TYPE_ALL: + default: //CACHE_TYPE_ALL + enabled = cache_ll_l1_is_icache_autoload_enabled(CACHE_LL_ID_ALL) && cache_ll_l1_is_dcache_autoload_enabled(cache_id); + break; + } + } + + return enabled; +} + +/*------------------------------------------------------------------------------ + * Disable + *----------------------------------------------------------------------------*/ +/** + * @brief Disable L1 ICache + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_disable_icache(uint32_t cache_id) +{ + if (cache_id == 0) { + Cache_Disable_L1_CORE0_ICache(); + } else if (cache_id == 1) { + Cache_Disable_L1_CORE1_ICache(); + } else if (cache_id == CACHE_LL_ID_ALL) { + Cache_Disable_L1_CORE0_ICache(); + Cache_Disable_L1_CORE1_ICache(); + } +} + +/** + * @brief Disable L1 DCache + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_disable_dcache(uint32_t 
cache_id) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Disable_L1_DCache(); + } +} + +/** + * @brief Disable L2 Cache + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l2_disable_cache(uint32_t cache_id) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Disable_L2_Cache(); + } +} + +/** + * @brief Disable Cache + * + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_disable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) +{ + HAL_ASSERT(cache_level == 1 || cache_level == 2); + + if (cache_level == 1) { + switch (type) { + case CACHE_TYPE_INSTRUCTION: + cache_ll_l1_disable_icache(cache_id); + break; + case CACHE_TYPE_DATA: + cache_ll_l1_disable_dcache(cache_id); + break; + case CACHE_TYPE_ALL: + default: + cache_ll_l1_disable_icache(CACHE_LL_ID_ALL); + cache_ll_l1_disable_dcache(cache_id); + break; + } + } else { + cache_ll_l2_disable_cache(cache_id); + } +} + +/*------------------------------------------------------------------------------ + * Enable + *----------------------------------------------------------------------------*/ +/** + * @brief Enable L1 ICache + * + * @param cache_id id of the cache in this type and level + * @param inst_autoload_en icache autoload enabled or not + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_enable_icache(uint32_t cache_id, bool inst_autoload_en) +{ + if (cache_id == 0) { + Cache_Enable_L1_CORE0_ICache(inst_autoload_en ? CACHE_LL_CACHE_AUTOLOAD : 0); + } else if (cache_id == 1) { + Cache_Enable_L1_CORE1_ICache(inst_autoload_en ? CACHE_LL_CACHE_AUTOLOAD : 0); + } else if (cache_id == CACHE_LL_ID_ALL) { + Cache_Enable_L1_CORE1_ICache(inst_autoload_en ? CACHE_LL_CACHE_AUTOLOAD : 0); + Cache_Enable_L1_CORE0_ICache(inst_autoload_en ? 
CACHE_LL_CACHE_AUTOLOAD : 0); + } +} + +/** + * @brief Enable L1 DCache + * + * @param cache_id id of the cache in this type and level + * @param data_autoload_en dcache autoload enabled or not + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_enable_dcache(uint32_t cache_id, bool data_autoload_en) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Enable_L1_DCache(data_autoload_en ? CACHE_LL_CACHE_AUTOLOAD : 0); + } +} + +/** + * @brief Enable L2 Cache + * + * @param cache_id id of the cache in this type and level + * @param autoload_en autoload enabled or not + */ +__attribute__((always_inline)) +static inline void cache_ll_l2_enable_cache(uint32_t cache_id, bool autoload_en) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Enable_L2_Cache(autoload_en ? CACHE_LL_CACHE_AUTOLOAD : 0); + } +} + +/** + * @brief Enable Cache + * + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not + */ +__attribute__((always_inline)) +static inline void cache_ll_enable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) +{ + HAL_ASSERT(cache_level == 1 || cache_level == 2); + + if (cache_level == 2) { + cache_ll_l2_enable_cache(cache_id, inst_autoload_en); + } else { + switch (type) { + case CACHE_TYPE_INSTRUCTION: + cache_ll_l1_enable_icache(cache_id, inst_autoload_en); + break; + case CACHE_TYPE_DATA: + cache_ll_l1_enable_dcache(cache_id, data_autoload_en); + break; + case CACHE_TYPE_ALL: + default: + cache_ll_l1_enable_dcache(cache_id, data_autoload_en); + cache_ll_l1_enable_icache(cache_id, inst_autoload_en); + break; + } + } +} + +/*------------------------------------------------------------------------------ + * Suspend + 
*----------------------------------------------------------------------------*/ +/** + * @brief Suspend L1 ICache + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_suspend_icache(uint32_t cache_id) +{ + if (cache_id == 0) { + Cache_Suspend_L1_CORE0_ICache(); + } else if (cache_id == 1) { + Cache_Suspend_L1_CORE1_ICache(); + } else if (cache_id == CACHE_LL_ID_ALL) { + Cache_Suspend_L1_CORE0_ICache(); + Cache_Suspend_L1_CORE1_ICache(); + } +} + +/** + * @brief Suspend L1 DCache + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_suspend_dcache(uint32_t cache_id) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Suspend_L1_DCache(); + } +} + +/** + * @brief Suspend L2 Cache + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l2_suspend_cache(uint32_t cache_id) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Suspend_L2_Cache(); + } +} + +/** + * @brief Suspend Cache + * + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_suspend_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) +{ + HAL_ASSERT(cache_level == 1 || cache_level == 2); + + if (cache_level == 1) { + switch (type) { + case CACHE_TYPE_INSTRUCTION: + cache_ll_l1_suspend_icache(cache_id); + break; + case CACHE_TYPE_DATA: + cache_ll_l1_suspend_dcache(cache_id); + break; + case CACHE_TYPE_ALL: + default: + cache_ll_l1_suspend_icache(CACHE_LL_ID_ALL); + cache_ll_l1_suspend_dcache(cache_id); + break; + } + } else { + cache_ll_l2_suspend_cache(cache_id); + } +} + +/*------------------------------------------------------------------------------ + * Resume + 
 *----------------------------------------------------------------------------*/ +/** + * @brief Resume L1 ICache + * + * @param cache_id id of the cache in this type and level + * @param inst_autoload_en icache autoload enabled or not + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_resume_icache(uint32_t cache_id, bool inst_autoload_en) +{ + if (cache_id == 0) { + Cache_Resume_L1_CORE0_ICache(inst_autoload_en ? CACHE_LL_CACHE_AUTOLOAD : 0); + } else if (cache_id == 1) { + Cache_Resume_L1_CORE1_ICache(inst_autoload_en ? CACHE_LL_CACHE_AUTOLOAD : 0); + } else if (cache_id == CACHE_LL_ID_ALL) { + Cache_Resume_L1_CORE1_ICache(inst_autoload_en ? CACHE_LL_CACHE_AUTOLOAD : 0); + Cache_Resume_L1_CORE0_ICache(inst_autoload_en ? CACHE_LL_CACHE_AUTOLOAD : 0); + } +} + +/** + * @brief Resume L1 DCache + * + * @param cache_id id of the cache in this type and level + * @param data_autoload_en dcache autoload enabled or not + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_resume_dcache(uint32_t cache_id, bool data_autoload_en) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Resume_L1_DCache(data_autoload_en ? CACHE_LL_CACHE_AUTOLOAD : 0); + } +} + +/** + * @brief Resume L2 Cache + * + * @param cache_id id of the cache in this type and level + * @param autoload_en autoload enabled or not + */ +__attribute__((always_inline)) +static inline void cache_ll_l2_resume_cache(uint32_t cache_id, bool autoload_en) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Resume_L2_Cache(autoload_en ? 
CACHE_LL_CACHE_AUTOLOAD : 0); + } +} + +/** + * @brief Resume Cache + * + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not + */ +__attribute__((always_inline)) +static inline void cache_ll_resume_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) +{ + HAL_ASSERT(cache_level == 1 || cache_level == 2); + + if (cache_level == 2) { + cache_ll_l2_resume_cache(cache_id, inst_autoload_en); + } else { + switch (type) { + case CACHE_TYPE_INSTRUCTION: + cache_ll_l1_resume_icache(cache_id, inst_autoload_en); + break; + case CACHE_TYPE_DATA: + cache_ll_l1_resume_dcache(cache_id, data_autoload_en); + break; + case CACHE_TYPE_ALL: + default: + cache_ll_l1_resume_dcache(cache_id, data_autoload_en); + cache_ll_l1_resume_icache(CACHE_LL_ID_ALL, inst_autoload_en); + break; + } + } +} + +/*------------------------------------------------------------------------------ + * Invalidate + *----------------------------------------------------------------------------*/ +/** + * @brief Invalidate L1 ICache addr + * + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be invalidated + * @param size size of the region to be invalidated + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_invalidate_icache_addr(uint32_t cache_id, uint32_t vaddr, uint32_t size) +{ + if (cache_id == 0) { + Cache_Invalidate_Addr(CACHE_MAP_L1_ICACHE_0, vaddr, size); + } else if (cache_id == 1) { + Cache_Invalidate_Addr(CACHE_MAP_L1_ICACHE_1, vaddr, size); + } else if (cache_id == CACHE_LL_ID_ALL) { + Cache_Invalidate_Addr(CACHE_MAP_L1_ICACHE_MASK, vaddr, size); + } +} + +/** + * @brief Invalidate L1 DCache addr + * + * @param cache_id id of the cache in this type and level + * @param vaddr 
start address of the region to be invalidated + * @param size size of the region to be invalidated + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_invalidate_dcache_addr(uint32_t cache_id, uint32_t vaddr, uint32_t size) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Invalidate_Addr(CACHE_MAP_L1_DCACHE, vaddr, size); + } +} + +/** + * @brief Invalidate L2 Cache addr + * + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be invalidated + * @param size size of the region to be invalidated + */ +__attribute__((always_inline)) +static inline void cache_ll_l2_invalidate_cache_addr(uint32_t cache_id, uint32_t vaddr, uint32_t size) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Invalidate_Addr(CACHE_MAP_L2_CACHE, vaddr, size); + } +} + +/** + * @brief Invalidate cache supported addr + * + * Invalidate a cache item + * + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be invalidated + * @param size size of the region to be invalidated + */ +__attribute__((always_inline)) +static inline void cache_ll_invalidate_addr(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size) +{ + if (cache_level == 1 || cache_level == 2 || cache_level == CACHE_LL_LEVEL_ALL) { + switch (type) { + case CACHE_TYPE_INSTRUCTION: + cache_ll_l1_invalidate_icache_addr(cache_id, vaddr, size); + break; + case CACHE_TYPE_DATA: + cache_ll_l1_invalidate_dcache_addr(cache_id, vaddr, size); + break; + case CACHE_TYPE_ALL: + default: + cache_ll_l1_invalidate_icache_addr(cache_id, vaddr, size); + cache_ll_l1_invalidate_dcache_addr(cache_id, vaddr, size); + break; + } + } + + if (cache_level == 2 || cache_level == CACHE_LL_LEVEL_ALL) { + cache_ll_l2_invalidate_cache_addr(cache_id, vaddr, size); + } +} + 
+/*------------------------------------------------------------------------------ + * Writeback + *----------------------------------------------------------------------------*/ +/** + * @brief Writeback L1 DCache addr + * + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be written back + * @param size size of the region to be written back + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_writeback_dcache_addr(uint32_t cache_id, uint32_t vaddr, uint32_t size) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_WriteBack_Addr(CACHE_MAP_L1_DCACHE, vaddr, size); + } +} + +/** + * @brief Writeback L2 Cache addr + * + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be written back + * @param size size of the region to be written back + */ +__attribute__((always_inline)) +static inline void cache_ll_l2_writeback_cache_addr(uint32_t cache_id, uint32_t vaddr, uint32_t size) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_WriteBack_Addr(CACHE_MAP_L2_CACHE, vaddr, size); + } +} + +/** + * @brief Writeback cache supported addr + * + * Writeback a cache item + * + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be written back + * @param size size of the region to be written back + */ +__attribute__((always_inline)) +static inline void cache_ll_writeback_addr(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size) +{ + if (cache_level == 1 || cache_level == 2 || cache_level == CACHE_LL_LEVEL_ALL) { + switch (type) { + case CACHE_TYPE_DATA: + cache_ll_l1_writeback_dcache_addr(cache_id, vaddr, size); + break; + default: + HAL_ASSERT(false); + } + } + + if (cache_level == 2 || cache_level == CACHE_LL_LEVEL_ALL) { + 
cache_ll_l2_writeback_cache_addr(cache_id, vaddr, size); + } +} + +/** + * @brief Writeback L1 DCache all + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_writeback_dcache_all(uint32_t cache_id) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_WriteBack_All(CACHE_MAP_L1_DCACHE); + } +} + +/** + * @brief Writeback L2 Cache all + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l2_writeback_cache_all(uint32_t cache_id) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_WriteBack_All(CACHE_MAP_L2_CACHE); + } +} + +/** + * @brief Writeback all + * + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_writeback_all(uint32_t cache_level, cache_type_t type, uint32_t cache_id) +{ + if (cache_level == 1 || cache_level == 2 || cache_level == CACHE_LL_LEVEL_ALL) { + switch (type) { + case CACHE_TYPE_DATA: + cache_ll_l1_writeback_dcache_all(cache_id); + break; + default: + HAL_ASSERT(false); + } + } + + if (cache_level == 2 || cache_level == CACHE_LL_LEVEL_ALL) { + cache_ll_l2_writeback_cache_all(cache_id); + } +} + +/*------------------------------------------------------------------------------ + * Freeze + *----------------------------------------------------------------------------*/ +/** + * @brief Freeze L1 ICache + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_freeze_icache(uint32_t cache_id) +{ + if (cache_id == 0) { + Cache_Freeze_L1_ICache0_Enable(CACHE_FREEZE_ACK_BUSY); + } else if (cache_id == 1) { + Cache_Freeze_L1_ICache1_Enable(CACHE_FREEZE_ACK_BUSY); + } else if (cache_id == CACHE_LL_ID_ALL) { + 
Cache_Freeze_L1_ICache0_Enable(CACHE_FREEZE_ACK_BUSY); + Cache_Freeze_L1_ICache1_Enable(CACHE_FREEZE_ACK_BUSY); + } +} + +/** + * @brief Freeze L1 DCache + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_freeze_dcache(uint32_t cache_id) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Freeze_L1_DCache_Enable(CACHE_FREEZE_ACK_BUSY); + } +} + +/** + * @brief Freeze L2 Cache + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l2_freeze_cache(uint32_t cache_id) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Freeze_L2_Cache_Enable(CACHE_FREEZE_ACK_BUSY); + } +} + +/** + * @brief Freeze Cache + * + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_freeze_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) +{ + HAL_ASSERT(cache_level == 1 || cache_level == 2); + + if (cache_level == 1) { + switch (type) { + case CACHE_TYPE_INSTRUCTION: + cache_ll_l1_freeze_icache(cache_id); + break; + case CACHE_TYPE_DATA: + cache_ll_l1_freeze_dcache(cache_id); + break; + default: + cache_ll_l1_freeze_icache(CACHE_LL_ID_ALL); + cache_ll_l1_freeze_dcache(cache_id); + break; + } + } else { + cache_ll_l2_freeze_cache(cache_id); + } +} + +/*------------------------------------------------------------------------------ + * Unfreeze + *----------------------------------------------------------------------------*/ +/** + * @brief Unfreeze L1 ICache + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_unfreeze_icache(uint32_t cache_id) +{ + if (cache_id == 0) { + Cache_Freeze_L1_ICache0_Disable(); + } else if (cache_id == 1) { + Cache_Freeze_L1_ICache1_Disable(); + } 
else if (cache_id == CACHE_LL_ID_ALL) { + Cache_Freeze_L1_ICache1_Disable(); + Cache_Freeze_L1_ICache0_Disable(); + } +} + +/** + * @brief Unfreeze L1 DCache + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_unfreeze_dcache(uint32_t cache_id) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Freeze_L1_DCache_Disable(); + } +} + +/** + * @brief Unfreeze L2 Cache + * + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_l2_unfreeze_cache(uint32_t cache_id) +{ + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + Cache_Freeze_L2_Cache_Disable(); + } +} + +/** + * @brief Unfreeze Cache + * + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + */ +__attribute__((always_inline)) +static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) +{ + HAL_ASSERT(cache_level == 1 || cache_level == 2); + + if (cache_level == 2) { + cache_ll_l2_unfreeze_cache(cache_id); + } else { + switch (type) { + case CACHE_TYPE_INSTRUCTION: + cache_ll_l1_unfreeze_icache(cache_id); + break; + case CACHE_TYPE_DATA: + cache_ll_l1_unfreeze_dcache(cache_id); + break; + case CACHE_TYPE_ALL: + default: + cache_ll_l1_unfreeze_dcache(cache_id); + cache_ll_l1_unfreeze_icache(CACHE_LL_ID_ALL); + break; + } + } +} + +/*------------------------------------------------------------------------------ + * Cache Line Size + *----------------------------------------------------------------------------*/ +/** + * @brief Get ICache line size, in bytes + * + * @param cache_id id of the cache in this type and level + * + * @return ICache line size, in bytes + */ +__attribute__((always_inline)) +static inline uint32_t cache_ll_l1_icache_get_line_size(uint32_t cache_id) +{ + uint32_t size = 0; + + if (cache_id == 0 || cache_id 
== 1 || cache_id == CACHE_LL_ID_ALL) { + size = 64; + } + + return size; +} + +/** + * @brief Get DCache line size, in bytes + * + * @param cache_id id of the cache in this type and level + * + * @return DCache line size, in bytes + */ +__attribute__((always_inline)) +static inline uint32_t cache_ll_l1_dcache_get_line_size(uint32_t cache_id) +{ + uint32_t size = 0; + + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + size = 64; + } + + return size; +} + +/** + * @brief Get L2 Cache line size, in bytes + * + * @param cache_id id of the cache in this type and level + * + * @return L2 Cache line size, in bytes + */ +__attribute__((always_inline)) +static inline uint32_t cache_ll_l2_cache_get_line_size(uint32_t cache_id) +{ + uint32_t size = 0; + + if (cache_id == 0 || cache_id == CACHE_LL_ID_ALL) { + size = Cache_Get_L2_Cache_Line_Size(); + } + + return size; +} + +/** + * @brief Get Cache line size, in bytes + * + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * + * @return Cache line size, in bytes + */ +__attribute__((always_inline)) +static inline uint32_t cache_ll_get_line_size(uint32_t cache_level, cache_type_t type, uint32_t cache_id) +{ + HAL_ASSERT(cache_level == 1 || cache_level == 2); + + uint32_t size = 0; + if (cache_level == 2) { + size = cache_ll_l2_cache_get_line_size(cache_id); + } else { + switch (type) { + case CACHE_TYPE_INSTRUCTION: + size = cache_ll_l1_icache_get_line_size(cache_id); + break; + case CACHE_TYPE_DATA: + size = cache_ll_l1_dcache_get_line_size(cache_id); + break; + case CACHE_TYPE_ALL: + default: + break; + } + } + + return size; +} + +/*------------------------------------------------------------------------------ + * Cache Bus + *----------------------------------------------------------------------------*/ +/** + * @brief Get the buses of a particular L1 Cache that are mapped to a virtual address range * * External virtual address can 
only be accessed when the involved cache buses are enabled. * This API is to get the cache buses where the memory region (from `vaddr_start` to `vaddr_start + len`) reside. * - * This api in esp32p4 is not used. There is no hardware interface to mask the i/dbus to l2 cache. Needs check, TODO: IDF-7516 - * * @param cache_id cache ID (when l1 cache is per core) * @param vaddr_start virtual address start * @param len vaddr length */ -#if !BOOTLOADER_BUILD __attribute__((always_inline)) -#endif static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t vaddr_start, uint32_t len) { - HAL_ASSERT(cache_id == 0 || cache_id == 1); + return (cache_bus_mask_t)(CACHE_LL_DEFAULT_IBUS_MASK | CACHE_LL_DEFAULT_DBUS_MASK); +} + +/** + * Enable the L1 Cache Buses + * + * @param cache_id cache ID (when l1 cache is per core) + * @param mask To know which buses should be enabled + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t mask) +{ + //not used, for compatibility +} + +/** + * Disable the Cache Buses + * + * @param cache_id cache ID (when l1 cache is per core) + * @param mask To know which buses should be disabled + */ +__attribute__((always_inline)) +static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t mask) +{ + //not used, for compatibility +} + +/** + * @brief Get the buses of a particular cache that are mapped to a virtual address range + * + * @param cache_id cache ID + * @param vaddr_start virtual address start + * @param len vaddr length + */ +__attribute__((always_inline)) +static inline cache_bus_mask_t cache_ll_l2_get_bus(uint32_t cache_id, uint32_t vaddr_start, uint32_t len) +{ + (void)cache_id; cache_bus_mask_t mask = (cache_bus_mask_t)0; uint32_t vaddr_end = vaddr_start + len - 1; @@ -70,38 +974,40 @@ static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t v mask = (cache_bus_mask_t)(mask | CACHE_BUS_IBUS1); mask = 
(cache_bus_mask_t)(mask | CACHE_BUS_DBUS1); } else { - HAL_ASSERT(0); //Out of region + HAL_ASSERT(0); } return mask; } /** - * Enable the Cache Buses + * @brief Get Cache level and the ID of the vaddr * - * This api is not used in esp32p4. Needs check, TODO: IDF-7516 + * @param vaddr_start virtual address start + * @param len vaddr length + * @param out_level cache level + * @param out_id cache id * - * @param cache_id cache ID (when l1 cache is per core) - * @param mask To know which buses should be enabled + * @return true for valid */ -#if !BOOTLOADER_BUILD __attribute__((always_inline)) -#endif -static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t mask) +static inline bool cache_ll_vaddr_to_cache_level_id(uint32_t vaddr_start, uint32_t len, uint32_t *out_level, uint32_t *out_id) { -} + bool valid = false; + uint32_t vaddr_end = vaddr_start + len - 1; -/** - * Disable the Cache Buses - * - * This api is not used in esp32p4. Needs check, TODO: IDF-7516 - * - * @param cache_id cache ID (when l1 cache is per core) - * @param mask To know which buses should be disabled - */ -__attribute__((always_inline)) -static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t mask) -{ + if (vaddr_start >= SOC_IRAM0_ADDRESS_LOW && vaddr_end < SOC_IRAM0_ADDRESS_HIGH) { + *out_level = 1; + *out_id = CACHE_LL_ID_ALL; + valid = true; + } else if (vaddr_start >= SOC_DRAM_FLASH_ADDRESS_LOW && vaddr_end < SOC_DRAM_PSRAM_ADDRESS_HIGH) { + //PSRAM vaddr is right after the FLASH vaddr + *out_level = 2; + *out_id = CACHE_LL_ID_ALL; + valid = true; + } + + return valid; } /*------------------------------------------------------------------------------ diff --git a/components/hal/esp32s2/include/hal/cache_ll.h b/components/hal/esp32s2/include/hal/cache_ll.h index 4e575c9487..0a2ecbde7a 100644 --- a/components/hal/esp32s2/include/hal/cache_ll.h +++ b/components/hal/esp32s2/include/hal/cache_ll.h @@ -23,6 +23,11 @@ extern "C" { #define 
CACHE_LL_DEFAULT_IBUS_MASK CACHE_BUS_IBUS0 #define CACHE_LL_DEFAULT_DBUS_MASK CACHE_BUS_IBUS2 +#define CACHE_LL_ID_ALL 1 //All of the caches in a type and level, make this value greater than any ID +#define CACHE_LL_LEVEL_INT_MEM 0 //Cache level for accessing internal mem +#define CACHE_LL_LEVEL_EXT_MEM 1 //Cache level for accessing external mem +#define CACHE_LL_LEVEL_ALL 2 //All of the cache levels, make this value greater than any level +#define CACHE_LL_LEVEL_NUMS 1 //Number of cache levels #define CACHE_LL_L1_ICACHE_AUTOLOAD (1<<0) #define CACHE_LL_L1_DCACHE_AUTOLOAD (1<<0) @@ -57,15 +62,18 @@ static inline bool cache_ll_l1_is_dcache_autoload_enabled(void) } /** - * @brief Check if ICache or DCache auto preload is enabled or not + * @brief Check if Cache auto preload is enabled or not. * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level * * @return true: enabled; false: disabled */ __attribute__((always_inline)) -static inline bool cache_ll_is_cache_autoload_enabled(cache_type_t type) +static inline bool cache_ll_is_cache_autoload_enabled(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); bool enabled = false; switch (type) { @@ -101,12 +109,14 @@ static inline void cache_ll_l1_disable_dcache(void) } /** - * @brief Disable ICache or DCache or both + * @brief Disable Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_disable_cache(cache_type_t type) +static inline void cache_ll_disable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { switch (type) { @@ -146,16 +156,16 @@ static inline void cache_ll_l1_enable_dcache(bool data_autoload_en) } /** - * @brief Enable ICache or DCache or 
both + * @brief Enable Cache * - * @param type see `cache_type_t` - * - * @param data_autoload_en Dcache auto preload enabled - * - * @param inst_autoload_en Icache auto preload enabled + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not */ __attribute__((always_inline)) -static inline void cache_ll_enable_cache(cache_type_t type, bool inst_autoload_en, bool data_autoload_en) +static inline void cache_ll_enable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) { switch (type) { @@ -191,12 +201,14 @@ static inline void cache_ll_l1_suspend_dcache(void) } /** - * @brief Suspend ICache or DCache or both + * @brief Suspend Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_suspend_cache(cache_type_t type) +static inline void cache_ll_suspend_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { switch (type) { @@ -236,16 +248,16 @@ static inline void cache_ll_l1_resume_dcache(bool data_autoload_en) } /** - * @brief Resume ICache or DCache or both + * @brief Resume Cache * - * @param type see `cache_type_t` - * - * @param data_autoload_en Dcache auto preload enabled - * - * @param inst_autoload_en Icache auto preload enabled + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not */ __attribute__((always_inline)) -static inline void cache_ll_resume_cache(cache_type_t type, bool inst_autoload_en, bool data_autoload_en) 
+static inline void cache_ll_resume_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) { switch (type) { @@ -271,7 +283,7 @@ static inline void cache_ll_resume_cache(cache_type_t type, bool inst_autoload_e */ __attribute__((always_inline)) static inline bool cache_ll_l1_is_icache_enabled(uint32_t cache_id){ - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); bool enabled; enabled = REG_GET_BIT(EXTMEM_PRO_ICACHE_CTRL_REG, EXTMEM_PRO_ICACHE_ENABLE); @@ -288,7 +300,7 @@ static inline bool cache_ll_l1_is_icache_enabled(uint32_t cache_id){ __attribute__((always_inline)) static inline bool cache_ll_l1_is_dcache_enabled(uint32_t cache_id) { - HAL_ASSERT(cache_id == 0); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); bool enabled; enabled = REG_GET_BIT(EXTMEM_PRO_DCACHE_CTRL_REG, EXTMEM_PRO_DCACHE_ENABLE); @@ -324,13 +336,16 @@ static inline bool cache_ll_is_cache_enabled(cache_type_t type) /** * @brief Invalidate cache supported addr * - * Invalidate a Cache item for either ICache or DCache. 
+ * Invalidate a cache item * - * @param vaddr Start address of the region to be invalidated - * @param size Size of the region to be invalidated + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be invalidated + * @param size size of the region to be invalidated */ __attribute__((always_inline)) -static inline void cache_ll_invalidate_addr(uint32_t vaddr, uint32_t size) +static inline void cache_ll_invalidate_addr(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size) { Cache_Invalidate_Addr(vaddr, size); } @@ -338,13 +353,16 @@ static inline void cache_ll_invalidate_addr(uint32_t vaddr, uint32_t size) /** * @brief Writeback cache supported addr * - * Writeback the DCache item to external memory + * Writeback a cache item * - * @param vaddr Start address of the region to writeback - * @param size Size of the region to writeback + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be written back + * @param size size of the region to be written back */ __attribute__((always_inline)) -static inline void cache_ll_writeback_addr(uint32_t vaddr, uint32_t size) +static inline void cache_ll_writeback_addr(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size) { Cache_WriteBack_Addr(vaddr, size); } @@ -376,14 +394,16 @@ static inline uint32_t cache_ll_l1_dcache_get_line_size(void) } /** - * @brief Get ICache or DCache line size, in bytes + * @brief Get Cache line size, in bytes * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level * - * @return ICache/DCache line size, in bytes + * @return Cache line size, in bytes */ 
__attribute__((always_inline)) -static inline uint32_t cache_ll_get_line_size(cache_type_t type) +static inline uint32_t cache_ll_get_line_size(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { uint32_t size = 0; switch (type) @@ -504,6 +524,32 @@ static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t m REG_SET_BIT(EXTMEM_PRO_DCACHE_CTRL1_REG, dbus_mask); } +/** + * @brief Get Cache level and the ID of the vaddr + * + * @param vaddr_start virtual address start + * @param len vaddr length + * @param out_level cache level + * @param out_id cache id + * + * @return true for valid + */ +__attribute__((always_inline)) +static inline bool cache_ll_vaddr_to_cache_level_id(uint32_t vaddr_start, uint32_t len, uint32_t *out_level, uint32_t *out_id) +{ + bool valid = false; + uint32_t vaddr_end = vaddr_start + len - 1; + + valid |= ((vaddr_start >= SOC_DROM0_ADDRESS_LOW) && (vaddr_end < SOC_DROM0_ADDRESS_HIGH)) || ((vaddr_start >= SOC_DPORT_CACHE_ADDRESS_LOW) && (vaddr_end < SOC_DRAM0_CACHE_ADDRESS_HIGH)); + valid |= ((vaddr_start >= SOC_IRAM0_CACHE_ADDRESS_LOW) && (vaddr_end < SOC_IRAM1_ADDRESS_HIGH)); + + if (valid) { + *out_level = 1; + *out_id = 0; + } + + return valid; +} #ifdef __cplusplus } diff --git a/components/hal/esp32s3/include/hal/cache_ll.h b/components/hal/esp32s3/include/hal/cache_ll.h index 9a953b2528..215473731e 100644 --- a/components/hal/esp32s3/include/hal/cache_ll.h +++ b/components/hal/esp32s3/include/hal/cache_ll.h @@ -38,6 +38,11 @@ extern "C" { #define CACHE_LL_L1_ILG_EVENT_ICACHE_PRELOAD_OP_FAULT (1<<1) #define CACHE_LL_L1_ILG_EVENT_ICACHE_SYNC_OP_FAULT (1<<0) +#define CACHE_LL_ID_ALL 2 //All of the caches in a type and level, make this value greater than any id +#define CACHE_LL_LEVEL_INT_MEM 0 //Cache level for accessing internal mem +#define CACHE_LL_LEVEL_EXT_MEM 1 //Cache level for accessing external mem +#define CACHE_LL_LEVEL_ALL 2 //All of the cache levels, make this value greater than any level +#define 
CACHE_LL_LEVEL_NUMS 1 //Number of cache levels #define CACHE_LL_L1_ICACHE_AUTOLOAD (1<<2) #define CACHE_LL_L1_DCACHE_AUTOLOAD (1<<2) @@ -72,15 +77,18 @@ static inline bool cache_ll_l1_is_dcache_autoload_enabled(void) } /** - * @brief Check if ICache or DCache auto preload is enabled or not + * @brief Check if Cache auto preload is enabled or not. * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level * * @return true: enabled; false: disabled */ __attribute__((always_inline)) -static inline bool cache_ll_is_cache_autoload_enabled(cache_type_t type) +static inline bool cache_ll_is_cache_autoload_enabled(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); bool enabled = false; switch (type) { @@ -116,12 +124,14 @@ static inline void cache_ll_l1_disable_dcache(void) } /** - * @brief Disable ICache or DCache or both + * @brief Disable Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_disable_cache(cache_type_t type) +static inline void cache_ll_disable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { switch (type) { @@ -161,16 +171,16 @@ static inline void cache_ll_l1_enable_dcache(bool data_autoload_en) } /** - * @brief Enable ICache or DCache or both + * @brief Enable Cache * - * @param type see `cache_type_t` - * - * @param data_autoload_en Dcache auto preload enabled - * - * @param inst_autoload_en Icache auto preload enabled + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not */ 
__attribute__((always_inline)) -static inline void cache_ll_enable_cache(cache_type_t type, bool inst_autoload_en, bool data_autoload_en) +static inline void cache_ll_enable_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) { switch (type) { @@ -206,12 +216,14 @@ static inline void cache_ll_l1_suspend_dcache(void) } /** - * @brief Suspend ICache or DCache or both + * @brief Suspend Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_suspend_cache(cache_type_t type) +static inline void cache_ll_suspend_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { switch (type) { @@ -251,16 +263,16 @@ static inline void cache_ll_l1_resume_dcache(bool data_autoload_en) } /** - * @brief Resume ICache or DCache or both + * @brief Resume Cache * - * @param type see `cache_type_t` - * - * @param data_autoload_en Dcache auto preload enabled - * - * @param inst_autoload_en Icache auto preload enabled + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param data_autoload_en data autoload enabled or not + * @param inst_autoload_en inst autoload enabled or not */ __attribute__((always_inline)) -static inline void cache_ll_resume_cache(cache_type_t type, bool inst_autoload_en, bool data_autoload_en) +static inline void cache_ll_resume_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id, bool inst_autoload_en, bool data_autoload_en) { switch (type) { @@ -287,7 +299,7 @@ static inline void cache_ll_resume_cache(cache_type_t type, bool inst_autoload_e __attribute__((always_inline)) static inline bool cache_ll_l1_is_icache_enabled(uint32_t cache_id) { - HAL_ASSERT(cache_id == 0 || cache_id == 1); + HAL_ASSERT(cache_id <= 
CACHE_LL_ID_ALL); return REG_GET_BIT(EXTMEM_ICACHE_CTRL_REG, EXTMEM_ICACHE_ENABLE); } @@ -301,7 +313,7 @@ static inline bool cache_ll_l1_is_icache_enabled(uint32_t cache_id) __attribute__((always_inline)) static inline bool cache_ll_l1_is_dcache_enabled(uint32_t cache_id) { - HAL_ASSERT(cache_id == 0 || cache_id == 1); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); return REG_GET_BIT(EXTMEM_DCACHE_CTRL_REG, EXTMEM_DCACHE_ENABLE); } @@ -334,13 +346,16 @@ static inline bool cache_ll_is_cache_enabled(cache_type_t type) /** * @brief Invalidate cache supported addr * - * Invalidate a Cache item for either ICache or DCache. + * Invalidate a cache item * - * @param vaddr Start address of the region to be invalidated - * @param size Size of the region to be invalidated + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be invalidated + * @param size size of the region to be invalidated */ __attribute__((always_inline)) -static inline void cache_ll_invalidate_addr(uint32_t vaddr, uint32_t size) +static inline void cache_ll_invalidate_addr(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size) { Cache_Invalidate_Addr(vaddr, size); } @@ -348,13 +363,16 @@ static inline void cache_ll_invalidate_addr(uint32_t vaddr, uint32_t size) /** * @brief Writeback cache supported addr * - * Writeback the DCache item to external memory + * Writeback a cache item * - * @param vaddr Start address of the region to writeback - * @param size Size of the region to writeback + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level + * @param vaddr start address of the region to be written back + * @param size size of the region to be written back */ __attribute__((always_inline)) -static inline void cache_ll_writeback_addr(uint32_t vaddr, uint32_t size) 
+static inline void cache_ll_writeback_addr(uint32_t cache_level, cache_type_t type, uint32_t cache_id, uint32_t vaddr, uint32_t size) { Cache_WriteBack_Addr(vaddr, size); } @@ -378,12 +396,14 @@ static inline void cache_ll_l1_freeze_dcache(void) } /** - * @brief Freeze ICache or DCache or both + * @brief Freeze Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_freeze_cache(cache_type_t type) +static inline void cache_ll_freeze_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { switch (type) { @@ -419,12 +439,14 @@ static inline void cache_ll_l1_unfreeze_dcache(void) } /** - * @brief Unfreeze ICache or DCache or both + * @brief Unfreeze Cache * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level */ __attribute__((always_inline)) -static inline void cache_ll_unfreeze_cache(cache_type_t type) +static inline void cache_ll_unfreeze_cache(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { switch (type) { @@ -468,14 +490,16 @@ static inline uint32_t cache_ll_l1_dcache_get_line_size(void) } /** - * @brief Get ICache or DCache line size, in bytes + * @brief Get Cache line size, in bytes * - * @param type see `cache_type_t` + * @param cache_level level of the cache + * @param type see `cache_type_t` + * @param cache_id id of the cache in this type and level * - * @return ICache/DCache line size, in bytes + * @return Cache line size, in bytes */ __attribute__((always_inline)) -static inline uint32_t cache_ll_get_line_size(cache_type_t type) +static inline uint32_t cache_ll_get_line_size(uint32_t cache_level, cache_type_t type, uint32_t cache_id) { uint32_t size = 0; switch (type) @@ -508,7 +532,7 @@ __attribute__((always_inline)) #endif 
static inline cache_bus_mask_t cache_ll_l1_get_bus(uint32_t cache_id, uint32_t vaddr_start, uint32_t len) { - HAL_ASSERT(cache_id == 0 || cache_id == 1); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); cache_bus_mask_t mask = (cache_bus_mask_t)0; uint32_t vaddr_end = vaddr_start + len - 1; @@ -534,7 +558,7 @@ __attribute__((always_inline)) #endif static inline void cache_ll_l1_enable_bus(uint32_t cache_id, cache_bus_mask_t mask) { - HAL_ASSERT(cache_id == 0 || cache_id == 1); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); //On esp32s3, only `CACHE_BUS_IBUS0` and `CACHE_BUS_DBUS0` are supported. Use `cache_ll_l1_get_bus()` to get your bus first HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2| CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0); @@ -566,7 +590,7 @@ __attribute__((always_inline)) static inline cache_bus_mask_t cache_ll_l1_get_enabled_bus(uint32_t cache_id) { cache_bus_mask_t mask = (cache_bus_mask_t)0; - HAL_ASSERT(cache_id == 0 || cache_id == 1); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); //On esp32s3, only `CACHE_BUS_IBUS0` and `CACHE_BUS_DBUS0` are supported. Use `cache_ll_l1_get_bus()` to get your bus first uint32_t ibus_mask = REG_READ(EXTMEM_ICACHE_CTRL1_REG); @@ -595,7 +619,7 @@ static inline cache_bus_mask_t cache_ll_l1_get_enabled_bus(uint32_t cache_id) __attribute__((always_inline)) static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t mask) { - HAL_ASSERT(cache_id == 0 || cache_id == 1); + HAL_ASSERT(cache_id <= CACHE_LL_ID_ALL); //On esp32s3, only `CACHE_BUS_IBUS0` and `CACHE_BUS_DBUS0` are supported. 
Use `cache_ll_l1_get_bus()` to get your bus first HAL_ASSERT((mask & (CACHE_BUS_IBUS1 | CACHE_BUS_IBUS2| CACHE_BUS_DBUS1 | CACHE_BUS_DBUS2)) == 0); @@ -616,6 +640,33 @@ static inline void cache_ll_l1_disable_bus(uint32_t cache_id, cache_bus_mask_t m REG_SET_BIT(EXTMEM_DCACHE_CTRL1_REG, dbus_mask); } +/** + * @brief Get Cache level and the ID of the vaddr + * + * @param vaddr_start virtual address start + * @param len vaddr length + * @param out_level cache level + * @param out_id cache id + * + * @return true for valid + */ +__attribute__((always_inline)) +static inline bool cache_ll_vaddr_to_cache_level_id(uint32_t vaddr_start, uint32_t len, uint32_t *out_level, uint32_t *out_id) +{ + bool valid = false; + uint32_t vaddr_end = vaddr_start + len - 1; + + valid |= (SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_IRAM0_CACHE(vaddr_end)); + valid |= (SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_start) && SOC_ADDRESS_IN_DRAM0_CACHE(vaddr_end)); + + if (valid) { + *out_level = 1; + *out_id = 0; + } + + return valid; +} + /*------------------------------------------------------------------------------ * Interrupt *----------------------------------------------------------------------------*/ diff --git a/components/hal/include/hal/cache_hal.h b/components/hal/include/hal/cache_hal.h index 8464c48f88..36926976c7 100644 --- a/components/hal/include/hal/cache_hal.h +++ b/components/hal/include/hal/cache_hal.h @@ -1,6 +1,6 @@ /* - * SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD + * SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD * * SPDX-License-Identifier: Apache-2.0 */ @@ -21,105 +21,129 @@ extern "C" { void cache_hal_init(void); /** - * @brief Disable cache + * @brief Disable Cache * - * Disable the ICache or DCache or both, all the items in the corresponding Cache(s) will be invalideated. 
- * Next request to these items will trigger a transaction to the external memory (flash / psram) + * Disable the ICache or DCache or both, of a certain level or all levels. + * All the items in the corresponding Cache(s) will be invalidated. + * Next request to these items will trigger a transaction to the physical memory * * @note If the autoload feature is enabled, this API will return until the ICache autoload is disabled. * - * @param type see `cache_type_t` + * @param cache_level Level of the Cache(s) + * @param type see `cache_type_t` */ -void cache_hal_disable(cache_type_t type); +void cache_hal_disable(uint32_t cache_level, cache_type_t type); /** - * @brief Enable cache + * @brief Enable Cache * - * Enable the ICache or DCache or both. + * Enable the ICache or DCache or both, of a certain level or all levels. * - * @param type see `cache_type_t` + * @param cache_level Level of the Cache(s) + * @param type see `cache_type_t` */ -void cache_hal_enable(cache_type_t type); +void cache_hal_enable(uint32_t cache_level, cache_type_t type); /** - * @brief Suspend cache + * @brief Suspend Cache * - * Suspend the ICache or DCache or both,suspends the CPU access to cache for a while, without invalidation. + * Suspend the ICache or DCache or both, of a certain level or all levels. + * This API suspends the CPU access to cache for a while, without invalidation. * - * @param type see `cache_type_t` - * - * @return Current status of corresponding Cache(s) + * @param cache_level Level of the Cache(s) + * @param type see `cache_type_t` */ -void cache_hal_suspend(cache_type_t type); +void cache_hal_suspend(uint32_t cache_level, cache_type_t type); /** - * @brief Resume cache + * @brief Resume Cache * - * Resume the ICache or DCache or both. + * Resume the ICache or DCache or both, of a certain level or all levels. 
* - * @param type see `cache_type_t` + * @param cache_level Level of the Cache(s) + * @param type see `cache_type_t` */ -void cache_hal_resume(cache_type_t type); +void cache_hal_resume(uint32_t cache_level, cache_type_t type); /** * @brief Check if corresponding cache is enabled or not * - * @param type see `cache_type_t` + * @param cache_level Level of the Cache(s) + * @param type see `cache_type_t` * * @return true: enabled; false: disabled */ -bool cache_hal_is_cache_enabled(cache_type_t type); +bool cache_hal_is_cache_enabled(uint32_t cache_level, cache_type_t type); /** - * @brief Invalidate cache supported addr + * @brief Invalidate Cache supported addr * * Invalidate a Cache item for either ICache or DCache. * * @param vaddr Start address of the region to be invalidated * @param size Size of the region to be invalidated + * + * @return True for valid address. No operation if invalid */ -void cache_hal_invalidate_addr(uint32_t vaddr, uint32_t size); +bool cache_hal_invalidate_addr(uint32_t vaddr, uint32_t size); #if SOC_CACHE_WRITEBACK_SUPPORTED /** - * @brief Writeback cache supported addr + * @brief Writeback Cache supported addr * * Writeback the DCache item to external memory * * @param vaddr Start address of the region to writeback * @param size Size of the region to writeback + * + * @return True for valid address. No operation if invalid */ -void cache_hal_writeback_addr(uint32_t vaddr, uint32_t size); +bool cache_hal_writeback_addr(uint32_t vaddr, uint32_t size); #endif //#if SOC_CACHE_WRITEBACK_SUPPORTED #if SOC_CACHE_FREEZE_SUPPORTED /** - * @brief Freeze cache + * @brief Freeze Cache * * Freeze cache, CPU access to cache will be suspended, until the cache is unfrozen. 
* - * @param type see `cache_type_t` + * @param cache_level Level of the Cache(s) + * @param type see `cache_type_t` */ -void cache_hal_freeze(cache_type_t type); +void cache_hal_freeze(uint32_t cache_level, cache_type_t type); /** * @brief Unfreeze cache * * Unfreeze cache, CPU access to cache will be restored * - * @param type see `cache_type_t` + * @param cache_level Level of the Cache(s) + * @param type see `cache_type_t` */ -void cache_hal_unfreeze(cache_type_t type); +void cache_hal_unfreeze(uint32_t cache_level, cache_type_t type); #endif //#if SOC_CACHE_FREEZE_SUPPORTED /** * @brief Get cache line size, in bytes * - * @param type see `cache_type_t` + * @param cache_level Level of the Cache(s) + * @param type see `cache_type_t` * * @return cache line size, in bytes */ -uint32_t cache_hal_get_cache_line_size(cache_type_t type); +uint32_t cache_hal_get_cache_line_size(uint32_t cache_level, cache_type_t type); + +/** + * @brief Get Cache level and the ID of the vaddr + * + * @param vaddr_start virtual address start + * @param len vaddr length + * @param out_level cache level + * @param out_id cache id + * + * @return true for valid, false for invalid addr or null pointer + */ +bool cache_hal_vaddr_to_cache_level_id(uint32_t vaddr_start, uint32_t len, uint32_t *out_level, uint32_t *out_id); #ifdef __cplusplus } diff --git a/components/hal/mmu_hal.c b/components/hal/mmu_hal.c index 622de3b4f9..b60fae6483 100644 --- a/components/hal/mmu_hal.c +++ b/components/hal/mmu_hal.c @@ -20,11 +20,6 @@ void mmu_hal_init(void) ROM_Boot_Cache_Init(); #endif -//TODO: IDF-7516 -#if CONFIG_IDF_TARGET_ESP32P4 - Cache_Invalidate_All(CACHE_MAP_L2_CACHE); -#endif - mmu_ll_set_page_size(0, CONFIG_MMU_PAGE_SIZE); mmu_hal_unmap_all(); } diff --git a/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in b/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in index 050a05c4af..3a120de79b 100644 --- a/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in +++ 
b/components/soc/esp32p4/include/soc/Kconfig.soc_caps.in @@ -203,11 +203,15 @@ config SOC_SHARED_IDCACHE_SUPPORTED bool default y +config SOC_CACHE_WRITEBACK_SUPPORTED + bool + default y + config SOC_CACHE_FREEZE_SUPPORTED bool default y -config SOC_CACHE_L2_SUPPORTED +config SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE bool default y diff --git a/components/soc/esp32p4/include/soc/ext_mem_defs.h b/components/soc/esp32p4/include/soc/ext_mem_defs.h index 2b05bba681..0e428d116e 100644 --- a/components/soc/esp32p4/include/soc/ext_mem_defs.h +++ b/components/soc/esp32p4/include/soc/ext_mem_defs.h @@ -19,6 +19,11 @@ extern "C" { #define SOC_MMU_PAGE_SIZE 0x10000 #endif +#define SOC_IRAM0_ADDRESS_LOW 0x4ff00000 +#define SOC_IRAM0_ADDRESS_HIGH 0x4ffc0000 + +#define SOC_DRAM0_ADDRESS_LOW SOC_IRAM0_ADDRESS_LOW +#define SOC_DRAM0_ADDRESS_HIGH SOC_IRAM0_ADDRESS_HIGH #define SOC_IRAM0_CACHE_ADDRESS_LOW 0x40000000 #define SOC_IRAM0_CACHE_ADDRESS_HIGH 0x50000000 diff --git a/components/soc/esp32p4/include/soc/soc_caps.h b/components/soc/esp32p4/include/soc/soc_caps.h index 0ae4f43de6..e4ee0d074d 100644 --- a/components/soc/esp32p4/include/soc/soc_caps.h +++ b/components/soc/esp32p4/include/soc/soc_caps.h @@ -132,8 +132,9 @@ /*-------------------------- CACHE CAPS --------------------------------------*/ #define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data +#define SOC_CACHE_WRITEBACK_SUPPORTED 1 #define SOC_CACHE_FREEZE_SUPPORTED 1 -#define SOC_CACHE_L2_SUPPORTED 1 +#define SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE 1 /*-------------------------- CPU CAPS ----------------------------------------*/ #define SOC_CPU_CORES_NUM (2U) diff --git a/components/spi_flash/cache_utils.c b/components/spi_flash/cache_utils.c index c1e7e75c76..9c145f1ee7 100644 --- a/components/spi_flash/cache_utils.c +++ b/components/spi_flash/cache_utils.c @@ -360,17 +360,17 @@ void IRAM_ATTR spi_flash_enable_cache(uint32_t cpuid) void IRAM_ATTR spi_flash_disable_cache(uint32_t cpuid, uint32_t 
*saved_state) { - cache_hal_suspend(CACHE_TYPE_ALL); + cache_hal_suspend(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); } void IRAM_ATTR spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_state) { - cache_hal_resume(CACHE_TYPE_ALL); + cache_hal_resume(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); } bool IRAM_ATTR spi_flash_cache_enabled(void) { - return cache_hal_is_cache_enabled(CACHE_TYPE_ALL); + return cache_hal_is_cache_enabled(CACHE_LL_LEVEL_EXT_MEM, CACHE_TYPE_ALL); } #if CONFIG_IDF_TARGET_ESP32S2 @@ -918,15 +918,15 @@ void esp_config_l2_cache_mode(void) { cache_size_t cache_size; cache_line_size_t cache_line_size; -#if CONFIG_ESP32P4_L2_CACHE_128KB +#if CONFIG_CACHE_L2_CACHE_128KB cache_size = CACHE_SIZE_128K; -#elif CONFIG_ESP32P4_L2_CACHE_256KB +#elif CONFIG_CACHE_L2_CACHE_256KB cache_size = CACHE_SIZE_256K; #else cache_size = CACHE_SIZE_512K; #endif -#if CONFIG_ESP32P4_L2_CACHE_LINE_64B +#if CONFIG_CACHE_L2_CACHE_LINE_64B cache_line_size = CACHE_LINE_SIZE_64B; #else cache_line_size = CACHE_LINE_SIZE_128B; diff --git a/components/spi_flash/flash_mmap.c b/components/spi_flash/flash_mmap.c index c620a2d86f..da4e991566 100644 --- a/components/spi_flash/flash_mmap.c +++ b/components/spi_flash/flash_mmap.c @@ -30,9 +30,6 @@ #if CONFIG_IDF_TARGET_ESP32 #include "esp_private/esp_cache_esp32_private.h" -#elif CONFIG_IDF_TARGET_ESP32P4 -//TODO: IDF-7516 -#include "esp32p4/rom/cache.h" #endif #include "esp_private/cache_utils.h" @@ -377,12 +374,7 @@ IRAM_ATTR bool spi_flash_check_and_flush_cache(size_t start_addr, size_t length) return true; #else // CONFIG_IDF_TARGET_ESP32 if (vaddr != NULL) { -#if CONFIG_IDF_TARGET_ESP32P4 - //TODO: IDF-7516 - Cache_Invalidate_Addr(CACHE_MAP_L1_DCACHE | CACHE_MAP_L2_CACHE, (uint32_t)vaddr, SPI_FLASH_MMU_PAGE_SIZE); -#else cache_hal_invalidate_addr((uint32_t)vaddr, SPI_FLASH_MMU_PAGE_SIZE); -#endif ret = true; } #endif // CONFIG_IDF_TARGET_ESP32 diff --git a/examples/peripherals/lcd/i80_controller/main/i80_controller_example_main.c 
b/examples/peripherals/lcd/i80_controller/main/i80_controller_example_main.c index 13a72fc0af..0d0140d77b 100644 --- a/examples/peripherals/lcd/i80_controller/main/i80_controller_example_main.c +++ b/examples/peripherals/lcd/i80_controller/main/i80_controller_example_main.c @@ -17,6 +17,7 @@ #include "driver/i2c.h" #include "esp_err.h" #include "esp_log.h" +#include "esp_dma_utils.h" #include "lvgl.h" #if CONFIG_EXAMPLE_LCD_TOUCH_CONTROLLER_GT911 #include "esp_lcd_touch_gt911.h" @@ -406,13 +407,12 @@ void app_main(void) // it's recommended to choose the size of the draw buffer(s) to be at least 1/10 screen sized lv_color_t *buf1 = NULL; lv_color_t *buf2 = NULL; + uint32_t malloc_flags = 0; #if CONFIG_EXAMPLE_LCD_I80_COLOR_IN_PSRAM - buf1 = heap_caps_aligned_alloc(EXAMPLE_PSRAM_DATA_ALIGNMENT, EXAMPLE_LCD_H_RES * 100 * sizeof(lv_color_t), MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT); - buf2 = heap_caps_aligned_alloc(EXAMPLE_PSRAM_DATA_ALIGNMENT, EXAMPLE_LCD_H_RES * 100 * sizeof(lv_color_t), MALLOC_CAP_SPIRAM | MALLOC_CAP_8BIT); -#else - buf1 = heap_caps_malloc(EXAMPLE_LCD_H_RES * 100 * sizeof(lv_color_t), MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); - buf2 = heap_caps_malloc(EXAMPLE_LCD_H_RES * 100 * sizeof(lv_color_t), MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL); + malloc_flags |= ESP_DMA_MALLOC_FLAG_PSRAM; #endif // CONFIG_EXAMPLE_LCD_I80_COLOR_IN_PSRAM + ESP_ERROR_CHECK(esp_dma_malloc(EXAMPLE_LCD_H_RES * 100 * sizeof(lv_color_t), malloc_flags, (void *)&buf1, NULL)); + ESP_ERROR_CHECK(esp_dma_malloc(EXAMPLE_LCD_H_RES * 100 * sizeof(lv_color_t), malloc_flags, (void *)&buf2, NULL)); assert(buf1); assert(buf2); ESP_LOGI(TAG, "buf1@%p, buf2@%p", buf1, buf2);