Merge branch 'bugfix/fix_cache_data_mem_corrupt_after_sleep_v5.0' into 'release/v5.0'

fix(lightsleep): Suspend cache before goto sleep to avoid cache load wrong data (backport v5.0)

See merge request espressif/esp-idf!25089
This commit is contained in:
Jiang Jiang Jian
2023-08-03 11:39:05 +08:00
21 changed files with 410 additions and 109 deletions

View File

@@ -6,6 +6,7 @@
#include <stdint.h> #include <stdint.h>
#include <stdlib.h> #include <stdlib.h>
#include "esp_attr.h"
#include "soc/soc.h" #include "soc/soc.h"
#include "soc/rtc.h" #include "soc/rtc.h"
#include "soc/rtc_cntl_reg.h" #include "soc/rtc_cntl_reg.h"
@@ -22,6 +23,8 @@
#include "soc/regi2c_dig_reg.h" #include "soc/regi2c_dig_reg.h"
#include "esp_efuse.h" #include "esp_efuse.h"
static const DRAM_ATTR rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
/** /**
* Configure whether certain peripherals are powered down in deep sleep * Configure whether certain peripherals are powered down in deep sleep
* @param cfg power down flags as rtc_sleep_pu_config_t structure * @param cfg power down flags as rtc_sleep_pu_config_t structure
@@ -146,7 +149,6 @@ void rtc_sleep_get_default_config(uint32_t sleep_flags, rtc_sleep_config_t *out_
void rtc_sleep_init(rtc_sleep_config_t cfg) void rtc_sleep_init(rtc_sleep_config_t cfg)
{ {
if (cfg.lslp_mem_inf_fpu) { if (cfg.lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg); rtc_sleep_pu(pu_cfg);
} }
@@ -238,7 +240,6 @@ static uint32_t rtc_sleep_finish(uint32_t lslp_mem_inf_fpu)
/* restore config if it is a light sleep */ /* restore config if it is a light sleep */
if (lslp_mem_inf_fpu) { if (lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg); rtc_sleep_pu(pu_cfg);
} }
return reject; return reject;

View File

@@ -6,6 +6,7 @@
#include <stdint.h> #include <stdint.h>
#include <stdlib.h> #include <stdlib.h>
#include "esp_attr.h"
#include "soc/soc.h" #include "soc/soc.h"
#include "soc/rtc.h" #include "soc/rtc.h"
#include "soc/rtc_cntl_reg.h" #include "soc/rtc_cntl_reg.h"
@@ -27,6 +28,8 @@
#include "soc/systimer_reg.h" #include "soc/systimer_reg.h"
#endif #endif
static const DRAM_ATTR rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
/** /**
* Configure whether certain peripherals are powered down in deep sleep * Configure whether certain peripherals are powered down in deep sleep
* @param cfg power down flags as rtc_sleep_pu_config_t structure * @param cfg power down flags as rtc_sleep_pu_config_t structure
@@ -168,7 +171,6 @@ void rtc_sleep_get_default_config(uint32_t sleep_flags, rtc_sleep_config_t *out_
void rtc_sleep_init(rtc_sleep_config_t cfg) void rtc_sleep_init(rtc_sleep_config_t cfg)
{ {
if (cfg.lslp_mem_inf_fpu) { if (cfg.lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg); rtc_sleep_pu(pu_cfg);
} }
if (cfg.wifi_pd_en) { if (cfg.wifi_pd_en) {
@@ -374,7 +376,6 @@ static uint32_t rtc_sleep_finish(uint32_t lslp_mem_inf_fpu)
/* restore config if it is a light sleep */ /* restore config if it is a light sleep */
if (lslp_mem_inf_fpu) { if (lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg); rtc_sleep_pu(pu_cfg);
} }
return reject; return reject;

View File

@@ -6,6 +6,7 @@
#include <stdint.h> #include <stdint.h>
#include <stdlib.h> #include <stdlib.h>
#include "esp_attr.h"
#include "soc/soc.h" #include "soc/soc.h"
#include "soc/rtc.h" #include "soc/rtc.h"
#include "soc/rtc_cntl_reg.h" #include "soc/rtc_cntl_reg.h"
@@ -44,6 +45,8 @@
*/ */
static const char *TAG = "rtc_sleep"; static const char *TAG = "rtc_sleep";
static const DRAM_ATTR rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
void rtc_sleep_pu(rtc_sleep_pu_config_t cfg) void rtc_sleep_pu(rtc_sleep_pu_config_t cfg)
{ {
REG_SET_FIELD(RTC_CNTL_DIG_PWC_REG, RTC_CNTL_LSLP_MEM_FORCE_PU, cfg.dig_fpu); REG_SET_FIELD(RTC_CNTL_DIG_PWC_REG, RTC_CNTL_LSLP_MEM_FORCE_PU, cfg.dig_fpu);
@@ -215,7 +218,6 @@ void rtc_sleep_get_default_config(uint32_t sleep_flags, rtc_sleep_config_t *out_
void rtc_sleep_init(rtc_sleep_config_t cfg) void rtc_sleep_init(rtc_sleep_config_t cfg)
{ {
if (cfg.lslp_mem_inf_fpu) { if (cfg.lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg); rtc_sleep_pu(pu_cfg);
} }
if (cfg.bt_pd_en) { if (cfg.bt_pd_en) {
@@ -397,7 +399,6 @@ static uint32_t rtc_sleep_finish(uint32_t lslp_mem_inf_fpu)
/* restore config if it is a light sleep */ /* restore config if it is a light sleep */
if (lslp_mem_inf_fpu) { if (lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg); rtc_sleep_pu(pu_cfg);
} }
return reject; return reject;

View File

@@ -5,6 +5,7 @@
*/ */
#include <stdint.h> #include <stdint.h>
#include "esp_attr.h"
#include "soc/soc.h" #include "soc/soc.h"
#include "soc/rtc.h" #include "soc/rtc.h"
#include "soc/rtc_cntl_reg.h" #include "soc/rtc_cntl_reg.h"
@@ -20,6 +21,8 @@
#define RTC_CNTL_MEM_FOLW_CPU (RTC_CNTL_SLOWMEM_FOLW_CPU | RTC_CNTL_FASTMEM_FOLW_CPU) #define RTC_CNTL_MEM_FOLW_CPU (RTC_CNTL_SLOWMEM_FOLW_CPU | RTC_CNTL_FASTMEM_FOLW_CPU)
static const DRAM_ATTR rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
/** /**
* Configure whether certain peripherals are powered up in sleep * Configure whether certain peripherals are powered up in sleep
* @param cfg power down flags as rtc_sleep_pu_config_t structure * @param cfg power down flags as rtc_sleep_pu_config_t structure
@@ -171,7 +174,6 @@ void rtc_sleep_get_default_config(uint32_t sleep_flags, rtc_sleep_config_t *out_
void rtc_sleep_init(rtc_sleep_config_t cfg) void rtc_sleep_init(rtc_sleep_config_t cfg)
{ {
if (cfg.lslp_mem_inf_fpu) { if (cfg.lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg); rtc_sleep_pu(pu_cfg);
} }
@@ -294,7 +296,6 @@ static uint32_t rtc_sleep_finish(uint32_t lslp_mem_inf_fpu)
/* restore config if it is a light sleep */ /* restore config if it is a light sleep */
if (lslp_mem_inf_fpu) { if (lslp_mem_inf_fpu) {
rtc_sleep_pu_config_t pu_cfg = RTC_SLEEP_PU_CONFIG_ALL(1);
rtc_sleep_pu(pu_cfg); rtc_sleep_pu(pu_cfg);
} }

View File

@@ -31,6 +31,7 @@
#include "soc/soc_caps.h" #include "soc/soc_caps.h"
#include "regi2c_ctrl.h" //For `REGI2C_ANA_CALI_PD_WORKAROUND`, temp #include "regi2c_ctrl.h" //For `REGI2C_ANA_CALI_PD_WORKAROUND`, temp
#include "hal/cache_hal.h"
#include "hal/wdt_hal.h" #include "hal/wdt_hal.h"
#include "hal/rtc_hal.h" #include "hal/rtc_hal.h"
#include "hal/uart_hal.h" #include "hal/uart_hal.h"
@@ -569,7 +570,11 @@ static uint32_t IRAM_ATTR esp_sleep_start(uint32_t pd_flags)
#endif #endif
#endif // SOC_PM_SUPPORT_DEEPSLEEP_CHECK_STUB_ONLY #endif // SOC_PM_SUPPORT_DEEPSLEEP_CHECK_STUB_ONLY
} else { } else {
/* Wait cache idle in cache suspend to avoid cache load wrong data after spi io isolation */
cache_hal_suspend(CACHE_TYPE_ALL);
result = call_rtc_sleep_start(reject_triggers, config.lslp_mem_inf_fpu); result = call_rtc_sleep_start(reject_triggers, config.lslp_mem_inf_fpu);
/* Resume cache for continue running */
cache_hal_resume(CACHE_TYPE_ALL);
} }
#if CONFIG_ESP_SLEEP_SYSTIMER_STALL_WORKAROUND #if CONFIG_ESP_SLEEP_SYSTIMER_STALL_WORKAROUND

View File

@@ -11,7 +11,11 @@ if(NOT CONFIG_HAL_WDT_USE_ROM_IMPL)
list(APPEND srcs "wdt_hal_iram.c") list(APPEND srcs "wdt_hal_iram.c")
endif() endif()
if(NOT ${target} STREQUAL "esp32") # We wrap Cache ROM APIs as Cache HAL APIs for: 1. internal ram ; 2. unified APIs
# ESP32 cache structure / ROM APIs are different and we have a patch `cache_hal_esp32.c` for it.
if(${target} STREQUAL "esp32")
list(APPEND srcs "esp32/cache_hal_esp32.c")
else()
list(APPEND srcs "cache_hal.c") list(APPEND srcs "cache_hal.c")
endif() endif()

View File

@@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@@ -52,6 +52,10 @@
typedef struct { typedef struct {
uint32_t data_autoload_flag; uint32_t data_autoload_flag;
uint32_t inst_autoload_flag; uint32_t inst_autoload_flag;
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
// There's no register indicating if cache is enabled on these chips, use sw flag to save this state.
volatile bool cache_enabled;
#endif
} cache_hal_context_t; } cache_hal_context_t;
static cache_hal_context_t ctx; static cache_hal_context_t ctx;
@@ -75,6 +79,10 @@ void cache_hal_init(void)
cache_ll_l1_enable_bus(1, CACHE_LL_DEFAULT_DBUS_MASK); cache_ll_l1_enable_bus(1, CACHE_LL_DEFAULT_DBUS_MASK);
cache_ll_l1_enable_bus(1, CACHE_LL_DEFAULT_IBUS_MASK); cache_ll_l1_enable_bus(1, CACHE_LL_DEFAULT_IBUS_MASK);
#endif #endif
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
ctx.cache_enabled = 1;
#endif
} }
void cache_hal_disable(cache_type_t type) void cache_hal_disable(cache_type_t type)
@@ -91,6 +99,10 @@ void cache_hal_disable(cache_type_t type)
Cache_Disable_DCache(); Cache_Disable_DCache();
} }
#endif #endif
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
ctx.cache_enabled = 0;
#endif
} }
void cache_hal_enable(cache_type_t type) void cache_hal_enable(cache_type_t type)
@@ -107,4 +119,57 @@ void cache_hal_enable(cache_type_t type)
Cache_Enable_DCache(ctx.data_autoload_flag); Cache_Enable_DCache(ctx.data_autoload_flag);
} }
#endif #endif
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
ctx.cache_enabled = 1;
#endif
}
/**
 * Suspend the selected cache unit(s): CPU access to the cache is stopped
 * for a while, without invalidating the cached contents.
 *
 * @param type  which cache(s) to suspend, see `cache_type_t`
 */
void cache_hal_suspend(cache_type_t type)
{
#if SOC_SHARED_IDCACHE_SUPPORTED
    //One shared unit serves both instructions and data, `type` is irrelevant
    Cache_Suspend_ICache();
#else
    switch (type) {
    case CACHE_TYPE_DATA:
        Cache_Suspend_DCache();
        break;
    case CACHE_TYPE_INSTRUCTION:
        Cache_Suspend_ICache();
        break;
    default:
        //CACHE_TYPE_ALL: suspend both units
        Cache_Suspend_ICache();
        Cache_Suspend_DCache();
        break;
    }
#endif
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
    //No HW register reflects this state on these chips, track it in SW
    ctx.cache_enabled = 0;
#endif
}
/**
 * Resume the selected cache unit(s) previously suspended by
 * `cache_hal_suspend`, restoring CPU access to the cache.
 *
 * @param type  which cache(s) to resume, see `cache_type_t`
 */
void cache_hal_resume(cache_type_t type)
{
#if SOC_SHARED_IDCACHE_SUPPORTED
    //One shared unit serves both instructions and data, `type` is irrelevant
    Cache_Resume_ICache(ctx.inst_autoload_flag);
#else
    switch (type) {
    case CACHE_TYPE_DATA:
        Cache_Resume_DCache(ctx.data_autoload_flag);
        break;
    case CACHE_TYPE_INSTRUCTION:
        Cache_Resume_ICache(ctx.inst_autoload_flag);
        break;
    default:
        //CACHE_TYPE_ALL: resume both units with their saved autoload flags
        Cache_Resume_ICache(ctx.inst_autoload_flag);
        Cache_Resume_DCache(ctx.data_autoload_flag);
        break;
    }
#endif
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
    //No HW register reflects this state on these chips, track it in SW
    ctx.cache_enabled = 1;
#endif
}
/**
 * Tell whether the corresponding cache is currently enabled.
 *
 * @param type  see `cache_type_t`
 * @return true: enabled; false: disabled
 */
bool cache_hal_is_cache_enabled(cache_type_t type)
{
    bool enabled;
#if CACHE_LL_ENABLE_DISABLE_STATE_SW
    //No HW register carries this state on these chips, report the SW-tracked flag
    enabled = ctx.cache_enabled;
#else
    enabled = cache_ll_l1_is_cache_enabled(0, type);
#endif
    return enabled;
}

View File

@@ -0,0 +1,40 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "hal/cache_ll.h"
#include "hal/cache_hal.h"
// Saved enabled-bus bitmaps captured by cache_hal_suspend() and restored by
// cache_hal_resume(); index is the cache/core ID (0 and 1).
static uint32_t s_cache_status[2];
/**
 * Suspend the cache (ESP32 patch of the cache HAL).
 *
 * Saves each cache unit's enabled-bus bitmap into s_cache_status, then
 * disables the unit. `type` is unused: ESP32 has one shared I/D cache
 * unit per core.
 *
 * @param type  unused on ESP32 (shared instruction/data cache)
 */
void cache_hal_suspend(cache_type_t type)
{
    // Capture the bus-enable state first so cache_hal_resume() can restore it
    s_cache_status[0] = cache_ll_l1_get_enabled_bus(0);
    cache_ll_l1_disable_cache(0);
#if !CONFIG_FREERTOS_UNICORE
    // Second cache unit only exists in dual-core builds
    s_cache_status[1] = cache_ll_l1_get_enabled_bus(1);
    cache_ll_l1_disable_cache(1);
#endif
}
/**
 * Resume the cache previously suspended by cache_hal_suspend().
 *
 * Re-enables each cache unit, then restores the enabled-bus bitmap that
 * was saved at suspend time. `type` is unused on ESP32.
 *
 * @param type  unused on ESP32 (shared instruction/data cache)
 */
void cache_hal_resume(cache_type_t type)
{
    cache_ll_l1_enable_cache(0);
    // Restore the bus mapping captured by cache_hal_suspend()
    cache_ll_l1_enable_bus(0, s_cache_status[0]);
#if !CONFIG_FREERTOS_UNICORE
    cache_ll_l1_enable_cache(1);
    cache_ll_l1_enable_bus(1, s_cache_status[1]);
#endif
}
/**
 * Tell whether the cache is enabled; in dual-core builds every core's
 * cache unit must be enabled for this to report true.
 *
 * @param type  unused on ESP32 (shared instruction/data cache)
 * @return true: all cache unit(s) enabled; false otherwise
 */
bool cache_hal_is_cache_enabled(cache_type_t type)
{
    bool enabled = cache_ll_l1_is_cache_enabled(0, CACHE_TYPE_ALL);
#if !CONFIG_FREERTOS_UNICORE
    if (enabled) {
        enabled = cache_ll_l1_is_cache_enabled(1, CACHE_TYPE_ALL);
    }
#endif
    return enabled;
}

View File

@@ -19,6 +19,66 @@
extern "C" { extern "C" {
#endif #endif
/**
 * @brief Enable a cache unit
 *
 * @param cache_id cache ID (when l1 cache is per core)
 */
__attribute__((always_inline))
static inline void cache_ll_l1_enable_cache(uint32_t cache_id)
{
    HAL_ASSERT(cache_id == 0 || cache_id == 1);
    switch (cache_id) {
    case 0:
        // PRO CPU cache unit
        DPORT_REG_SET_BIT(DPORT_PRO_CACHE_CTRL_REG, DPORT_PRO_CACHE_ENABLE);
        break;
    default:
        // APP CPU cache unit
        DPORT_REG_SET_BIT(DPORT_APP_CACHE_CTRL_REG, DPORT_APP_CACHE_ENABLE);
        break;
    }
}
/**
 * @brief Disable a cache unit
 *
 * Busy-waits until the cache state field reads 1 before clearing the
 * enable bit — presumably "idle", so no in-flight cache transaction is
 * cut off (same condition the ROM uses; TODO confirm against TRM).
 *
 * @param cache_id cache ID (when l1 cache is per core)
 */
__attribute__((always_inline))
static inline void cache_ll_l1_disable_cache(uint32_t cache_id)
{
    // Consistency fix: same argument contract as cache_ll_l1_enable_cache()
    HAL_ASSERT(cache_id == 0 || cache_id == 1);
    if (cache_id == 0) {
        while (DPORT_GET_PERI_REG_BITS2(DPORT_PRO_DCACHE_DBUG0_REG, DPORT_PRO_CACHE_STATE, DPORT_PRO_CACHE_STATE_S) != 1){
            ;
        }
        DPORT_REG_CLR_BIT(DPORT_PRO_CACHE_CTRL_REG, DPORT_PRO_CACHE_ENABLE);
    } else {
        while (DPORT_GET_PERI_REG_BITS2(DPORT_APP_DCACHE_DBUG0_REG, DPORT_APP_CACHE_STATE, DPORT_APP_CACHE_STATE_S) != 1){
            ;
        }
        DPORT_REG_CLR_BIT(DPORT_APP_CACHE_CTRL_REG, DPORT_APP_CACHE_ENABLE);
    }
}
/**
 * @brief Get the status of cache if it is enabled or not
 *
 * @param cache_id cache ID (when l1 cache is per core)
 * @param type see `cache_type_t`
 * @return enabled or not
 */
__attribute__((always_inline))
static inline bool cache_ll_l1_is_cache_enabled(uint32_t cache_id, cache_type_t type)
{
    HAL_ASSERT(cache_id == 0 || cache_id == 1);
    (void) type; // On ESP32 one cache unit is shared between I and D accesses
    if (cache_id == 0) {
        return DPORT_REG_GET_BIT(DPORT_PRO_CACHE_CTRL_REG, DPORT_PRO_CACHE_ENABLE) != 0;
    }
    return DPORT_REG_GET_BIT(DPORT_APP_CACHE_CTRL_REG, DPORT_APP_CACHE_ENABLE) != 0;
}
/** /**
* @brief Get the buses of a particular cache that are mapped to a virtual address range * @brief Get the buses of a particular cache that are mapped to a virtual address range
* *

View File

@@ -8,6 +8,7 @@
#pragma once #pragma once
#include <stdbool.h>
#include "soc/extmem_reg.h" #include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h" #include "soc/ext_mem_defs.h"
#include "hal/cache_types.h" #include "hal/cache_types.h"
@@ -35,6 +36,21 @@ extern "C" {
#define CACHE_LL_L1_ILG_EVENT_PRELOAD_OP_FAULT (1<<1) #define CACHE_LL_L1_ILG_EVENT_PRELOAD_OP_FAULT (1<<1)
#define CACHE_LL_L1_ILG_EVENT_SYNC_OP_FAULT (1<<0) #define CACHE_LL_L1_ILG_EVENT_SYNC_OP_FAULT (1<<0)
/**
 * @brief Get the status of cache if it is enabled or not
 *
 * @param cache_id cache ID (when l1 cache is per core)
 * @param type see `cache_type_t`
 * @return enabled or not
 */
__attribute__((always_inline))
static inline bool cache_ll_l1_is_cache_enabled(uint32_t cache_id, cache_type_t type)
{
    HAL_ASSERT(cache_id == 0);
    (void) type; // ESP32-C2 only has an ICache, so `type` does not matter
    bool icache_enabled = REG_GET_BIT(EXTMEM_ICACHE_CTRL_REG, EXTMEM_ICACHE_ENABLE);
    return icache_enabled;
}
/** /**
* @brief Get the buses of a particular cache that are mapped to a virtual address range * @brief Get the buses of a particular cache that are mapped to a virtual address range
* *

View File

@@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@@ -8,6 +8,7 @@
#pragma once #pragma once
#include <stdbool.h>
#include "soc/extmem_reg.h" #include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h" #include "soc/ext_mem_defs.h"
#include "hal/cache_types.h" #include "hal/cache_types.h"
@@ -35,6 +36,21 @@ extern "C" {
#define CACHE_LL_L1_ILG_EVENT_SYNC_OP_FAULT (1<<0) #define CACHE_LL_L1_ILG_EVENT_SYNC_OP_FAULT (1<<0)
/**
 * @brief Get the status of cache if it is enabled or not
 *
 * @param cache_id cache ID (when l1 cache is per core)
 * @param type see `cache_type_t`
 * @return enabled or not
 */
__attribute__((always_inline))
static inline bool cache_ll_l1_is_cache_enabled(uint32_t cache_id, cache_type_t type)
{
    HAL_ASSERT(cache_id == 0);
    (void) type; // ESP32-C3 only has an ICache, so `type` does not matter
    bool icache_enabled = REG_GET_BIT(EXTMEM_ICACHE_CTRL_REG, EXTMEM_ICACHE_ENABLE);
    return icache_enabled;
}
/** /**
* @brief Get the buses of a particular cache that are mapped to a virtual address range * @brief Get the buses of a particular cache that are mapped to a virtual address range
* *

View File

@@ -16,7 +16,7 @@
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
#define CACHE_LL_ENABLE_DISABLE_STATE_SW 1 //There's no register indicating cache enable/disable state, we need to use software way for this state.
#define CACHE_LL_DEFAULT_IBUS_MASK CACHE_BUS_IBUS0 #define CACHE_LL_DEFAULT_IBUS_MASK CACHE_BUS_IBUS0
#define CACHE_LL_DEFAULT_DBUS_MASK CACHE_BUS_DBUS0 #define CACHE_LL_DEFAULT_DBUS_MASK CACHE_BUS_DBUS0

View File

@@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@@ -22,6 +22,29 @@ extern "C" {
#define CACHE_LL_DEFAULT_IBUS_MASK CACHE_BUS_IBUS0 #define CACHE_LL_DEFAULT_IBUS_MASK CACHE_BUS_IBUS0
#define CACHE_LL_DEFAULT_DBUS_MASK CACHE_BUS_IBUS2 #define CACHE_LL_DEFAULT_DBUS_MASK CACHE_BUS_IBUS2
/**
 * @brief Get the status of cache if it is enabled or not
 *
 * @param cache_id cache ID (when l1 cache is per core)
 * @param type see `cache_type_t`
 * @return enabled or not
 */
__attribute__((always_inline))
static inline bool cache_ll_l1_is_cache_enabled(uint32_t cache_id, cache_type_t type)
{
    HAL_ASSERT(cache_id == 0);
    switch (type) {
    case CACHE_TYPE_INSTRUCTION:
        return REG_GET_BIT(EXTMEM_PRO_ICACHE_CTRL_REG, EXTMEM_PRO_ICACHE_ENABLE) != 0;
    case CACHE_TYPE_DATA:
        return REG_GET_BIT(EXTMEM_PRO_DCACHE_CTRL_REG, EXTMEM_PRO_DCACHE_ENABLE) != 0;
    default:
        // CACHE_TYPE_ALL: both ICache and DCache must be enabled
        return REG_GET_BIT(EXTMEM_PRO_ICACHE_CTRL_REG, EXTMEM_PRO_ICACHE_ENABLE) != 0
               && REG_GET_BIT(EXTMEM_PRO_DCACHE_CTRL_REG, EXTMEM_PRO_DCACHE_ENABLE) != 0;
    }
}
/** /**
* @brief Get the buses of a particular cache that are mapped to a virtual address range * @brief Get the buses of a particular cache that are mapped to a virtual address range

View File

@@ -8,6 +8,7 @@
#pragma once #pragma once
#include <stdbool.h>
#include "soc/extmem_reg.h" #include "soc/extmem_reg.h"
#include "soc/ext_mem_defs.h" #include "soc/ext_mem_defs.h"
#include "hal/cache_types.h" #include "hal/cache_types.h"
@@ -37,6 +38,29 @@ extern "C" {
#define CACHE_LL_L1_ILG_EVENT_ICACHE_PRELOAD_OP_FAULT (1<<1) #define CACHE_LL_L1_ILG_EVENT_ICACHE_PRELOAD_OP_FAULT (1<<1)
#define CACHE_LL_L1_ILG_EVENT_ICACHE_SYNC_OP_FAULT (1<<0) #define CACHE_LL_L1_ILG_EVENT_ICACHE_SYNC_OP_FAULT (1<<0)
/**
 * @brief Get the status of cache if it is enabled or not
 *
 * @param cache_id cache ID (when l1 cache is per core)
 * @param type see `cache_type_t`
 * @return enabled or not
 */
__attribute__((always_inline))
static inline bool cache_ll_l1_is_cache_enabled(uint32_t cache_id, cache_type_t type)
{
    HAL_ASSERT(cache_id == 0 || cache_id == 1);
    switch (type) {
    case CACHE_TYPE_INSTRUCTION:
        return REG_GET_BIT(EXTMEM_ICACHE_CTRL_REG, EXTMEM_ICACHE_ENABLE) != 0;
    case CACHE_TYPE_DATA:
        return REG_GET_BIT(EXTMEM_DCACHE_CTRL_REG, EXTMEM_DCACHE_ENABLE) != 0;
    default:
        // CACHE_TYPE_ALL: both ICache and DCache must be enabled
        return REG_GET_BIT(EXTMEM_ICACHE_CTRL_REG, EXTMEM_ICACHE_ENABLE) != 0
               && REG_GET_BIT(EXTMEM_DCACHE_CTRL_REG, EXTMEM_DCACHE_ENABLE) != 0;
    }
}
/** /**
* @brief Get the buses of a particular cache that are mapped to a virtual address range * @brief Get the buses of a particular cache that are mapped to a virtual address range

View File

@@ -1,12 +1,13 @@
/* /*
* SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2021-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
#pragma once #pragma once
#include <stdbool.h>
#include "hal/cache_types.h" #include "hal/cache_types.h"
#ifdef __cplusplus #ifdef __cplusplus
@@ -36,6 +37,86 @@ void cache_hal_disable(cache_type_t type);
*/ */
void cache_hal_enable(cache_type_t type); void cache_hal_enable(cache_type_t type);
/**
* @brief Suspend cache
*
 * Suspend the ICache or DCache or both. This suspends the CPU's access to the cache for a while, without invalidating the cached contents.
*
* @param type see `cache_type_t`
*
 * @note The function returns nothing; call `cache_hal_resume` afterwards to restore CPU access to the cache.
*/
void cache_hal_suspend(cache_type_t type);
/**
* @brief Resume cache
*
* Resume the ICache or DCache or both.
*
* @param type see `cache_type_t`
*/
void cache_hal_resume(cache_type_t type);
/**
* @brief Check if corresponding cache is enabled or not
*
* @param type see `cache_type_t`
*
* @return true: enabled; false: disabled
*/
bool cache_hal_is_cache_enabled(cache_type_t type);
/**
* @brief Invalidate cache supported addr
*
* Invalidate a Cache item for either ICache or DCache.
*
* @param vaddr Start address of the region to be invalidated
* @param size Size of the region to be invalidated
*/
void cache_hal_invalidate_addr(uint32_t vaddr, uint32_t size);
#if SOC_CACHE_WRITEBACK_SUPPORTED
/**
* @brief Writeback cache supported addr
*
* Writeback the DCache item to external memory
*
* @param vaddr Start address of the region to writeback
* @param size Size of the region to writeback
*/
void cache_hal_writeback_addr(uint32_t vaddr, uint32_t size);
#endif //#if SOC_CACHE_WRITEBACK_SUPPORTED
#if SOC_CACHE_FREEZE_SUPPORTED
/**
* @brief Freeze cache
*
* Freeze cache, CPU access to cache will be suspended, until the cache is unfrozen.
*
* @param type see `cache_type_t`
*/
void cache_hal_freeze(cache_type_t type);
/**
* @brief Unfreeze cache
*
* Unfreeze cache, CPU access to cache will be restored
*
* @param type see `cache_type_t`
*/
void cache_hal_unfreeze(cache_type_t type);
#endif //#if SOC_CACHE_FREEZE_SUPPORTED
/**
* @brief Get cache line size, in bytes
*
* @param type see `cache_type_t`
*
* @return cache line size, in bytes
*/
uint32_t cache_hal_get_cache_line_size(cache_type_t type);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@@ -2,7 +2,9 @@
archive: libhal.a archive: libhal.a
entries: entries:
mmu_hal (noflash) mmu_hal (noflash)
if IDF_TARGET_ESP32 = n: if IDF_TARGET_ESP32 = y:
cache_hal_esp32 (noflash)
else:
cache_hal (noflash) cache_hal (noflash)
spi_hal_iram (noflash) spi_hal_iram (noflash)
spi_slave_hal_iram (noflash) spi_slave_hal_iram (noflash)

View File

@@ -207,6 +207,10 @@ config SOC_SHARED_IDCACHE_SUPPORTED
bool bool
default y default y
config SOC_IDCACHE_PER_CORE
bool
default y
config SOC_MMU_LINEAR_ADDRESS_REGION_NUM config SOC_MMU_LINEAR_ADDRESS_REGION_NUM
int int
default 5 default 5

View File

@@ -133,7 +133,8 @@
/*-------------------------- CACHE/MMU CAPS ----------------------------------*/ /*-------------------------- CACHE/MMU CAPS ----------------------------------*/
#define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data #define SOC_SHARED_IDCACHE_SUPPORTED 1 //Shared Cache for both instructions and data within one core
#define SOC_IDCACHE_PER_CORE 1 //Independent Cache unit pre core
#define SOC_MMU_LINEAR_ADDRESS_REGION_NUM 5 #define SOC_MMU_LINEAR_ADDRESS_REGION_NUM 5

View File

@@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@@ -38,6 +38,8 @@
#include "soc/ext_mem_defs.h" #include "soc/ext_mem_defs.h"
#endif #endif
#include "esp_rom_spiflash.h" #include "esp_rom_spiflash.h"
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#include <soc/soc.h> #include <soc/soc.h>
#include "sdkconfig.h" #include "sdkconfig.h"
#ifndef CONFIG_FREERTOS_UNICORE #ifndef CONFIG_FREERTOS_UNICORE
@@ -52,22 +54,11 @@
static __attribute__((unused)) const char *TAG = "cache"; static __attribute__((unused)) const char *TAG = "cache";
#define DPORT_CACHE_BIT(cpuid, regid) DPORT_ ## cpuid ## regid
#define DPORT_CACHE_MASK(cpuid) (DPORT_CACHE_BIT(cpuid, _CACHE_MASK_OPSDRAM) | DPORT_CACHE_BIT(cpuid, _CACHE_MASK_DROM0) | \
DPORT_CACHE_BIT(cpuid, _CACHE_MASK_DRAM1) | DPORT_CACHE_BIT(cpuid, _CACHE_MASK_IROM0) | \
DPORT_CACHE_BIT(cpuid, _CACHE_MASK_IRAM1) | DPORT_CACHE_BIT(cpuid, _CACHE_MASK_IRAM0) )
#define DPORT_CACHE_VAL(cpuid) (~(DPORT_CACHE_BIT(cpuid, _CACHE_MASK_DROM0) | \
DPORT_CACHE_BIT(cpuid, _CACHE_MASK_DRAM1) | \
DPORT_CACHE_BIT(cpuid, _CACHE_MASK_IRAM0)))
#define DPORT_CACHE_GET_VAL(cpuid) (cpuid == 0) ? DPORT_CACHE_VAL(PRO) : DPORT_CACHE_VAL(APP)
#define DPORT_CACHE_GET_MASK(cpuid) (cpuid == 0) ? DPORT_CACHE_MASK(PRO) : DPORT_CACHE_MASK(APP)
static void spi_flash_disable_cache(uint32_t cpuid, uint32_t *saved_state); static void spi_flash_disable_cache(uint32_t cpuid, uint32_t *saved_state);
static void spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_state); static void spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_state);
// Used only on ROM impl. in idf, this param unused, cache status hold by hal
static uint32_t s_flash_op_cache_state[2]; static uint32_t s_flash_op_cache_state[2];
#ifndef CONFIG_FREERTOS_UNICORE #ifndef CONFIG_FREERTOS_UNICORE
@@ -203,13 +194,16 @@ void IRAM_ATTR spi_flash_disable_interrupts_caches_and_other_cpu(void)
// with non-iram interrupts and the scheduler disabled. None of these CPUs will // with non-iram interrupts and the scheduler disabled. None of these CPUs will
// touch external RAM or flash this way, so we can safely disable caches. // touch external RAM or flash this way, so we can safely disable caches.
spi_flash_disable_cache(cpuid, &s_flash_op_cache_state[cpuid]); spi_flash_disable_cache(cpuid, &s_flash_op_cache_state[cpuid]);
#if SOC_IDCACHE_PER_CORE
//only needed if cache(s) is per core
spi_flash_disable_cache(other_cpuid, &s_flash_op_cache_state[other_cpuid]); spi_flash_disable_cache(other_cpuid, &s_flash_op_cache_state[other_cpuid]);
#endif
} }
void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu(void) void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu(void)
{ {
const int cpuid = xPortGetCoreID(); const int cpuid = xPortGetCoreID();
const uint32_t other_cpuid = (cpuid == 0) ? 1 : 0;
#ifndef NDEBUG #ifndef NDEBUG
// Sanity check: flash operation ends on the same CPU as it has started // Sanity check: flash operation ends on the same CPU as it has started
assert(cpuid == s_flash_op_cpu); assert(cpuid == s_flash_op_cpu);
@@ -218,9 +212,13 @@ void IRAM_ATTR spi_flash_enable_interrupts_caches_and_other_cpu(void)
s_flash_op_cpu = -1; s_flash_op_cpu = -1;
#endif #endif
// Re-enable cache on both CPUs. After this, cache (flash and external RAM) should work again. // Re-enable cache. After this, cache (flash and external RAM) should work again.
spi_flash_restore_cache(cpuid, s_flash_op_cache_state[cpuid]); spi_flash_restore_cache(cpuid, s_flash_op_cache_state[cpuid]);
#if SOC_IDCACHE_PER_CORE
//only needed if cache(s) is per core
const uint32_t other_cpuid = (cpuid == 0) ? 1 : 0;
spi_flash_restore_cache(other_cpuid, s_flash_op_cache_state[other_cpuid]); spi_flash_restore_cache(other_cpuid, s_flash_op_cache_state[other_cpuid]);
#endif
if (xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) { if (xTaskGetSchedulerState() != taskSCHEDULER_NOT_STARTED) {
// Signal to spi_flash_op_block_task that flash operation is complete // Signal to spi_flash_op_block_task that flash operation is complete
@@ -334,6 +332,19 @@ void IRAM_ATTR spi_flash_enable_interrupts_caches_no_os(void)
#endif // CONFIG_FREERTOS_UNICORE #endif // CONFIG_FREERTOS_UNICORE
void IRAM_ATTR spi_flash_enable_cache(uint32_t cpuid)
{
#if CONFIG_IDF_TARGET_ESP32
uint32_t cache_value = cache_ll_l1_get_enabled_bus(cpuid);
// Re-enable cache on this CPU
spi_flash_restore_cache(cpuid, cache_value);
#else
spi_flash_restore_cache(0, 0); // TODO cache_value should be non-zero
#endif
}
/** /**
* The following two functions are replacements for Cache_Read_Disable and Cache_Read_Enable * The following two functions are replacements for Cache_Read_Disable and Cache_Read_Enable
* function in ROM. They are used to work around a bug where Cache_Read_Disable requires a call to * function in ROM. They are used to work around a bug where Cache_Read_Disable requires a call to
@@ -341,77 +352,17 @@ void IRAM_ATTR spi_flash_enable_interrupts_caches_no_os(void)
*/ */
static void IRAM_ATTR spi_flash_disable_cache(uint32_t cpuid, uint32_t *saved_state) static void IRAM_ATTR spi_flash_disable_cache(uint32_t cpuid, uint32_t *saved_state)
{ {
#if CONFIG_IDF_TARGET_ESP32 cache_hal_suspend(CACHE_TYPE_ALL);
uint32_t ret = 0;
const uint32_t cache_mask = DPORT_CACHE_GET_MASK(cpuid);
if (cpuid == 0) {
ret |= DPORT_GET_PERI_REG_BITS2(DPORT_PRO_CACHE_CTRL1_REG, cache_mask, 0);
while (DPORT_GET_PERI_REG_BITS2(DPORT_PRO_DCACHE_DBUG0_REG, DPORT_PRO_CACHE_STATE, DPORT_PRO_CACHE_STATE_S) != 1) {
;
}
DPORT_SET_PERI_REG_BITS(DPORT_PRO_CACHE_CTRL_REG, 1, 0, DPORT_PRO_CACHE_ENABLE_S);
}
#if !CONFIG_FREERTOS_UNICORE
else {
ret |= DPORT_GET_PERI_REG_BITS2(DPORT_APP_CACHE_CTRL1_REG, cache_mask, 0);
while (DPORT_GET_PERI_REG_BITS2(DPORT_APP_DCACHE_DBUG0_REG, DPORT_APP_CACHE_STATE, DPORT_APP_CACHE_STATE_S) != 1) {
;
}
DPORT_SET_PERI_REG_BITS(DPORT_APP_CACHE_CTRL_REG, 1, 0, DPORT_APP_CACHE_ENABLE_S);
}
#endif
*saved_state = ret;
#elif CONFIG_IDF_TARGET_ESP32S2
*saved_state = Cache_Suspend_ICache();
#elif CONFIG_IDF_TARGET_ESP32S3
uint32_t icache_state, dcache_state;
icache_state = Cache_Suspend_ICache() << 16;
dcache_state = Cache_Suspend_DCache();
*saved_state = icache_state | dcache_state;
#elif CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H2 || CONFIG_IDF_TARGET_ESP32C2
uint32_t icache_state;
icache_state = Cache_Suspend_ICache() << 16;
*saved_state = icache_state;
#endif
} }
static void IRAM_ATTR spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_state) static void IRAM_ATTR spi_flash_restore_cache(uint32_t cpuid, uint32_t saved_state)
{ {
#if CONFIG_IDF_TARGET_ESP32 cache_hal_resume(CACHE_TYPE_ALL);
const uint32_t cache_mask = DPORT_CACHE_GET_MASK(cpuid);
if (cpuid == 0) {
DPORT_SET_PERI_REG_BITS(DPORT_PRO_CACHE_CTRL_REG, 1, 1, DPORT_PRO_CACHE_ENABLE_S);
DPORT_SET_PERI_REG_BITS(DPORT_PRO_CACHE_CTRL1_REG, cache_mask, saved_state, 0);
}
#if !CONFIG_FREERTOS_UNICORE
else {
DPORT_SET_PERI_REG_BITS(DPORT_APP_CACHE_CTRL_REG, 1, 1, DPORT_APP_CACHE_ENABLE_S);
DPORT_SET_PERI_REG_BITS(DPORT_APP_CACHE_CTRL1_REG, cache_mask, saved_state, 0);
}
#endif
#elif CONFIG_IDF_TARGET_ESP32S2
Cache_Resume_ICache(saved_state);
#elif CONFIG_IDF_TARGET_ESP32S3
Cache_Resume_DCache(saved_state & 0xffff);
Cache_Resume_ICache(saved_state >> 16);
#elif CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H2 || CONFIG_IDF_TARGET_ESP32C2
Cache_Resume_ICache(saved_state >> 16);
#endif
} }
IRAM_ATTR bool spi_flash_cache_enabled(void) bool IRAM_ATTR spi_flash_cache_enabled(void)
{ {
#if CONFIG_IDF_TARGET_ESP32 return cache_hal_is_cache_enabled(CACHE_TYPE_ALL);
bool result = (DPORT_REG_GET_BIT(DPORT_PRO_CACHE_CTRL_REG, DPORT_PRO_CACHE_ENABLE) != 0);
#if portNUM_PROCESSORS == 2
result = result && (DPORT_REG_GET_BIT(DPORT_APP_CACHE_CTRL_REG, DPORT_APP_CACHE_ENABLE) != 0);
#endif
#elif CONFIG_IDF_TARGET_ESP32S2
bool result = (REG_GET_BIT(EXTMEM_PRO_ICACHE_CTRL_REG, EXTMEM_PRO_ICACHE_ENABLE) != 0);
#elif CONFIG_IDF_TARGET_ESP32S3 || CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H2 || CONFIG_IDF_TARGET_ESP32C2
bool result = (REG_GET_BIT(EXTMEM_ICACHE_CTRL_REG, EXTMEM_ICACHE_ENABLE) != 0);
#endif
return result;
} }
#if CONFIG_IDF_TARGET_ESP32S2 #if CONFIG_IDF_TARGET_ESP32S2
@@ -954,20 +905,7 @@ esp_err_t esp_enable_cache_wrap(bool icache_wrap_enable)
} }
return ESP_OK; return ESP_OK;
} }
#endif // CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H2 || CONFIG_IDF_TARGET_ESP32C2 #endif // CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32C2
void IRAM_ATTR spi_flash_enable_cache(uint32_t cpuid)
{
#if CONFIG_IDF_TARGET_ESP32
uint32_t cache_value = DPORT_CACHE_GET_VAL(cpuid);
cache_value &= DPORT_CACHE_GET_MASK(cpuid);
// Re-enable cache on this CPU
spi_flash_restore_cache(cpuid, cache_value);
#else
spi_flash_restore_cache(0, 0); // TODO cache_value should be non-zero
#endif
}
#if CONFIG_IDF_TARGET_ESP32S3 #if CONFIG_IDF_TARGET_ESP32S3
/*protect cache opreation*/ /*protect cache opreation*/

View File

@@ -6,3 +6,16 @@ set(EXTRA_COMPONENT_DIRS "$ENV{IDF_PATH}/components/spi_flash/test_apps/componen
include($ENV{IDF_PATH}/tools/cmake/project.cmake) include($ENV{IDF_PATH}/tools/cmake/project.cmake)
project(test_esp_flash_stress) project(test_esp_flash_stress)
if(CONFIG_COMPILER_DUMP_RTL_FILES)
add_custom_target(check_test_app_sections ALL
COMMAND ${PYTHON} $ENV{IDF_PATH}/tools/ci/check_callgraph.py
--rtl-dir ${CMAKE_BINARY_DIR}/esp-idf/driver/
--elf-file ${CMAKE_BINARY_DIR}/mspi_test.elf
find-refs
--from-sections=.iram0.text
--to-sections=.flash.text,.flash.rodata
--exit-code
DEPENDS ${elf}
)
endif()

View File

@@ -0,0 +1,5 @@
# This config lists merged freertos_flash no_optimization in UT all together.
CONFIG_ESP_SYSTEM_MEMPROT_FEATURE=n
CONFIG_FREERTOS_PLACE_FUNCTIONS_INTO_FLASH=y
CONFIG_COMPILER_OPTIMIZATION_NONE=y
CONFIG_COMPILER_DUMP_RTL_FILES=y