diff --git a/components/esp_hw_support/lowpower/port/esp32h4/rvsleep-frames.h b/components/esp_hw_support/lowpower/port/esp32h4/rvsleep-frames.h
new file mode 100644
index 0000000000..e7521996e0
--- /dev/null
+++ b/components/esp_hw_support/lowpower/port/esp32h4/rvsleep-frames.h
@@ -0,0 +1,214 @@
+/*
+ * SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef __RVSLEEP_FRAMES_H__
+#define __RVSLEEP_FRAMES_H__
+
+#include "sdkconfig.h"
+
+/* Align a value up to the nearest n-byte boundary, where n is a power of 2. */
+#define ALIGNUP(n, val) (((val) + (n) - 1) & -(n))
+
+#ifdef STRUCT_BEGIN
+#undef STRUCT_BEGIN
+#undef STRUCT_FIELD
+#undef STRUCT_AFIELD
+#undef STRUCT_END
+#endif
+
+#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__)
+#ifdef __clang__
+#define STRUCT_BEGIN .set RV_STRUCT_OFFSET, 0
+#define STRUCT_FIELD(ctype,size,asname,name) .set asname, RV_STRUCT_OFFSET; .set RV_STRUCT_OFFSET, asname + size
+#define STRUCT_AFIELD(ctype,size,asname,name,n) .set asname, RV_STRUCT_OFFSET;\
+    .set RV_STRUCT_OFFSET, asname + (size)*(n);
+#define STRUCT_END(sname) .set sname##Size, RV_STRUCT_OFFSET;
+#else // __clang__
+#define STRUCT_BEGIN .pushsection .text; .struct 0
+#define STRUCT_FIELD(ctype,size,asname,name) asname: .space size
+#define STRUCT_AFIELD(ctype,size,asname,name,n) asname: .space (size)*(n)
+#define STRUCT_END(sname) sname##Size:; .popsection
+#endif // __clang__
+#else
+#define STRUCT_BEGIN typedef struct {
+#define STRUCT_FIELD(ctype,size,asname,name) ctype name;
+#define STRUCT_AFIELD(ctype,size,asname,name,n) ctype name[n];
+#define STRUCT_END(sname) } sname;
+#endif
+
+/*
+ * -------------------------------------------------------------------------------
+ * RISC-V CORE CRITICAL REGISTER CONTEXT LAYOUT FOR SLEEP
+ * -------------------------------------------------------------------------------
+ */
+STRUCT_BEGIN
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MEPC,    mepc)     /* Machine Exception Program Counter */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_RA,      ra)       /* Return address */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_SP,      sp)       /* Stack pointer */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_GP,      gp)       /* Global pointer */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_TP,      tp)       /* Thread pointer */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_T0,      t0)       /* Temporary/alternate link register */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_T1,      t1)       /* t1-2: Temporaries */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_T2,      t2)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_S0,      s0)       /* Saved register/frame pointer */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_S1,      s1)       /* Saved register */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_A0,      a0)       /* a0-1: Function arguments/return values */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_A1,      a1)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_A2,      a2)       /* a2-7: Function arguments */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_A3,      a3)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_A4,      a4)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_A5,      a5)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_A6,      a6)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_A7,      a7)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_S2,      s2)       /* s2-11: Saved registers */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_S3,      s3)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_S4,      s4)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_S5,      s5)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_S6,      s6)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_S7,      s7)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_S8,      s8)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_S9,      s9)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_S10,     s10)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_S11,     s11)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_T3,      t3)       /* t3-6: Temporaries */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_T4,      t4)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_T5,      t5)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_T6,      t6)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MSTATUS, mstatus)  /* Machine Status */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MTVEC,   mtvec)    /* Machine Trap-Vector Base Address */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MCAUSE,  mcause)   /* Machine Trap Cause */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MTVAL,   mtval)    /* Machine Trap Value */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MIE,     mie)      /* Machine interrupt enable */
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MIP,     mip)      /* Machine interrupt pending */
+
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMUFUNC, pmufunc)  /* This field identifies whether the core is going
+                                                          * to sleep or has just been awakened. The lowest
+                                                          * 2 bits carry the indication: 3 means just
+                                                          * awakened, 1 means going to sleep */
+#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
+    STRUCT_FIELD (long, 4, RV_SLP_CSF_CTX_CRC, frame_crc) /* Used to check RvCoreCriticalSleepFrame integrity */
+#endif
+STRUCT_END(RvCoreCriticalSleepFrame)
+
+#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__)
+#define RV_SLEEP_CTX_SZ1 RvCoreCriticalSleepFrameSize
+#else
+#define RV_SLEEP_CTX_SZ1 sizeof(RvCoreCriticalSleepFrame)
+#endif
+
+/*
+ * Sleep stack frame size, after aligning up to a 16-byte boundary
+ */
+#define RV_SLEEP_CTX_FRMSZ (ALIGNUP(0x10, RV_SLEEP_CTX_SZ1))
+
+/*
+ * -------------------------------------------------------------------------------
+ * RISC-V CORE NON-CRITICAL REGISTER CONTEXT LAYOUT FOR SLEEP
+ * -------------------------------------------------------------------------------
+ */
+STRUCT_BEGIN
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MSCRATCH,      mscratch)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MISA,          misa)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MSCRATCHCSW,   mscratchcsw)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MSCRATCHCSW1,  mscratchcsw1)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MCYCLEH,       mcycleh)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MINSTRET,      minstret)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MINSTRETH,     minstreth)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MCOUNTEREN,    mcounteren)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MCOUNTINHIBIT, mcountinhibit)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MHPMCOUNTER8,  mhpmcounter8)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MHPMCOUNTER9,  mhpmcounter9)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MHPMCOUNTER13, mhpmcounter13)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MHPMCOUNTER8H, mhpmcounter8h)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MHPMCOUNTER9H, mhpmcounter9h)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MHPMCOUNTER13H, mhpmcounter13h)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MHPMEVENT8,    mhpmevent8)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MHPMEVENT9,    mhpmevent9)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MHPMEVENT13,   mhpmevent13)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MCONTEXT,      mcontext)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_USTATUS,       ustatus)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_UTVEC,         utvec)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_USCRATCH,      uscratch)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_UCAUSE,        ucause)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_UINTTHRESH,    uintthresh)
+
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_TSELECT,       tselect)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_TDATA1,        tdata1)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_TDATA2,        tdata2)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_TDATA3,        tdata3)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_TCONTROL,      tcontrol)
+
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR0,      pmpaddr0)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR1,      pmpaddr1)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR2,      pmpaddr2)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR3,      pmpaddr3)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR4,      pmpaddr4)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR5,      pmpaddr5)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR6,      pmpaddr6)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR7,      pmpaddr7)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR8,      pmpaddr8)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR9,      pmpaddr9)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR10,     pmpaddr10)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR11,     pmpaddr11)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR12,     pmpaddr12)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR13,     pmpaddr13)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR14,     pmpaddr14)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPADDR15,     pmpaddr15)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPCFG0,       pmpcfg0)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPCFG1,       pmpcfg1)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPCFG2,       pmpcfg2)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMPCFG3,       pmpcfg3)
+
+#if SOC_CPU_HAS_PMA
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR0,      pmaaddr0)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR1,      pmaaddr1)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR2,      pmaaddr2)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR3,      pmaaddr3)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR4,      pmaaddr4)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR5,      pmaaddr5)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR6,      pmaaddr6)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR7,      pmaaddr7)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR8,      pmaaddr8)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR9,      pmaaddr9)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR10,     pmaaddr10)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR11,     pmaaddr11)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR12,     pmaaddr12)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR13,     pmaaddr13)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR14,     pmaaddr14)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMAADDR15,     pmaaddr15)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG0,       pmacfg0)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG1,       pmacfg1)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG2,       pmacfg2)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG3,       pmacfg3)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG4,       pmacfg4)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG5,       pmacfg5)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG6,       pmacfg6)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG7,       pmacfg7)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG8,       pmacfg8)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG9,       pmacfg9)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG10,      pmacfg10)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG11,      pmacfg11)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG12,      pmacfg12)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG13,      pmacfg13)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG14,      pmacfg14)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_PMACFG15,      pmacfg15)
+#endif // SOC_CPU_HAS_PMA
+
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MCYCLE,        mcycle)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MTVT,          mtvt)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MINTTHRESH,    mintthresh)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MXSTATUS,      mxstatus)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MHCR,          mhcr)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MHINT,         mhint)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_MEXSTATUS,     mexstatus)
+    STRUCT_FIELD (long, 4, RV_SLP_CTX_JVT,           jvt)
+
+#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
+    STRUCT_FIELD (long, 4, RV_SLP_NCSF_CTX_CRC,      frame_crc) /* Used to check RvCoreNonCriticalSleepFrame integrity */
+#endif
+STRUCT_END(RvCoreNonCriticalSleepFrame)
+
+#endif /* #ifndef __RVSLEEP_FRAMES_H__ */
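Before the sleep_cpu.c port below, note how rvsleep-frames.h is consumed from both C and assembly. A minimal illustration (not part of the patch; DemoFrame and the DEMO_OFF_* names are hypothetical):

    /* Included from C, the STRUCT_* macros build a typedef; included from an
     * .S file, the same lines define byte offsets usable with lw/sw. */
    #include "rvsleep-frames.h"
    STRUCT_BEGIN                                  /* C: typedef struct {             */
        STRUCT_FIELD (long, 4, DEMO_OFF_A, a)     /* C: long a;  asm: DEMO_OFF_A = 0 */
        STRUCT_FIELD (long, 4, DEMO_OFF_B, b)     /* C: long b;  asm: DEMO_OFF_B = 4 */
    STRUCT_END(DemoFrame)                         /* C: } DemoFrame;  asm: DemoFrameSize = 8 */

This is why the assembly in sleep_cpu_asm.S can address fields as RV_SLP_CTX_RA(t0) while sleep_cpu.c dereferences the very same frame as a struct.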
diff --git a/components/esp_hw_support/lowpower/port/esp32h4/sleep_cpu.c b/components/esp_hw_support/lowpower/port/esp32h4/sleep_cpu.c
new file mode 100644
index 0000000000..0801d8e84e
--- /dev/null
+++ b/components/esp_hw_support/lowpower/port/esp32h4/sleep_cpu.c
@@ -0,0 +1,753 @@
+/*
+ * SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/lock.h>
+#include <sys/param.h>
+
+#include "esp_attr.h"
+#include "esp_check.h"
+#include "esp_sleep.h"
+#include "esp_log.h"
+#include "esp_crc.h"
+#include "freertos/FreeRTOS.h"
+#include "freertos/task.h"
+#include "esp_heap_caps.h"
+#include "soc/soc_caps.h"
+#include "esp_private/sleep_cpu.h"
+#include "esp_private/sleep_event.h"
+#include "sdkconfig.h"
+
+#if SOC_PMU_SUPPORTED
+#include "esp_private/esp_pmu.h"
+#else
+#include "hal/rtc_hal.h"
+#endif
+
+#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
+#include "esp_private/system_internal.h"
+#include "hal/clk_gate_ll.h"
+#include "hal/uart_hal.h"
+#endif
+
+#include "soc/rtc_periph.h"
+
+#include "esp32h4/rom/rtc.h"
+#include "rvsleep-frames.h"
+#include "soc/intpri_reg.h"
+#include "soc/cache_reg.h"
+#include "soc/clint_reg.h"
+#include "esp32h4/rom/cache.h"
+#include "esp_ipc_isr.h"
+#include "soc/pcr_reg.h"
+
+#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && !CONFIG_FREERTOS_UNICORE
+#include <stdatomic.h>
+#include "soc/hp_system_reg.h"
+typedef enum {
+    SMP_IDLE,
+    SMP_BACKUP_START,
+    SMP_BACKUP_DONE,
+    SMP_RESTORE_START,
+    SMP_RESTORE_DONE,
+    SMP_SKIP_RETENTION,
+} smp_retention_state_t;
+
+static DRAM_ATTR smp_retention_state_t s_smp_retention_state[portNUM_PROCESSORS];
+#endif
+
+static __attribute__((unused)) const char *TAG = "sleep";
+
+typedef struct {
+    uint32_t start;
+    uint32_t end;
+} cpu_domain_dev_regs_region_t;
+
+typedef struct {
+    cpu_domain_dev_regs_region_t *region;
+    int region_num;
+    uint32_t *regs_frame;
+} cpu_domain_dev_sleep_frame_t;
+
+/**
+ * Internal structure which holds all requested light sleep cpu retention parameters
+ */
+typedef struct {
+    struct {
+        RvCoreCriticalSleepFrame *critical_frame[portNUM_PROCESSORS];
+        RvCoreNonCriticalSleepFrame *non_critical_frame[portNUM_PROCESSORS];
+        cpu_domain_dev_sleep_frame_t *cache_config_frame;
+        cpu_domain_dev_sleep_frame_t *clint_frame[portNUM_PROCESSORS];
+        cpu_domain_dev_sleep_frame_t *clic_frame[portNUM_PROCESSORS];
+    } retent;
+} sleep_cpu_retention_t;
+
+static DRAM_ATTR __attribute__((unused)) sleep_cpu_retention_t s_cpu_retention;
+
+
+#if SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
+
+#define CUSTOM_CSR_MTVT         (0x307)
+#define CUSTOM_CSR_MNXTI        (0x345)
+#define CUSTOM_CSR_MINTTHRESH   (0x347)
+#define CUSTOM_CSR_MSCRATCHCSW  (0x348)
+#define CUSTOM_CSR_MSCRATCHCSW1 (0x349)
+#define CUSTOM_CSR_UINTTHRESH   (0x047)
+#define CUSTOM_CSR_UINTSTATUS   (0xCB1)
+#define CUSTOM_CSR_MXSTATUS     (0x7c0)
+#define CUSTOM_CSR_MHCR         (0x7c1)
+#define CUSTOM_CSR_MHINT        (0x7c5)
+#define CUSTOM_CSR_MEXSTATUS    (0x7e1)
+#define CUSTOM_CSR_JVT          (0x017)
+
+extern RvCoreCriticalSleepFrame *rv_core_critical_regs_frame[portNUM_PROCESSORS];
+
+static void * cpu_domain_dev_sleep_frame_alloc_and_init(const cpu_domain_dev_regs_region_t *regions, const int region_num)
+{
+    const int region_sz = sizeof(cpu_domain_dev_regs_region_t) * region_num;
+    int regs_frame_sz = 0;
+    for (int num = 0; num < region_num; num++) {
+        regs_frame_sz += regions[num].end - regions[num].start;
+    }
+    void *frame = heap_caps_malloc(sizeof(cpu_domain_dev_sleep_frame_t) + region_sz + regs_frame_sz, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
+    if (frame) {
+        cpu_domain_dev_regs_region_t *region = (cpu_domain_dev_regs_region_t *)(frame + sizeof(cpu_domain_dev_sleep_frame_t));
+        memcpy(region, regions, region_num * sizeof(cpu_domain_dev_regs_region_t));
+        void *regs_frame = frame + sizeof(cpu_domain_dev_sleep_frame_t) + region_sz;
+        memset(regs_frame, 0, regs_frame_sz);
+        *(cpu_domain_dev_sleep_frame_t *)frame = (cpu_domain_dev_sleep_frame_t) {
+            .region = region,
+            .region_num = region_num,
+            .regs_frame = (uint32_t *)regs_frame
+        };
+    }
+    return frame;
+}
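cpu_domain_dev_sleep_frame_alloc_and_init() packs the frame header, the copied region table, and the register backup words into one heap block. The resulting layout, as a descriptive comment derived from the code above:

    /*
     *   +------------------------------+ <- frame
     *   | cpu_domain_dev_sleep_frame_t | header: .region / .region_num / .regs_frame
     *   +------------------------------+ <- frame + sizeof(cpu_domain_dev_sleep_frame_t)
     *   | region[0 .. region_num-1]    | copied descriptors ({start, end} pairs)
     *   +------------------------------+ <- ... + region_sz
     *   | regs_frame[]                 | one uint32_t per 4 bytes of MMIO in all regions
     *   +------------------------------+
     */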
+
+static inline void * cpu_domain_cache_config_sleep_frame_alloc_and_init(void)
+{
+    const static cpu_domain_dev_regs_region_t regions[] = {
+        { .start = CACHE_L1_ICACHE_CTRL_REG, .end = CACHE_L1_DCACHE_CTRL_REG + 4 },
+        { .start = CACHE_L1_CACHE_WRAP_AROUND_CTRL_REG, .end = CACHE_L1_CACHE_WRAP_AROUND_CTRL_REG + 4 }
+    };
+    return cpu_domain_dev_sleep_frame_alloc_and_init(regions, sizeof(regions) / sizeof(regions[0]));
+}
+
+static inline void * cpu_domain_clint_sleep_frame_alloc_and_init(uint8_t core_id)
+{
+    const static cpu_domain_dev_regs_region_t regions[portNUM_PROCESSORS][3] = {
+        [0 ... portNUM_PROCESSORS - 1] = {
+            { .start = CLINT_MINT_SIP_REG, .end = CLINT_MINT_SIP_REG + 4 },
+            { .start = CLINT_MINT_MTIMECMP_L_REG, .end = CLINT_MINT_TIMECTL_REG + 4 },
+            { .start = CLINT_MINT_MTIME_L_REG, .end = CLINT_MINT_MTIME_H_REG + 4 },
+        }
+    };
+    return cpu_domain_dev_sleep_frame_alloc_and_init(regions[core_id], sizeof(regions[core_id]) / sizeof(cpu_domain_dev_regs_region_t));
+}
+
+static inline void * cpu_domain_clic_sleep_frame_alloc_and_init(uint8_t core_id)
+{
+    const static cpu_domain_dev_regs_region_t regions[portNUM_PROCESSORS][4] = {
+        [0 ... portNUM_PROCESSORS - 1] = {
+            { .start = CLIC_INT_CONFIG_REG, .end = CLIC_INT_THRESH_REG + 4 },
+            { .start = CLIC_INT_CTRL_REG(3), .end = CLIC_INT_CTRL_REG(3) + 4 },
+            { .start = CLIC_INT_CTRL_REG(7), .end = CLIC_INT_CTRL_REG(7) + 4 },
+            { .start = CLIC_INT_CTRL_REG(16), .end = CLIC_INT_CTRL_REG(47) + 4 },
+        }
+    };
+    return cpu_domain_dev_sleep_frame_alloc_and_init(regions[core_id], sizeof(regions[core_id]) / sizeof(cpu_domain_dev_regs_region_t));
+}
+
+static esp_err_t esp_sleep_cpu_retention_init_impl(void)
+{
+    for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) {
+        if (s_cpu_retention.retent.critical_frame[core_id] == NULL) {
+            void *frame = heap_caps_calloc(1, RV_SLEEP_CTX_FRMSZ, MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
+            if (frame == NULL) {
+                goto err;
+            }
+            s_cpu_retention.retent.critical_frame[core_id] = (RvCoreCriticalSleepFrame *)frame;
+            rv_core_critical_regs_frame[core_id] = (RvCoreCriticalSleepFrame *)frame;
+        }
+        if (s_cpu_retention.retent.non_critical_frame[core_id] == NULL) {
+            void *frame = heap_caps_calloc(1, sizeof(RvCoreNonCriticalSleepFrame), MALLOC_CAP_32BIT|MALLOC_CAP_INTERNAL);
+            if (frame == NULL) {
+                goto err;
+            }
+            s_cpu_retention.retent.non_critical_frame[core_id] = (RvCoreNonCriticalSleepFrame *)frame;
+        }
+        if (s_cpu_retention.retent.clic_frame[core_id] == NULL) {
+            void *frame = cpu_domain_clic_sleep_frame_alloc_and_init(core_id);
+            if (frame == NULL) {
+                goto err;
+            }
+            s_cpu_retention.retent.clic_frame[core_id] = (cpu_domain_dev_sleep_frame_t *)frame;
+        }
+        if (s_cpu_retention.retent.clint_frame[core_id] == NULL) {
+            void *frame = cpu_domain_clint_sleep_frame_alloc_and_init(core_id);
+            if (frame == NULL) {
+                goto err;
+            }
+            s_cpu_retention.retent.clint_frame[core_id] = (cpu_domain_dev_sleep_frame_t *)frame;
+        }
+    }
+    if (s_cpu_retention.retent.cache_config_frame == NULL) {
+        void *frame = cpu_domain_cache_config_sleep_frame_alloc_and_init();
+        if (frame == NULL) {
+            goto err;
+        }
+        s_cpu_retention.retent.cache_config_frame = (cpu_domain_dev_sleep_frame_t *)frame;
+    }
+#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && !CONFIG_FREERTOS_UNICORE
+    for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) {
+        atomic_init(&s_smp_retention_state[core_id], SMP_IDLE);
+    }
+#endif
+    return ESP_OK;
+err:
+    esp_sleep_cpu_retention_deinit();
+    return ESP_ERR_NO_MEM;
+}
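The `[0 ... portNUM_PROCESSORS - 1]` designators in the per-core region tables above use the GCC/Clang range-designator extension. A standalone illustration (not part of the patch):

    /* Both rows receive the same initializer: */
    static const int demo[2][3] = {
        [0 ... 1] = { 1, 2, 3 },    /* demo[0] and demo[1] are each {1, 2, 3} */
    };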
+
+static esp_err_t esp_sleep_cpu_retention_deinit_impl(void)
+{
+    for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) {
+        if (s_cpu_retention.retent.critical_frame[core_id]) {
+            heap_caps_free((void *)s_cpu_retention.retent.critical_frame[core_id]);
+            s_cpu_retention.retent.critical_frame[core_id] = NULL;
+            rv_core_critical_regs_frame[core_id] = NULL;
+        }
+        if (s_cpu_retention.retent.non_critical_frame[core_id]) {
+            heap_caps_free((void *)s_cpu_retention.retent.non_critical_frame[core_id]);
+            s_cpu_retention.retent.non_critical_frame[core_id] = NULL;
+        }
+        if (s_cpu_retention.retent.clic_frame[core_id]) {
+            heap_caps_free((void *)s_cpu_retention.retent.clic_frame[core_id]);
+            s_cpu_retention.retent.clic_frame[core_id] = NULL;
+        }
+        if (s_cpu_retention.retent.clint_frame[core_id]) {
+            heap_caps_free((void *)s_cpu_retention.retent.clint_frame[core_id]);
+            s_cpu_retention.retent.clint_frame[core_id] = NULL;
+        }
+    }
+    if (s_cpu_retention.retent.cache_config_frame) {
+        heap_caps_free((void *)s_cpu_retention.retent.cache_config_frame);
+        s_cpu_retention.retent.cache_config_frame = NULL;
+    }
+    return ESP_OK;
+}
+
+static inline IRAM_ATTR uint32_t save_mstatus_and_disable_global_int(void)
+{
+    uint32_t mstatus;
+    __asm__ __volatile__ (
+        "csrr %0, mstatus\n"
+        "csrci mstatus, 0x8\n"
+        : "=r"(mstatus)
+    );
+    return mstatus;
+}
+
+static inline IRAM_ATTR void restore_mstatus(uint32_t mstatus)
+{
+    __asm__ __volatile__ ("csrw mstatus, %0\n" :: "r"(mstatus));
+}
+
+static IRAM_ATTR RvCoreNonCriticalSleepFrame * rv_core_noncritical_regs_save(void)
+{
+    RvCoreNonCriticalSleepFrame *frame = s_cpu_retention.retent.non_critical_frame[esp_cpu_get_core_id()];
+
+    frame->mscratch  = RV_READ_CSR(mscratch);
+    frame->misa      = RV_READ_CSR(misa);
+    frame->tselect   = RV_READ_CSR(tselect);
+    frame->tdata1    = RV_READ_CSR(tdata1);
+    frame->tdata2    = RV_READ_CSR(tdata2);
+    frame->tdata3    = RV_READ_CSR(tdata3);
+    frame->tcontrol  = RV_READ_CSR(tcontrol);
+    frame->mscratchcsw  = RV_READ_CSR(CUSTOM_CSR_MSCRATCHCSW);
+    frame->mscratchcsw1 = RV_READ_CSR(CUSTOM_CSR_MSCRATCHCSW1);
+    frame->mcycleh   = RV_READ_CSR(mcycleh);
+    frame->minstret  = RV_READ_CSR(minstret);
+    frame->minstreth = RV_READ_CSR(minstreth);
+    frame->mcounteren    = RV_READ_CSR(mcounteren);
+    frame->mcountinhibit = RV_READ_CSR(mcountinhibit);
+    frame->mhpmcounter8  = RV_READ_CSR(mhpmcounter8);
+    frame->mhpmcounter9  = RV_READ_CSR(mhpmcounter9);
+    frame->mhpmcounter13 = RV_READ_CSR(mhpmcounter13);
+    frame->mhpmcounter8h = RV_READ_CSR(mhpmcounter8h);
+    frame->mhpmcounter9h = RV_READ_CSR(mhpmcounter9h);
+    frame->mhpmcounter13h = RV_READ_CSR(mhpmcounter13h);
+    frame->mhpmevent8    = RV_READ_CSR(mhpmevent8);
+    frame->mhpmevent9    = RV_READ_CSR(mhpmevent9);
+    frame->mhpmevent13   = RV_READ_CSR(mhpmevent13);
+    frame->mcontext  = RV_READ_CSR(mcontext);
+    frame->ustatus   = RV_READ_CSR(ustatus);
+    frame->utvec     = RV_READ_CSR(utvec);
+    frame->uscratch  = RV_READ_CSR(uscratch);
+    frame->ucause    = RV_READ_CSR(ucause);
+    frame->uintthresh = RV_READ_CSR(CUSTOM_CSR_UINTTHRESH);
+
+    frame->pmpaddr0  = RV_READ_CSR(pmpaddr0);
+    frame->pmpaddr1  = RV_READ_CSR(pmpaddr1);
+    frame->pmpaddr2  = RV_READ_CSR(pmpaddr2);
+    frame->pmpaddr3  = RV_READ_CSR(pmpaddr3);
+    frame->pmpaddr4  = RV_READ_CSR(pmpaddr4);
+    frame->pmpaddr5  = RV_READ_CSR(pmpaddr5);
+    frame->pmpaddr6  = RV_READ_CSR(pmpaddr6);
+    frame->pmpaddr7  = RV_READ_CSR(pmpaddr7);
+    frame->pmpaddr8  = RV_READ_CSR(pmpaddr8);
+    frame->pmpaddr9  = RV_READ_CSR(pmpaddr9);
+    frame->pmpaddr10 = RV_READ_CSR(pmpaddr10);
+    frame->pmpaddr11 = RV_READ_CSR(pmpaddr11);
+    frame->pmpaddr12 = RV_READ_CSR(pmpaddr12);
+    frame->pmpaddr13 = RV_READ_CSR(pmpaddr13);
+    frame->pmpaddr14 = RV_READ_CSR(pmpaddr14);
+    frame->pmpaddr15 = RV_READ_CSR(pmpaddr15);
+    frame->pmpcfg0   = RV_READ_CSR(pmpcfg0);
+    frame->pmpcfg1   = RV_READ_CSR(pmpcfg1);
+    frame->pmpcfg2   = RV_READ_CSR(pmpcfg2);
+    frame->pmpcfg3   = RV_READ_CSR(pmpcfg3);
+
+#if SOC_CPU_HAS_PMA
+    frame->pmaaddr0  = RV_READ_CSR(CSR_PMAADDR(0));
+    frame->pmaaddr1  = RV_READ_CSR(CSR_PMAADDR(1));
+    frame->pmaaddr2  = RV_READ_CSR(CSR_PMAADDR(2));
+    frame->pmaaddr3  = RV_READ_CSR(CSR_PMAADDR(3));
+    frame->pmaaddr4  = RV_READ_CSR(CSR_PMAADDR(4));
+    frame->pmaaddr5  = RV_READ_CSR(CSR_PMAADDR(5));
+    frame->pmaaddr6  = RV_READ_CSR(CSR_PMAADDR(6));
+    frame->pmaaddr7  = RV_READ_CSR(CSR_PMAADDR(7));
+    frame->pmaaddr8  = RV_READ_CSR(CSR_PMAADDR(8));
+    frame->pmaaddr9  = RV_READ_CSR(CSR_PMAADDR(9));
+    frame->pmaaddr10 = RV_READ_CSR(CSR_PMAADDR(10));
+    frame->pmaaddr11 = RV_READ_CSR(CSR_PMAADDR(11));
+    frame->pmaaddr12 = RV_READ_CSR(CSR_PMAADDR(12));
+    frame->pmaaddr13 = RV_READ_CSR(CSR_PMAADDR(13));
+    frame->pmaaddr14 = RV_READ_CSR(CSR_PMAADDR(14));
+    frame->pmaaddr15 = RV_READ_CSR(CSR_PMAADDR(15));
+    frame->pmacfg0   = RV_READ_CSR(CSR_PMACFG(0));
+    frame->pmacfg1   = RV_READ_CSR(CSR_PMACFG(1));
+    frame->pmacfg2   = RV_READ_CSR(CSR_PMACFG(2));
+    frame->pmacfg3   = RV_READ_CSR(CSR_PMACFG(3));
+    frame->pmacfg4   = RV_READ_CSR(CSR_PMACFG(4));
+    frame->pmacfg5   = RV_READ_CSR(CSR_PMACFG(5));
+    frame->pmacfg6   = RV_READ_CSR(CSR_PMACFG(6));
+    frame->pmacfg7   = RV_READ_CSR(CSR_PMACFG(7));
+    frame->pmacfg8   = RV_READ_CSR(CSR_PMACFG(8));
+    frame->pmacfg9   = RV_READ_CSR(CSR_PMACFG(9));
+    frame->pmacfg10  = RV_READ_CSR(CSR_PMACFG(10));
+    frame->pmacfg11  = RV_READ_CSR(CSR_PMACFG(11));
+    frame->pmacfg12  = RV_READ_CSR(CSR_PMACFG(12));
+    frame->pmacfg13  = RV_READ_CSR(CSR_PMACFG(13));
+    frame->pmacfg14  = RV_READ_CSR(CSR_PMACFG(14));
+    frame->pmacfg15  = RV_READ_CSR(CSR_PMACFG(15));
+#endif // SOC_CPU_HAS_PMA
+
+    frame->mcycle    = RV_READ_CSR(mcycle);
+    frame->mtvt      = RV_READ_CSR(CUSTOM_CSR_MTVT);
+    frame->mintthresh = RV_READ_CSR(CUSTOM_CSR_MINTTHRESH);
+    frame->mxstatus  = RV_READ_CSR(CUSTOM_CSR_MXSTATUS);
+    frame->mhcr      = RV_READ_CSR(CUSTOM_CSR_MHCR);
+    frame->mhint     = RV_READ_CSR(CUSTOM_CSR_MHINT);
+    frame->mexstatus = RV_READ_CSR(CUSTOM_CSR_MEXSTATUS);
+    frame->jvt       = RV_READ_CSR(CUSTOM_CSR_JVT);
+    return frame;
+}
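rv_core_noncritical_regs_save() runs with M-mode interrupts masked; the two helpers above provide the mask/unmask pair. A usage sketch built only from functions in this file (illustration only):

    static void demo_critical_window(void)    /* illustration only */
    {
        /* csrci mstatus, 0x8 clears mstatus.MIE (bit 3) and hands back the old value */
        uint32_t saved = save_mstatus_and_disable_global_int();
        /* ... CSR and MMIO snapshots are taken with interrupts masked ... */
        restore_mstatus(saved);    /* write the saved mstatus back verbatim */
    }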
+
+static IRAM_ATTR void rv_core_noncritical_regs_restore(void)
+{
+    RvCoreNonCriticalSleepFrame *frame = s_cpu_retention.retent.non_critical_frame[esp_cpu_get_core_id()];
+
+    RV_WRITE_CSR(mscratch, frame->mscratch);
+    RV_WRITE_CSR(misa, frame->misa);
+    RV_WRITE_CSR(tselect, frame->tselect);
+    RV_WRITE_CSR(tdata1, frame->tdata1);
+    RV_WRITE_CSR(tdata2, frame->tdata2);
+    RV_WRITE_CSR(tdata3, frame->tdata3);
+    RV_WRITE_CSR(tcontrol, frame->tcontrol);
+    RV_WRITE_CSR(CUSTOM_CSR_MSCRATCHCSW, frame->mscratchcsw);
+    RV_WRITE_CSR(CUSTOM_CSR_MSCRATCHCSW1, frame->mscratchcsw1);
+    RV_WRITE_CSR(mcycleh, frame->mcycleh);
+    RV_WRITE_CSR(minstret, frame->minstret);
+    RV_WRITE_CSR(minstreth, frame->minstreth);
+    RV_WRITE_CSR(mcounteren, frame->mcounteren);
+    RV_WRITE_CSR(mcountinhibit, frame->mcountinhibit);
+    RV_WRITE_CSR(mhpmcounter8, frame->mhpmcounter8);
+    RV_WRITE_CSR(mhpmcounter9, frame->mhpmcounter9);
+    RV_WRITE_CSR(mhpmcounter13, frame->mhpmcounter13);
+    RV_WRITE_CSR(mhpmcounter8h, frame->mhpmcounter8h);
+    RV_WRITE_CSR(mhpmcounter9h, frame->mhpmcounter9h);
+    RV_WRITE_CSR(mhpmcounter13h, frame->mhpmcounter13h);
+    RV_WRITE_CSR(mhpmevent8, frame->mhpmevent8);
+    RV_WRITE_CSR(mhpmevent9, frame->mhpmevent9);
+    RV_WRITE_CSR(mhpmevent13, frame->mhpmevent13);
+    RV_WRITE_CSR(mcontext, frame->mcontext);
+    RV_WRITE_CSR(ustatus, frame->ustatus);
+    RV_WRITE_CSR(utvec, frame->utvec);
+    RV_WRITE_CSR(uscratch, frame->uscratch);
+    RV_WRITE_CSR(ucause, frame->ucause);
+    RV_WRITE_CSR(CUSTOM_CSR_UINTTHRESH, frame->uintthresh);
+
+    RV_WRITE_CSR(pmpaddr0, frame->pmpaddr0);
+    RV_WRITE_CSR(pmpaddr1, frame->pmpaddr1);
+    RV_WRITE_CSR(pmpaddr2, frame->pmpaddr2);
+    RV_WRITE_CSR(pmpaddr3, frame->pmpaddr3);
+    RV_WRITE_CSR(pmpaddr4, frame->pmpaddr4);
+    RV_WRITE_CSR(pmpaddr5, frame->pmpaddr5);
+    RV_WRITE_CSR(pmpaddr6, frame->pmpaddr6);
+    RV_WRITE_CSR(pmpaddr7, frame->pmpaddr7);
+    RV_WRITE_CSR(pmpaddr8, frame->pmpaddr8);
+    RV_WRITE_CSR(pmpaddr9, frame->pmpaddr9);
+    RV_WRITE_CSR(pmpaddr10, frame->pmpaddr10);
+    RV_WRITE_CSR(pmpaddr11, frame->pmpaddr11);
+    RV_WRITE_CSR(pmpaddr12, frame->pmpaddr12);
+    RV_WRITE_CSR(pmpaddr13, frame->pmpaddr13);
+    RV_WRITE_CSR(pmpaddr14, frame->pmpaddr14);
+    RV_WRITE_CSR(pmpaddr15, frame->pmpaddr15);
+    RV_WRITE_CSR(pmpcfg0, frame->pmpcfg0);
+    RV_WRITE_CSR(pmpcfg1, frame->pmpcfg1);
+    RV_WRITE_CSR(pmpcfg2, frame->pmpcfg2);
+    RV_WRITE_CSR(pmpcfg3, frame->pmpcfg3);
+
+#if SOC_CPU_HAS_PMA
+    RV_WRITE_CSR(CSR_PMAADDR(0), frame->pmaaddr0);
+    RV_WRITE_CSR(CSR_PMAADDR(1), frame->pmaaddr1);
+    RV_WRITE_CSR(CSR_PMAADDR(2), frame->pmaaddr2);
+    RV_WRITE_CSR(CSR_PMAADDR(3), frame->pmaaddr3);
+    RV_WRITE_CSR(CSR_PMAADDR(4), frame->pmaaddr4);
+    RV_WRITE_CSR(CSR_PMAADDR(5), frame->pmaaddr5);
+    RV_WRITE_CSR(CSR_PMAADDR(6), frame->pmaaddr6);
+    RV_WRITE_CSR(CSR_PMAADDR(7), frame->pmaaddr7);
+    RV_WRITE_CSR(CSR_PMAADDR(8), frame->pmaaddr8);
+    RV_WRITE_CSR(CSR_PMAADDR(9), frame->pmaaddr9);
+    RV_WRITE_CSR(CSR_PMAADDR(10), frame->pmaaddr10);
+    RV_WRITE_CSR(CSR_PMAADDR(11), frame->pmaaddr11);
+    RV_WRITE_CSR(CSR_PMAADDR(12), frame->pmaaddr12);
+    RV_WRITE_CSR(CSR_PMAADDR(13), frame->pmaaddr13);
+    RV_WRITE_CSR(CSR_PMAADDR(14), frame->pmaaddr14);
+    RV_WRITE_CSR(CSR_PMAADDR(15), frame->pmaaddr15);
+    RV_WRITE_CSR(CSR_PMACFG(0), frame->pmacfg0);
+    RV_WRITE_CSR(CSR_PMACFG(1), frame->pmacfg1);
+    RV_WRITE_CSR(CSR_PMACFG(2), frame->pmacfg2);
+    RV_WRITE_CSR(CSR_PMACFG(3), frame->pmacfg3);
+    RV_WRITE_CSR(CSR_PMACFG(4), frame->pmacfg4);
+    RV_WRITE_CSR(CSR_PMACFG(5), frame->pmacfg5);
+    RV_WRITE_CSR(CSR_PMACFG(6), frame->pmacfg6);
+    RV_WRITE_CSR(CSR_PMACFG(7), frame->pmacfg7);
+    RV_WRITE_CSR(CSR_PMACFG(8), frame->pmacfg8);
+    RV_WRITE_CSR(CSR_PMACFG(9), frame->pmacfg9);
+    RV_WRITE_CSR(CSR_PMACFG(10), frame->pmacfg10);
+    RV_WRITE_CSR(CSR_PMACFG(11), frame->pmacfg11);
+    RV_WRITE_CSR(CSR_PMACFG(12), frame->pmacfg12);
+    RV_WRITE_CSR(CSR_PMACFG(13), frame->pmacfg13);
+    RV_WRITE_CSR(CSR_PMACFG(14), frame->pmacfg14);
+    RV_WRITE_CSR(CSR_PMACFG(15), frame->pmacfg15);
+#endif //SOC_CPU_HAS_PMA
+
+    RV_WRITE_CSR(mcycle, frame->mcycle);
+
+    RV_WRITE_CSR(CUSTOM_CSR_MTVT, frame->mtvt);
+    RV_WRITE_CSR(CUSTOM_CSR_MINTTHRESH, frame->mintthresh);
+    RV_WRITE_CSR(CUSTOM_CSR_MXSTATUS, frame->mxstatus);
+    RV_WRITE_CSR(CUSTOM_CSR_MHCR, frame->mhcr);
+    RV_WRITE_CSR(CUSTOM_CSR_MHINT, frame->mhint);
+    RV_WRITE_CSR(CUSTOM_CSR_MEXSTATUS, frame->mexstatus);
+    RV_WRITE_CSR(CUSTOM_CSR_JVT, frame->jvt);
+}
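The two cpu_domain_dev_regs_* helpers below walk a region table one 32-bit word at a time. A hedged usage sketch; the MMIO range here is invented purely for illustration:

    static void demo_domain_backup(void)    /* illustration only */
    {
        static const cpu_domain_dev_regs_region_t demo_regions[] = {
            { .start = 0x60010000, .end = 0x60010000 + 8 },    /* two hypothetical registers */
        };
        cpu_domain_dev_sleep_frame_t *f = cpu_domain_dev_sleep_frame_alloc_and_init(demo_regions, 1);
        if (f) {
            cpu_domain_dev_regs_save(f);       /* copy both words into f->regs_frame */
            /* ... the power domain may now be switched off and on ... */
            cpu_domain_dev_regs_restore(f);    /* write them back in the same order */
        }
    }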
+
+static IRAM_ATTR void cpu_domain_dev_regs_save(cpu_domain_dev_sleep_frame_t *frame)
+{
+    assert(frame);
+    cpu_domain_dev_regs_region_t *region = frame->region;
+    uint32_t *regs_frame = frame->regs_frame;
+
+    int offset = 0;
+    for (int i = 0; i < frame->region_num; i++) {
+        for (uint32_t addr = region[i].start; addr < region[i].end; addr += 4) {
+            regs_frame[offset++] = *(uint32_t *)addr;
+        }
+    }
+}
+
+static IRAM_ATTR void cpu_domain_dev_regs_restore(cpu_domain_dev_sleep_frame_t *frame)
+{
+    assert(frame);
+    cpu_domain_dev_regs_region_t *region = frame->region;
+    uint32_t *regs_frame = frame->regs_frame;
+
+    int offset = 0;
+    for (int i = 0; i < frame->region_num; i++) {
+        for (uint32_t addr = region[i].start; addr < region[i].end; addr += 4) {
+            *(uint32_t *)addr = regs_frame[offset++];
+        }
+    }
+}
+
+#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
+static IRAM_ATTR void update_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr)
+{
+    *(frame_crc_ptr) = esp_crc32_le(0, (void *)frame_ptr, frame_check_size);
+}
+
+static IRAM_ATTR void validate_retention_frame_crc(uint32_t *frame_ptr, uint32_t frame_check_size, uint32_t *frame_crc_ptr)
+{
+    if (*(frame_crc_ptr) != esp_crc32_le(0, (void *)(frame_ptr), frame_check_size)) {
+        // Resume the UARTs
+        for (int i = 0; i < SOC_UART_NUM; ++i) {
+#ifndef CONFIG_IDF_TARGET_ESP32
+            if (!uart_ll_is_enabled(i)) {
+                continue;
+            }
+#endif
+            uart_ll_force_xon(i);
+        }
+
+        /* Since we are still in the critical section, use ESP_EARLY_LOG */
+        ESP_EARLY_LOGE(TAG, "Sleep retention frame is corrupted");
+        esp_restart_noos();
+    }
+}
+#endif
+
+extern RvCoreCriticalSleepFrame * rv_core_critical_regs_save(void);
+extern RvCoreCriticalSleepFrame * rv_core_critical_regs_restore(void);
+typedef uint32_t (* sleep_cpu_entry_cb_t)(uint32_t, uint32_t, uint32_t, bool);
+
+static IRAM_ATTR esp_err_t do_cpu_retention(sleep_cpu_entry_cb_t goto_sleep,
+                                            uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
+{
+    __attribute__((unused)) uint8_t core_id = esp_cpu_get_core_id();
+    RvCoreCriticalSleepFrame * frame = rv_core_critical_regs_save();
+    if ((frame->pmufunc & 0x3) == 0x1) {
+        esp_sleep_execute_event_callbacks(SLEEP_EVENT_SW_CPU_TO_MEM_END, (void *)0);
+#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
+        /* Subtract 2 * sizeof(long) to skip the `pmufunc` and `frame_crc` fields */
+        update_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc));
+#endif
+        REG_WRITE(RTC_SLEEP_WAKE_STUB_ADDR_REG, (uint32_t)rv_core_critical_regs_restore);
+
+#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && !CONFIG_FREERTOS_UNICORE
+        atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_DONE);
+        while (atomic_load(&s_smp_retention_state[!core_id]) != SMP_BACKUP_DONE) {
+            ;
+        }
+#endif
+
+        return (*goto_sleep)(wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);
+    }
+#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
+    else {
+        validate_retention_frame_crc((uint32_t*)frame, RV_SLEEP_CTX_FRMSZ - 2 * sizeof(long), (uint32_t *)(&frame->frame_crc));
+    }
+#endif
+
+    return pmu_sleep_finish(dslp);
+}
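do_cpu_retention() above effectively returns twice: once on the way down and once through the wake stub. The discriminator is the low-2-bit protocol on pmufunc, which rv_core_critical_regs_save() sets to ..01 and rv_core_critical_regs_restore() sets to ..11. Spelled out (illustration only):

    static bool demo_is_going_to_sleep(const RvCoreCriticalSleepFrame *frame)
    {
        /* 0x1: save path, about to power down; 0x3: restore path, just woke up */
        return (frame->pmufunc & 0x3) == 0x1;
    }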
+
+esp_err_t IRAM_ATTR esp_sleep_cpu_retention(uint32_t (*goto_sleep)(uint32_t, uint32_t, uint32_t, bool),
+                                            uint32_t wakeup_opt, uint32_t reject_opt, uint32_t lslp_mem_inf_fpu, bool dslp)
+{
+    esp_sleep_execute_event_callbacks(SLEEP_EVENT_SW_CPU_TO_MEM_START, (void *)0);
+    uint32_t mstatus = save_mstatus_and_disable_global_int();
+    uint8_t core_id = esp_cpu_get_core_id();
+#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && !CONFIG_FREERTOS_UNICORE
+    atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_START);
+#endif
+
+    cpu_domain_dev_regs_save(s_cpu_retention.retent.clint_frame[core_id]);
+    cpu_domain_dev_regs_save(s_cpu_retention.retent.clic_frame[core_id]);
+    cpu_domain_dev_regs_save(s_cpu_retention.retent.cache_config_frame);
+    rv_core_noncritical_regs_save();
+
+#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
+    RvCoreNonCriticalSleepFrame *frame = s_cpu_retention.retent.non_critical_frame[core_id];
+    /* Subtract sizeof(long) to skip the `frame_crc` field */
+    update_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc));
+#endif
+
+    esp_err_t err = do_cpu_retention(goto_sleep, wakeup_opt, reject_opt, lslp_mem_inf_fpu, dslp);
+
+#if CONFIG_PM_CHECK_SLEEP_RETENTION_FRAME
+    validate_retention_frame_crc((uint32_t*)frame, sizeof(RvCoreNonCriticalSleepFrame) - sizeof(long), (uint32_t *)(&frame->frame_crc));
+#endif
+
+#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && !CONFIG_FREERTOS_UNICORE
+    // Start core 1
+    if (core_id == 0) {
+        REG_SET_BIT(PCR_CORE1_CONF_REG, PCR_CORE1_CLK_EN);
+        REG_CLR_BIT(PCR_CORE1_CONF_REG, PCR_CORE1_RST_EN);
+    }
+
+    atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_START);
+#endif
+
+    rv_core_noncritical_regs_restore();
+    cpu_domain_dev_regs_restore(s_cpu_retention.retent.cache_config_frame);
+    cpu_domain_dev_regs_restore(s_cpu_retention.retent.clic_frame[core_id]);
+    cpu_domain_dev_regs_restore(s_cpu_retention.retent.clint_frame[core_id]);
+    restore_mstatus(mstatus);
+
+#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP && !CONFIG_FREERTOS_UNICORE
+    atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_DONE);
+#endif
+    return err;
+}
+
+#endif // SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW
+
+
+#if SOC_PM_SUPPORT_CPU_PD
+
+esp_err_t esp_sleep_cpu_retention_init(void)
+{
+    esp_err_t err = ESP_OK;
+#if SOC_PM_CPU_RETENTION_BY_SW
+    err = esp_sleep_cpu_retention_init_impl();
+#endif
+    return err;
+}
+
+esp_err_t esp_sleep_cpu_retention_deinit(void)
+{
+    esp_err_t err = ESP_OK;
+#if SOC_PM_CPU_RETENTION_BY_SW
+    err = esp_sleep_cpu_retention_deinit_impl();
+#endif
+    return err;
+}
+
+
+bool cpu_domain_pd_allowed(void)
+{
+    bool allowed = true;
+    for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) {
+        allowed &= (s_cpu_retention.retent.critical_frame[core_id] != NULL);
+        allowed &= (s_cpu_retention.retent.non_critical_frame[core_id] != NULL);
+    }
+    allowed &= (s_cpu_retention.retent.cache_config_frame != NULL);
+    for (uint8_t core_id = 0; core_id < portNUM_PROCESSORS; ++core_id) {
+        allowed &= (s_cpu_retention.retent.clic_frame[core_id] != NULL);
+        allowed &= (s_cpu_retention.retent.clint_frame[core_id] != NULL);
+    }
+    return allowed;
+}
+
+esp_err_t sleep_cpu_configure(bool light_sleep_enable)
+{
+#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP
+    if (light_sleep_enable) {
+        ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_init(), TAG, "Failed to enable CPU power down during light sleep.");
+    } else {
+        ESP_RETURN_ON_ERROR(esp_sleep_cpu_retention_deinit(), TAG, "Failed to release CPU retention memory");
+    }
+#endif
+    return ESP_OK;
+}
+
+#endif
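For the dual-core flow, the s_smp_retention_state[] handshake serializes both cores through backup, sleep, and restore. The normal path, summarized from the code above and below as a descriptive comment:

    /*
     *   this core (esp_sleep_cpu_retention)     peer core (smp_core_do_retention)
     *   SMP_BACKUP_START                        SMP_BACKUP_START
     *   SMP_BACKUP_DONE  <- both wait here ->   SMP_BACKUP_DONE
     *   ... PMU sleep / wakeup, core 1 restarted via PCR_CORE1_CONF_REG ...
     *   SMP_RESTORE_START                       SMP_RESTORE_START
     *   SMP_RESTORE_DONE                        SMP_RESTORE_DONE -> SMP_IDLE
     *
     * SMP_SKIP_RETENTION (set by esp_sleep_cpu_skip_retention() when the sleep
     * request is rejected) makes the peer abandon its wait loops.
     */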
+
+#if !CONFIG_FREERTOS_UNICORE
+#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP
+static IRAM_ATTR void smp_core_do_retention(void)
+{
+    uint8_t core_id = esp_cpu_get_core_id();
+
+    if (core_id == 0) {
+        WRITE_PERI_REG(INTPRI_CPU_INTR_FROM_CPU_2_REG, 0);
+    } else {
+        WRITE_PERI_REG(INTPRI_CPU_INTR_FROM_CPU_3_REG, 0);
+    }
+
+    // Wait for the other core to start retention
+    bool smp_skip_retention = false;
+    smp_retention_state_t another_core_state;
+    while (1) {
+        another_core_state = atomic_load(&s_smp_retention_state[!core_id]);
+        if (another_core_state == SMP_SKIP_RETENTION) {
+            // If the other core skips retention, the current core must skip it too.
+            smp_skip_retention = true;
+            break;
+        } else if (another_core_state == SMP_BACKUP_START) {
+            break;
+        }
+    }
+
+    if (!smp_skip_retention) {
+        atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_START);
+        uint32_t mstatus = save_mstatus_and_disable_global_int();
+        rv_core_noncritical_regs_save();
+        cpu_domain_dev_regs_save(s_cpu_retention.retent.clint_frame[core_id]);
+        cpu_domain_dev_regs_save(s_cpu_retention.retent.clic_frame[core_id]);
+        rv_core_critical_regs_save();
+        RvCoreCriticalSleepFrame *frame_critical = s_cpu_retention.retent.critical_frame[core_id];
+        if ((frame_critical->pmufunc & 0x3) == 0x1) {
+
+            atomic_store(&s_smp_retention_state[core_id], SMP_BACKUP_DONE);
+            // Wait for the other core to trigger sleep and wake up
+            while (1) {
+                // If the other core's sleep request is rejected by the hardware, jump out of the blocking loop.
+                another_core_state = atomic_load(&s_smp_retention_state[!core_id]);
+                if (another_core_state == SMP_SKIP_RETENTION) {
+                    break;
+                }
+            }
+        } else {
+
+            // Start core 1
+            if (core_id == 0) {
+                REG_SET_BIT(PCR_CORE1_CONF_REG, PCR_CORE1_CLK_EN);
+                REG_CLR_BIT(PCR_CORE1_CONF_REG, PCR_CORE1_RST_EN);
+            }
+            atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_START);
+            cpu_domain_dev_regs_restore(s_cpu_retention.retent.clic_frame[core_id]);
+            cpu_domain_dev_regs_restore(s_cpu_retention.retent.clint_frame[core_id]);
+            rv_core_noncritical_regs_restore();
+            restore_mstatus(mstatus);
+            atomic_store(&s_smp_retention_state[core_id], SMP_RESTORE_DONE);
+        }
+    }
+    // Wait for the other core to finish the sleep flow
+    while (atomic_load(&s_smp_retention_state[!core_id]) != SMP_IDLE) {
+        ;
+    }
+    atomic_store(&s_smp_retention_state[core_id], SMP_IDLE);
+}
+
+
+IRAM_ATTR void esp_sleep_cpu_skip_retention(void) {
+    atomic_store(&s_smp_retention_state[esp_cpu_get_core_id()], SMP_SKIP_RETENTION);
+}
+#endif
+
+void sleep_smp_cpu_sleep_prepare(void)
+{
+#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP
+    while (atomic_load(&s_smp_retention_state[!esp_cpu_get_core_id()]) != SMP_IDLE) {
+        ;
+    }
+    esp_ipc_isr_call((esp_ipc_isr_func_t)smp_core_do_retention, NULL);
+#else
+    esp_ipc_isr_stall_other_cpu();
+#endif
+}
+
+void sleep_smp_cpu_wakeup_prepare(void)
+{
+#if CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP
+    uint8_t core_id = esp_cpu_get_core_id();
+    if (atomic_load(&s_smp_retention_state[core_id]) == SMP_RESTORE_DONE) {
+        while (atomic_load(&s_smp_retention_state[!core_id]) != SMP_RESTORE_DONE) {
+            ;
+        }
+    }
+    atomic_store(&s_smp_retention_state[core_id], SMP_IDLE);
+#else
+    esp_ipc_isr_release_other_cpu();
+#endif
+}
+#endif //!CONFIG_FREERTOS_UNICORE
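A hedged sketch of how the SMP hooks above are expected to be driven; the real caller is the light-sleep flow in sleep_modes.c, which is not part of this diff:

    static void demo_smp_light_sleep(void)    /* illustration only */
    {
        sleep_smp_cpu_sleep_prepare();     /* parks the peer core in smp_core_do_retention() */
        /* ... esp_sleep_cpu_retention(...) runs here; if the sleep request is
         * rejected, the flow calls esp_sleep_cpu_skip_retention() so the peer
         * core leaves its wait loops ... */
        sleep_smp_cpu_wakeup_prepare();    /* rendezvous until both cores finish restoring */
    }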
"sdkconfig.h" +#include "freertos/FreeRTOSConfig.h" +#define MTVT (0x307) +#if !CONFIG_IDF_TARGET_ESP32C6 && !CONFIG_IDF_TARGET_ESP32H2 && !CONFIG_IDF_TARGET_ESP32C5 && !CONFIG_IDF_TARGET_ESP32H4 +#include "soc/lp_aon_reg.h" +#include "soc/extmem_reg.h" +#endif + + .section .data1,"aw" + .global rv_core_critical_regs_frame + .type rv_core_critical_regs_frame,@object + .align 4 +rv_core_critical_regs_frame: + .rept (portNUM_PROCESSORS) + .word 0 + .endr + +/* +-------------------------------------------------------------------------------- + This assembly subroutine is used to save the critical registers of the CPU + core to the internal RAM before sleep, and modify the PMU control flag to + indicate that the system needs to sleep. When the subroutine returns, it + will return the memory pointer that saves the context information of the CPU + critical registers. +-------------------------------------------------------------------------------- +*/ + + .section .iram1,"ax" + .global rv_core_critical_regs_save + .type rv_core_critical_regs_save,@function + .align 4 + +rv_core_critical_regs_save: + + /* arrived here in critical section. we need: + save riscv core critical registers to RvCoreCriticalSleepFrame + */ + csrw mscratch, t0 /* use mscratch as temp storage */ + la a0, rv_core_critical_regs_frame + csrr t1, mhartid + slli t1, t1, 2 + add a0, a0, t1 + lw t0, 0(a0) /* t0 pointer to RvCoreCriticalSleepFrame object */ + + sw ra, RV_SLP_CTX_RA(t0) + sw sp, RV_SLP_CTX_SP(t0) + sw gp, RV_SLP_CTX_GP(t0) + sw tp, RV_SLP_CTX_TP(t0) + sw t1, RV_SLP_CTX_T1(t0) + sw t2, RV_SLP_CTX_T2(t0) + sw s0, RV_SLP_CTX_S0(t0) + sw s1, RV_SLP_CTX_S1(t0) + + /* a0 is caller saved, so it does not need to be saved, but it should be the + pointer value of RvCoreCriticalSleepFrame for return. + */ + mv a0, t0 + sw a0, RV_SLP_CTX_A0(t0) + + sw a1, RV_SLP_CTX_A1(t0) + sw a2, RV_SLP_CTX_A2(t0) + sw a3, RV_SLP_CTX_A3(t0) + sw a4, RV_SLP_CTX_A4(t0) + sw a5, RV_SLP_CTX_A5(t0) + sw a6, RV_SLP_CTX_A6(t0) + sw a7, RV_SLP_CTX_A7(t0) + sw s2, RV_SLP_CTX_S2(t0) + sw s3, RV_SLP_CTX_S3(t0) + sw s4, RV_SLP_CTX_S4(t0) + sw s5, RV_SLP_CTX_S5(t0) + sw s6, RV_SLP_CTX_S6(t0) + sw s7, RV_SLP_CTX_S7(t0) + sw s8, RV_SLP_CTX_S8(t0) + sw s9, RV_SLP_CTX_S9(t0) + sw s10, RV_SLP_CTX_S10(t0) + sw s11, RV_SLP_CTX_S11(t0) + sw t3, RV_SLP_CTX_T3(t0) + sw t4, RV_SLP_CTX_T4(t0) + sw t5, RV_SLP_CTX_T5(t0) + sw t6, RV_SLP_CTX_T6(t0) + + csrr t1, mstatus + sw t1, RV_SLP_CTX_MSTATUS(t0) + csrr t2, mtvec + sw t2, RV_SLP_CTX_MTVEC(t0) + csrr t3, mcause + sw t3, RV_SLP_CTX_MCAUSE(t0) + + csrr t1, mtval + sw t1, RV_SLP_CTX_MTVAL(t0) + csrr t2, mie + sw t2, RV_SLP_CTX_MIE(t0) + csrr t3, mip + sw t3, RV_SLP_CTX_MIP(t0) + csrr t1, mepc + sw t1, RV_SLP_CTX_MEPC(t0) + + /* + !!! Let idf knows it's going to sleep !!! + + RV_SLP_STK_PMUFUNC field is used to identify whether it is going to sleep or + has just been awakened. We use the lowest 2 bits as indication information, + 3 means being awakened, 1 means going to sleep. + */ + li t1, ~0x3 + lw t2, RV_SLP_CTX_PMUFUNC(t0) + and t2, t1, t2 + ori t2, t2, 0x1 + sw t2, RV_SLP_CTX_PMUFUNC(t0) + + mv t3, t0 + csrr t0, mscratch + sw t0, RV_SLP_CTX_T0(t3) + +#if !CONFIG_IDF_TARGET_ESP32C6 && !CONFIG_IDF_TARGET_ESP32H2 && !CONFIG_IDF_TARGET_ESP32C5 && !CONFIG_IDF_TARGET_ESP32H4 + /* writeback dcache is required here!!! 
+
+
+#define CSR_PCER_U      0x800
+#define CSR_PCMR_U      0x801
+#define PCER_CYCLES     (1<<0)      /* count clock cycles */
+#define PCMR_GLOBAL_EN  (1<<0)      /* enable count */
+#define pcer CSR_PCER_U
+#define pcmr CSR_PCMR_U
+
+/*
+--------------------------------------------------------------------------------
+    This assembly subroutine restores the CPU core critical register context
+    that was saved before sleep, after the system wakes up. It updates the PMU
+    control information and returns the pointer to the critical register
+    context memory object. After the subroutine returns, the restore of the
+    other system modules continues.
+--------------------------------------------------------------------------------
+*/
+
+    .section    .iram1,"ax"
+    .global     rv_core_critical_regs_restore
+    .weak       rv_core_critical_regs_restore
+    .type       rv_core_critical_regs_restore,@function
+    .global     _rv_core_critical_regs_restore
+    .type       _rv_core_critical_regs_restore,@function
+    .align      4
+
+_rv_core_critical_regs_restore: /* export a strong symbol to jump to here, used
+                                 * for a static callback */
+    nop
+
+rv_core_critical_regs_restore:
+
+    la      t0, rv_core_critical_regs_frame
+    csrr    t1, mhartid
+    slli    t1, t1, 2
+    add     t0, t0, t1
+    lw      t0, 0(t0)   /* t0 points to the RvCoreCriticalSleepFrame object */
+    beqz    t0, .skip_restore   /* make sure we do not jump to a zero address */
+
+    /*
+     !!! Let IDF know the core has just been awakened !!!
+
+     The RV_SLP_CTX_PMUFUNC field identifies whether the core is going to
+     sleep or has just been awakened. The lowest 2 bits carry the indication:
+     3 means just awakened, 1 means going to sleep.
+     */
+    lw      t1, RV_SLP_CTX_PMUFUNC(t0)
+    ori     t1, t1, 0x3
+    sw      t1, RV_SLP_CTX_PMUFUNC(t0)
+
+    lw      t2, RV_SLP_CTX_MEPC(t0)
+    csrw    mepc, t2
+    lw      t3, RV_SLP_CTX_MIP(t0)
+    csrw    mip, t3
+    lw      t1, RV_SLP_CTX_MIE(t0)
+    csrw    mie, t1
+    lw      t2, RV_SLP_CTX_MSTATUS(t0)
+    csrw    mstatus, t2
+    lw      t3, RV_SLP_CTX_MTVEC(t0)
+    csrw    mtvec, t3
+    lw      t1, RV_SLP_CTX_MCAUSE(t0)
+    csrw    mcause, t1
+    lw      t2, RV_SLP_CTX_MTVAL(t0)
+    csrw    mtval, t2
+
+    lw      t6, RV_SLP_CTX_T6(t0)
+    lw      t5, RV_SLP_CTX_T5(t0)
+    lw      t4, RV_SLP_CTX_T4(t0)
+    lw      t3, RV_SLP_CTX_T3(t0)
+    lw      s11, RV_SLP_CTX_S11(t0)
+    lw      s10, RV_SLP_CTX_S10(t0)
+    lw      s9, RV_SLP_CTX_S9(t0)
+    lw      s8, RV_SLP_CTX_S8(t0)
+    lw      s7, RV_SLP_CTX_S7(t0)
+    lw      s6, RV_SLP_CTX_S6(t0)
+    lw      s5, RV_SLP_CTX_S5(t0)
+    lw      s4, RV_SLP_CTX_S4(t0)
+    lw      s3, RV_SLP_CTX_S3(t0)
+    lw      s2, RV_SLP_CTX_S2(t0)
+    lw      a7, RV_SLP_CTX_A7(t0)
+    lw      a6, RV_SLP_CTX_A6(t0)
+    lw      a5, RV_SLP_CTX_A5(t0)
+    lw      a4, RV_SLP_CTX_A4(t0)
+    lw      a3, RV_SLP_CTX_A3(t0)
+    lw      a2, RV_SLP_CTX_A2(t0)
+    lw      a1, RV_SLP_CTX_A1(t0)
+    lw      a0, RV_SLP_CTX_A0(t0)
+    lw      s1, RV_SLP_CTX_S1(t0)
+    lw      s0, RV_SLP_CTX_S0(t0)
+    lw      t2, RV_SLP_CTX_T2(t0)
+    lw      t1, RV_SLP_CTX_T1(t0)
+    lw      tp, RV_SLP_CTX_TP(t0)
+    lw      gp, RV_SLP_CTX_GP(t0)
+    lw      sp, RV_SLP_CTX_SP(t0)
+    lw      ra, RV_SLP_CTX_RA(t0)
+    lw      t0, RV_SLP_CTX_T0(t0)
+
+.skip_restore:
+    ret
+
+    .size   rv_core_critical_regs_restore, . - rv_core_critical_regs_restore
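A note on how the two routines above meet: before sleeping, sleep_cpu.c programs the wake stub (from do_cpu_retention() earlier in this patch):

    REG_WRITE(RTC_SLEEP_WAKE_STUB_ADDR_REG, (uint32_t)rv_core_critical_regs_restore);

so on wakeup the ROM jumps straight into the restore routine with no valid stack. That is why rv_core_critical_regs_restore rebuilds mepc, mstatus, and the general registers purely from the frame, keeps t0 as the frame base throughout, and reloads t0 itself last.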
diff --git a/components/esp_hw_support/sleep_modes.c b/components/esp_hw_support/sleep_modes.c
index d5e99cb43e..63a175682f 100644
--- a/components/esp_hw_support/sleep_modes.c
+++ b/components/esp_hw_support/sleep_modes.c
@@ -195,8 +195,8 @@
 #define DEFAULT_SLEEP_OUT_OVERHEAD_US (118)
 #define DEFAULT_HARDWARE_OUT_OVERHEAD_US (9)
 #elif CONFIG_IDF_TARGET_ESP32H4
-#define DEFAULT_SLEEP_OUT_OVERHEAD_US (118)
-#define DEFAULT_HARDWARE_OUT_OVERHEAD_US (9)
+#define DEFAULT_SLEEP_OUT_OVERHEAD_US (318)
+#define DEFAULT_HARDWARE_OUT_OVERHEAD_US (56)
 #elif CONFIG_IDF_TARGET_ESP32P4
 #define DEFAULT_SLEEP_OUT_OVERHEAD_US (324)
 #define DEFAULT_HARDWARE_OUT_OVERHEAD_US (240)
diff --git a/components/esp_system/system_init_fn.txt b/components/esp_system/system_init_fn.txt
index f5b74cd689..2d6238b0c5 100644
--- a/components/esp_system/system_init_fn.txt
+++ b/components/esp_system/system_init_fn.txt
@@ -86,6 +86,7 @@ SECONDARY: 106: sleep_clock_startup_init in components/esp_hw_support/lowpower/p
 SECONDARY: 106: sleep_clock_startup_init in components/esp_hw_support/lowpower/port/esp32c61/sleep_clock.c on BIT(0)
 SECONDARY: 106: sleep_clock_startup_init in components/esp_hw_support/lowpower/port/esp32h2/sleep_clock.c on BIT(0)
 SECONDARY: 106: sleep_clock_startup_init in components/esp_hw_support/lowpower/port/esp32h21/sleep_clock.c on BIT(0)
+SECONDARY: 106: sleep_clock_startup_init in components/esp_hw_support/lowpower/port/esp32h4/sleep_clock.c on BIT(0)
 SECONDARY: 106: sleep_clock_startup_init in components/esp_hw_support/lowpower/port/esp32p4/sleep_clock.c on BIT(0)
 SECONDARY: 107: sleep_sys_periph_startup_init in components/esp_hw_support/sleep_system_peripheral.c on BIT(0)
 SECONDARY: 108: sleep_mmu_startup_init in components/esp_hw_support/lowpower/port/esp32c5/sleep_mmu.c on BIT(0)
diff --git a/components/hal/esp32h4/include/hal/clk_gate_ll.h b/components/hal/esp32h4/include/hal/clk_gate_ll.h
new file mode 100644
index 0000000000..dbce34f14f
--- /dev/null
+++ b/components/hal/esp32h4/include/hal/clk_gate_ll.h
@@ -0,0 +1,92 @@
+/*
+ * SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#pragma once
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "esp_attr.h"
+#include "soc/pcr_struct.h"
+
+/**
+ * Enable or disable the clock gate for ref_8m.
+ * @param enable Enable / disable
+ */
+FORCE_INLINE_ATTR void _clk_gate_ll_ref_8m_clk_en(bool enable)
+{
+    PCR.pll_div_clk_en.pll_8m_clk_en = enable;
+}
+/// Use a macro to wrap the function, forcing the caller to use it in a critical section.
+/// The critical section must declare the __DECLARE_RCC_ATOMIC_ENV variable in advance.
+#define clk_gate_ll_ref_8m_clk_en(...) (void)__DECLARE_RCC_ATOMIC_ENV; _clk_gate_ll_ref_8m_clk_en(__VA_ARGS__)
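The function-plus-macro pattern in this header is how ESP-IDF forces clock-gate writes into an RCC critical section. A hedged usage sketch; PERIPH_RCC_ATOMIC() from esp_private/periph_ctrl.h is assumed here to be the block that declares __DECLARE_RCC_ATOMIC_ENV:

    #include "esp_private/periph_ctrl.h"
    static void demo_enable_ref_8m(void)    /* illustration only */
    {
        PERIPH_RCC_ATOMIC() {
            clk_gate_ll_ref_8m_clk_en(true);    /* compiles only where __DECLARE_RCC_ATOMIC_ENV exists */
        }
    }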
+
+/**
+ * Enable or disable the clock gate for ref_16m.
+ * @param enable Enable / disable
+ */
+FORCE_INLINE_ATTR void _clk_gate_ll_ref_16m_clk_en(bool enable)
+{
+    PCR.pll_div_clk_en.pll_16m_clk_en = enable;
+}
+/// Use a macro to wrap the function, forcing the caller to use it in a critical section.
+/// The critical section must declare the __DECLARE_RCC_ATOMIC_ENV variable in advance.
+#define clk_gate_ll_ref_16m_clk_en(...) (void)__DECLARE_RCC_ATOMIC_ENV; _clk_gate_ll_ref_16m_clk_en(__VA_ARGS__)
+
+/**
+ * Enable or disable the clock gate for ref_32m.
+ * @param enable Enable / disable
+ */
+FORCE_INLINE_ATTR void _clk_gate_ll_ref_32m_clk_en(bool enable)
+{
+    PCR.pll_div_clk_en.pll_32m_clk_en = enable;
+}
+/// Use a macro to wrap the function, forcing the caller to use it in a critical section.
+/// The critical section must declare the __DECLARE_RCC_ATOMIC_ENV variable in advance.
+#define clk_gate_ll_ref_32m_clk_en(...) (void)__DECLARE_RCC_ATOMIC_ENV; _clk_gate_ll_ref_32m_clk_en(__VA_ARGS__)
+
+/**
+ * Enable or disable the clock gate for ref_48m.
+ * @param enable Enable / disable
+ */
+FORCE_INLINE_ATTR void _clk_gate_ll_ref_48m_clk_en(bool enable)
+{
+    PCR.pll_div_clk_en.pll_48m_clk_en = enable;
+}
+/// Use a macro to wrap the function, forcing the caller to use it in a critical section.
+/// The critical section must declare the __DECLARE_RCC_ATOMIC_ENV variable in advance.
+#define clk_gate_ll_ref_48m_clk_en(...) (void)__DECLARE_RCC_ATOMIC_ENV; _clk_gate_ll_ref_48m_clk_en(__VA_ARGS__)
+
+/**
+ * Enable or disable the clock gate for ref_64m.
+ * @param enable Enable / disable
+ */
+FORCE_INLINE_ATTR void _clk_gate_ll_ref_64m_clk_en(bool enable)
+{
+    PCR.pll_div_clk_en.pll_64m_clk_en = enable;
+}
+/// Use a macro to wrap the function, forcing the caller to use it in a critical section.
+/// The critical section must declare the __DECLARE_RCC_ATOMIC_ENV variable in advance.
+#define clk_gate_ll_ref_64m_clk_en(...) (void)__DECLARE_RCC_ATOMIC_ENV; _clk_gate_ll_ref_64m_clk_en(__VA_ARGS__)
+
+/**
+ * Enable or disable the clock gate for ref_96m.
+ * @param enable Enable / disable
+ */
+FORCE_INLINE_ATTR void _clk_gate_ll_ref_96m_clk_en(bool enable)
+{
+    PCR.pll_div_clk_en.pll_96m_clk_en = enable;
+}
+/// Use a macro to wrap the function, forcing the caller to use it in a critical section.
+/// The critical section must declare the __DECLARE_RCC_ATOMIC_ENV variable in advance.
+#define clk_gate_ll_ref_96m_clk_en(...) (void)__DECLARE_RCC_ATOMIC_ENV; _clk_gate_ll_ref_96m_clk_en(__VA_ARGS__)
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/components/soc/esp32h4/include/soc/Kconfig.soc_caps.in b/components/soc/esp32h4/include/soc/Kconfig.soc_caps.in
index ca6afb1647..3384598489 100644
--- a/components/soc/esp32h4/include/soc/Kconfig.soc_caps.in
+++ b/components/soc/esp32h4/include/soc/Kconfig.soc_caps.in
@@ -883,6 +883,10 @@ config SOC_PM_SUPPORT_TOUCH_WAKEUP
     bool
     default y
 
+config SOC_PM_SUPPORT_CPU_PD
+    bool
+    default y
+
 config SOC_PM_SUPPORT_XTAL32K_PD
     bool
     default y
@@ -907,6 +911,10 @@ config SOC_PM_SUPPORT_MAC_BB_PD
     bool
     default y
 
+config SOC_PM_CPU_RETENTION_BY_SW
+    bool
+    default y
+
 config SOC_PM_PAU_LINK_NUM
     int
     default 4
diff --git a/components/soc/esp32h4/include/soc/soc_caps.h b/components/soc/esp32h4/include/soc/soc_caps.h
index 5b1cce4213..8c6671edcc 100644
--- a/components/soc/esp32h4/include/soc/soc_caps.h
+++ b/components/soc/esp32h4/include/soc/soc_caps.h
@@ -516,7 +516,7 @@
 // #define SOC_PM_SUPPORT_EXT1_WAKEUP (1)
 // #define SOC_PM_SUPPORT_EXT1_WAKEUP_MODE_PER_PIN (1) /*!
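Taken together: the two new SOC_PM_* caps enable the SOC_PM_SUPPORT_CPU_PD && SOC_PM_CPU_RETENTION_BY_SW code in sleep_cpu.c, and CONFIG_PM_POWER_DOWN_CPU_IN_LIGHT_SLEEP then decides whether sleep_cpu_configure() actually allocates the retention frames. A hedged caller sketch (the function is from this patch, the caller is hypothetical):

    #include "esp_private/sleep_cpu.h"
    static void demo_enable_cpu_power_down(void)
    {
        /* allocates the critical/non-critical/CLIC/CLINT/cache frames on success */
        if (sleep_cpu_configure(true) == ESP_OK) {
            /* light sleep may now power down the CPU domain (see cpu_domain_pd_allowed()) */
        }
    }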