Merge branch 'feat/h4_introduce_step6_esp_system' into 'master'

feat(esp32h4): support esp_system, esp_timer and freertos (stage6)

Closes IDF-12565

See merge request espressif/esp-idf!37269
morris
2025-03-12 14:10:23 +08:00
15 changed files with 1143 additions and 36 deletions

View File

@@ -34,6 +34,8 @@
#include "esp32p4/rom/rtc.h"
#elif CONFIG_IDF_TARGET_ESP32C5
#include "esp32c5/rom/rtc.h"
#elif CONFIG_IDF_TARGET_ESP32H4
#include "esp32h4/rom/rtc.h"
#endif
#include "esp_log.h"
#include "esp_rom_sys.h"
@@ -50,7 +52,7 @@ void bootloader_clock_configure(void)
{
s_warn();
esp_rom_output_tx_wait_idle(0);
#if CONFIG_IDF_TARGET_ESP32H21
#if CONFIG_IDF_TARGET_ESP32H21 || CONFIG_IDF_TARGET_ESP32H4
uint32_t xtal_freq_mhz = 32;
#else
uint32_t xtal_freq_mhz = 40;

View File

@@ -0,0 +1,96 @@
/*
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/* TODO: [ESP32H4] IDF-12305 inherited from verification branch, need check */
/**
* ESP32-H4 Linker Script Memory Layout
* This file describes the memory layout (memory blocks) by virtual memory addresses.
* This linker script is passed through the C preprocessor to include configuration options.
* Please use preprocessor features sparingly!
* Restrict to simple macros with numeric values, and/or #if/#endif blocks.
*/
#include "sdkconfig.h"
#include "ld.common"
/* TODO: IDF-12517 */
#define SRAM_SEG_START 0x40810000
#define SRAM_SEG_END 0x4084f350 /* 2nd stage bootloader iram_loader_seg start address */
#define SRAM_SEG_SIZE (SRAM_SEG_END - SRAM_SEG_START)
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
/*
* IDRAM0_2_SEG_SIZE is derived from the MMU page size (256 pages); with the default 64 KB page size this is 16 MB
*/
#define IDRAM0_2_SEG_SIZE (CONFIG_MMU_PAGE_SIZE << 8)
#endif
MEMORY
{
/**
* All these values assume the flash cache is on, and have the blocks this uses subtracted from the length
* of the various regions. The 'data access port' dram/drom regions map to the same iram/irom regions but
are connected to the data port of the CPU and, e.g., allow byte-wise access.
*/
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
/* Flash mapped instruction data */
irom_seg (RX) : org = 0x42000020, len = IDRAM0_2_SEG_SIZE - 0x20
/**
* (0x20 offset above is a convenience for the app binary image generation.
* Flash cache has 64KB pages. The .bin file which is flashed to the chip
* has a 0x18 byte file header, and each segment has a 0x08 byte segment
* header. Setting this offset makes it simple to meet the flash cache MMU's
* constraint that (paddr % 64KB == vaddr % 64KB).)
*/
#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS
/**
* Shared data RAM, excluding memory reserved for ROM bss/data/stack.
* Enabling Bluetooth & Trace Memory features in menuconfig will decrease the amount of RAM available.
*/
sram_seg (RWX) : org = SRAM_SEG_START, len = SRAM_SEG_SIZE
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
/* Flash mapped constant data */
drom_seg (R) : org = 0x42000020, len = IDRAM0_2_SEG_SIZE - 0x20
/* (See irom_seg for meaning of 0x20 offset in the above.) */
#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS
}
/* Heap ends at top of sram_seg */
_heap_end = 0x40000000;
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
REGION_ALIAS("default_code_seg", irom_seg);
#else
REGION_ALIAS("default_code_seg", sram_seg);
#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
REGION_ALIAS("default_rodata_seg", drom_seg);
#else
REGION_ALIAS("default_rodata_seg", sram_seg);
#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS
/**
* If rodata default segment is placed in `drom_seg`, then flash's first rodata section must
* also be first in the segment.
*/
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
ASSERT(_flash_rodata_dummy_start == ORIGIN(default_rodata_seg),
".flash_rodata_dummy section must be placed at the beginning of the rodata segment.")
#endif
#if CONFIG_ESP_SYSTEM_USE_EH_FRAME
ASSERT ((__eh_frame_end > __eh_frame), "Error: eh_frame size is null!");
ASSERT ((__eh_frame_hdr_end > __eh_frame_hdr), "Error: eh_frame_hdr size is null!");
#endif
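
As a worked illustration of the segment sizing and the 0x20 offset described above (assuming the default 64 KB MMU page size):

IDRAM0_2_SEG_SIZE = CONFIG_MMU_PAGE_SIZE << 8 = 0x10000 << 8 = 0x1000000 (16 MB)
irom_seg / drom_seg length = 0x1000000 - 0x20
app .bin layout: 0x18-byte image header + 0x08-byte segment header = 0x20 bytes before the first payload byte
first payload byte: flash paddr = 0x20, mapped vaddr = 0x42000020
0x20 % 0x10000 == 0x42000020 % 0x10000, so the MMU constraint (paddr % 64KB == vaddr % 64KB) is met without extra padding.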

View File

@@ -0,0 +1,316 @@
/*
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/* TODO: [ESP32H4] IDF-12305 inherited from verification branch, need check */
#include "ld.common"
/* Default entry point */
ENTRY(call_start_cpu0);
SECTIONS
{
.iram0.text :
{
_iram_start = ABSOLUTE(.);
/* Vectors go to start of IRAM */
ASSERT(ABSOLUTE(.) % 0x40 == 0, "vector address must be 64 byte aligned");
KEEP(*(.exception_vectors_table.text));
KEEP(*(.exception_vectors.text));
/* Code marked as running out of IRAM */
_iram_text_start = ABSOLUTE(.);
mapping[iram0_text]
} > sram_seg
/* Marks the end of IRAM code segment */
.iram0.text_end (NOLOAD) :
{
/* Align the end of code region as per PMP region granularity */
. = ALIGN(_esp_pmp_align_size);
ALIGNED_SYMBOL(4, _iram_text_end)
} > sram_seg
.iram0.data :
{
ALIGNED_SYMBOL(16, _iram_data_start)
mapping[iram0_data]
_iram_data_end = ABSOLUTE(.);
} > sram_seg
.iram0.bss (NOLOAD) :
{
ALIGNED_SYMBOL(16, _iram_bss_start)
mapping[iram0_bss]
_iram_bss_end = ABSOLUTE(.);
ALIGNED_SYMBOL(16, _iram_end)
} > sram_seg
.dram0.data :
{
_data_start = ABSOLUTE(.);
*(.gnu.linkonce.d.*)
*(.data1)
__global_pointer$ = . + 0x800;
*(.sdata)
*(.sdata.*)
*(.gnu.linkonce.s.*)
*(.gnu.linkonce.s2.*)
*(.jcr)
mapping[dram0_data]
_data_end = ABSOLUTE(.);
} > sram_seg
/**
* This section holds data that should not be initialized at power up.
The section is located in the internal SRAM memory region. The macro _NOINIT
can be used as an attribute to place data into this section.
* See the "esp_attr.h" file for more information.
*/
.noinit (NOLOAD):
{
ALIGNED_SYMBOL(4, _noinit_start)
*(.noinit .noinit.*)
ALIGNED_SYMBOL(4, _noinit_end)
} > sram_seg
/* Shared RAM */
.dram0.bss (NOLOAD) :
{
ALIGNED_SYMBOL(8, _bss_start)
/**
ldgen places all bss-related data into mapping[dram0_bss]
* (See components/esp_system/app.lf).
*/
mapping[dram0_bss]
ALIGNED_SYMBOL(8, _bss_end)
} > sram_seg
ASSERT(((_bss_end - ORIGIN(sram_seg)) <= LENGTH(sram_seg)), "DRAM segment data does not fit.")
.flash.text :
{
_stext = .;
/**
* Mark the start of flash.text.
* This can be used by the MMU driver to maintain the virtual address.
*/
_instruction_reserved_start = ABSOLUTE(.);
_text_start = ABSOLUTE(.);
mapping[flash_text]
*(.stub)
*(.gnu.linkonce.t.*)
*(.gnu.warning)
*(.irom0.text) /* catch stray ICACHE_RODATA_ATTR */
/**
The CPU will try to prefetch up to 16 bytes of instructions.
This means that any configuration (e.g. MMU, PMS) must allow
safe access to up to 16 bytes after the last real instruction;
dummy bytes are added here to ensure this.
*/
. += _esp_flash_mmap_prefetch_pad_size;
_text_end = ABSOLUTE(.);
/**
Mark the end of flash.text.
This can be used by the MMU driver to maintain the virtual address.
*/
_instruction_reserved_end = ABSOLUTE(.);
_etext = .;
/**
* Similar to _iram_start, this symbol goes here so it is
* resolved by addr2line in preference to the first symbol in
* the flash.text segment.
*/
_flash_cache_start = ABSOLUTE(0);
} > default_code_seg
/**
* Dummy section represents the .flash.text section but in default_rodata_seg.
* Thus, it must have its alignment and (at least) its size.
*/
.flash_rodata_dummy (NOLOAD):
{
_flash_rodata_dummy_start = .;
. = ALIGN(ALIGNOF(.flash.text)) + SIZEOF(.flash.text);
/* Add alignment of MMU page size + 0x20 bytes for the mapping header. */
. = ALIGN(_esp_mmu_page_size) + 0x20;
} > default_rodata_seg
.flash.appdesc : ALIGN(0x10)
{
/**
Mark the start of flash.rodata.
This can be used by the MMU driver to maintain the virtual address.
*/
_rodata_reserved_start = ABSOLUTE(.);
_rodata_start = ABSOLUTE(.);
/* !DO NOT PUT ANYTHING BEFORE THIS! */
/* Should be the first. App version info. */
*(.rodata_desc .rodata_desc.*)
/* Should be the second. Custom app version info. */
*(.rodata_custom_desc .rodata_custom_desc.*)
/**
* Create an empty gap within this section. Thanks to this, the end of this
* section will match .flash.rodata's begin address. Thus, both sections
* will be merged when creating the final bin image.
*/
. = ALIGN(ALIGNOF(.flash.rodata));
} > default_rodata_seg
ASSERT_SECTIONS_GAP(.flash.appdesc, .flash.rodata)
.flash.rodata : ALIGN(0x10)
{
_flash_rodata_start = ABSOLUTE(.);
mapping[flash_rodata]
*(.irom1.text) /* catch stray ICACHE_RODATA_ATTR */
*(.gnu.linkonce.r.*)
*(.rodata1)
*(.gcc_except_table .gcc_except_table.*)
*(.gnu.linkonce.e.*)
/**
* C++ constructor tables.
*
* Excluding crtbegin.o/crtend.o since IDF doesn't use the toolchain crt.
*
RISC-V gcc is configured with --enable-initfini-array, so it emits an
.init_array section instead. Constructors that specify an init_priority
are collected into a separate table (__init_priority_array) which is
iterated in ascending order during startup, while the remaining
.init_array entries are iterated in descending order.
The corresponding code can be found in startup.c.
*/
ALIGNED_SYMBOL(4, __init_priority_array_start)
KEEP (*(EXCLUDE_FILE (*crtend.* *crtbegin.*) .init_array.*))
__init_priority_array_end = ABSOLUTE(.);
ALIGNED_SYMBOL(4, __init_array_start)
KEEP (*(EXCLUDE_FILE (*crtend.* *crtbegin.*) .init_array))
__init_array_end = ABSOLUTE(.);
/* Addresses of memory regions reserved via SOC_RESERVE_MEMORY_REGION() */
ALIGNED_SYMBOL(4, soc_reserved_memory_region_start)
KEEP (*(.reserved_memory_address))
soc_reserved_memory_region_end = ABSOLUTE(.);
/* System init functions registered via ESP_SYSTEM_INIT_FN */
ALIGNED_SYMBOL(4, _esp_system_init_fn_array_start)
KEEP (*(SORT_BY_INIT_PRIORITY(.esp_system_init_fn.*)))
_esp_system_init_fn_array_end = ABSOLUTE(.);
_rodata_end = ABSOLUTE(.);
. = ALIGN(ALIGNOF(SECTION_AFTER_FLASH_RODATA));
} > default_rodata_seg
ASSERT_SECTIONS_GAP(.flash.rodata, SECTION_AFTER_FLASH_RODATA)
#if EH_FRAME_LINKING_ENABLED
.eh_frame_hdr :
{
ALIGNED_SYMBOL(4, __eh_frame_hdr)
KEEP (*(.eh_frame_hdr))
__eh_frame_hdr_end = ABSOLUTE(.);
. = ALIGN(ALIGNOF(.eh_frame));
} > default_rodata_seg
ASSERT_SECTIONS_GAP(.eh_frame_hdr, .eh_frame)
.eh_frame :
{
ALIGNED_SYMBOL(4, __eh_frame)
KEEP (*(.eh_frame))
/**
* As we are not linking with crtend.o, which includes the CIE terminator
* (see __FRAME_END__ in libgcc sources), it is manually provided here.
*/
LONG(0);
__eh_frame_end = ABSOLUTE(.);
. = ALIGN(ALIGNOF(.flash.tdata));
} > default_rodata_seg
ASSERT_SECTIONS_GAP(.eh_frame, .flash.tdata)
#endif // EH_FRAME_LINKING_ENABLED
.flash.tdata :
{
_thread_local_data_start = ABSOLUTE(.);
*(.tdata .tdata.* .gnu.linkonce.td.*)
. = ALIGN(ALIGNOF(.flash.tbss));
_thread_local_data_end = ABSOLUTE(.);
} > default_rodata_seg
ASSERT_SECTIONS_GAP(.flash.tdata, .flash.tbss)
.flash.tbss (NOLOAD) :
{
_thread_local_bss_start = ABSOLUTE(.);
*(.tbss .tbss.* .gnu.linkonce.tb.*)
*(.tcommon .tcommon.*)
_thread_local_bss_end = ABSOLUTE(.);
} > default_rodata_seg
/**
* This section contains all the rodata that is not used
* at runtime, helping to avoid an increase in binary size.
*/
.flash.rodata_noload (NOLOAD) :
{
/**
* This symbol marks the end of flash.rodata. It can be utilized by the MMU
* driver to maintain the virtual address.
* NOLOAD rodata may not be included in this section.
*/
_rodata_reserved_end = ADDR(.flash.tbss);
mapping[rodata_noload]
} > default_rodata_seg
/* Marks the end of data, bss and possibly rodata */
.dram0.heap_start (NOLOAD) :
{
ALIGNED_SYMBOL(16, _heap_start)
} > sram_seg
#include "elf_misc.ld.in"
}
ASSERT(((_iram_end - ORIGIN(sram_seg)) <= LENGTH(sram_seg)),
"IRAM0 segment data does not fit.")
ASSERT(((_heap_start - ORIGIN(sram_seg)) <= LENGTH(sram_seg)),
"DRAM segment data does not fit.")

View File

@@ -61,6 +61,8 @@
#include "hal/l2mem_ll.h"
#elif CONFIG_IDF_TARGET_ESP32H21
#include "esp_memprot.h"
#elif CONFIG_IDF_TARGET_ESP32H4
#include "esp_memprot.h"
#endif
#include "esp_private/cache_utils.h"
@@ -92,6 +94,7 @@
#include "hal/cache_hal.h"
#include "hal/cache_ll.h"
#include "hal/efuse_ll.h"
#include "hal/cpu_utility_ll.h"
#include "soc/periph_defs.h"
#include "esp_cpu.h"
#include "esp_private/esp_clk.h"
@@ -227,16 +230,8 @@ void IRAM_ATTR call_start_cpu1(void)
esp_rom_output_set_as_console(CONFIG_ESP_CONSOLE_ROM_SERIAL_PORT_NUM);
#endif
#if CONFIG_IDF_TARGET_ESP32
DPORT_REG_SET_BIT(DPORT_APP_CPU_RECORD_CTRL_REG, DPORT_APP_CPU_PDEBUG_ENABLE | DPORT_APP_CPU_RECORD_ENABLE);
DPORT_REG_CLR_BIT(DPORT_APP_CPU_RECORD_CTRL_REG, DPORT_APP_CPU_RECORD_ENABLE);
#elif CONFIG_IDF_TARGET_ESP32P4
REG_SET_BIT(ASSIST_DEBUG_CORE_1_RCD_EN_REG, ASSIST_DEBUG_CORE_1_RCD_PDEBUGEN);
REG_SET_BIT(ASSIST_DEBUG_CORE_1_RCD_EN_REG, ASSIST_DEBUG_CORE_1_RCD_RECORDEN);
#else
REG_WRITE(ASSIST_DEBUG_CORE_1_RCD_PDEBUGENABLE_REG, 1);
REG_WRITE(ASSIST_DEBUG_CORE_1_RCD_RECORDING_REG, 1);
#endif
cpu_utility_ll_enable_debug(1);
cpu_utility_ll_enable_record(1);
s_cpu_up[1] = true;
ESP_EARLY_LOGD(TAG, "App cpu up");
@@ -290,28 +285,7 @@ static void start_other_core(void)
// enabled clock and taken APP CPU out of reset. In this case don't reset
// APP CPU again, as that will clear the breakpoints which may have already
// been set.
#if CONFIG_IDF_TARGET_ESP32
if (!DPORT_GET_PERI_REG_MASK(DPORT_APPCPU_CTRL_B_REG, DPORT_APPCPU_CLKGATE_EN)) {
DPORT_SET_PERI_REG_MASK(DPORT_APPCPU_CTRL_B_REG, DPORT_APPCPU_CLKGATE_EN);
DPORT_CLEAR_PERI_REG_MASK(DPORT_APPCPU_CTRL_C_REG, DPORT_APPCPU_RUNSTALL);
DPORT_SET_PERI_REG_MASK(DPORT_APPCPU_CTRL_A_REG, DPORT_APPCPU_RESETTING);
DPORT_CLEAR_PERI_REG_MASK(DPORT_APPCPU_CTRL_A_REG, DPORT_APPCPU_RESETTING);
}
#elif CONFIG_IDF_TARGET_ESP32S3
if (!REG_GET_BIT(SYSTEM_CORE_1_CONTROL_0_REG, SYSTEM_CONTROL_CORE_1_CLKGATE_EN)) {
REG_SET_BIT(SYSTEM_CORE_1_CONTROL_0_REG, SYSTEM_CONTROL_CORE_1_CLKGATE_EN);
REG_CLR_BIT(SYSTEM_CORE_1_CONTROL_0_REG, SYSTEM_CONTROL_CORE_1_RUNSTALL);
REG_SET_BIT(SYSTEM_CORE_1_CONTROL_0_REG, SYSTEM_CONTROL_CORE_1_RESETING);
REG_CLR_BIT(SYSTEM_CORE_1_CONTROL_0_REG, SYSTEM_CONTROL_CORE_1_RESETING);
}
#elif CONFIG_IDF_TARGET_ESP32P4
if (!REG_GET_BIT(HP_SYS_CLKRST_SOC_CLK_CTRL0_REG, HP_SYS_CLKRST_REG_CORE1_CPU_CLK_EN)) {
REG_SET_BIT(HP_SYS_CLKRST_SOC_CLK_CTRL0_REG, HP_SYS_CLKRST_REG_CORE1_CPU_CLK_EN);
}
if (REG_GET_BIT(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_CORE1_GLOBAL)) {
REG_CLR_BIT(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_CORE1_GLOBAL);
}
#endif
cpu_utility_ll_enable_clock_and_reset_app_cpu();
ets_set_appcpu_boot_addr((uint32_t)call_start_cpu1);

View File

@@ -0,0 +1,8 @@
set(srcs "clk.c"
"reset_reason.c"
"system_internal.c"
"cache_err_int.c")
add_prefix(srcs "${CMAKE_CURRENT_LIST_DIR}/" ${srcs})
target_sources(${COMPONENT_LIB} PRIVATE ${srcs})

View File

@@ -0,0 +1,17 @@
choice ESP_DEFAULT_CPU_FREQ_MHZ
prompt "CPU frequency"
default ESP_DEFAULT_CPU_FREQ_MHZ_64 if IDF_ENV_FPGA
help
CPU frequency to be set on application startup.
config ESP_DEFAULT_CPU_FREQ_MHZ_32
bool "32 MHz"
depends on IDF_ENV_FPGA
config ESP_DEFAULT_CPU_FREQ_MHZ_64
bool "64 MHz"
endchoice
config ESP_DEFAULT_CPU_FREQ_MHZ
int
default 32 if ESP_DEFAULT_CPU_FREQ_MHZ_32
default 64 if ESP_DEFAULT_CPU_FREQ_MHZ_64
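
For example, selecting "64 MHz" in menuconfig yields the following generated configuration (illustrative sdkconfig fragment), which clk.c further down reads as CONFIG_ESP_DEFAULT_CPU_FREQ_MHZ when switching the CPU frequency at startup:

CONFIG_ESP_DEFAULT_CPU_FREQ_MHZ_64=y
CONFIG_ESP_DEFAULT_CPU_FREQ_MHZ=64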

View File

@@ -0,0 +1,45 @@
menu "Brownout Detector"
config ESP_BROWNOUT_DET
bool "Hardware brownout detect & reset"
depends on !IDF_ENV_FPGA
default y
help
The ESP32-H4 has a built-in brownout detector which can detect if the voltage is lower than
a specific value. If this happens, it will reset the chip in order to prevent unintended
behaviour.
choice ESP_BROWNOUT_DET_LVL_SEL
prompt "Brownout voltage level"
depends on ESP_BROWNOUT_DET
default ESP_BROWNOUT_DET_LVL_SEL_7
help
The brownout detector will reset the chip when the supply voltage drops below
approximately this level. Note that the brownout voltage level may vary somewhat
from chip to chip.
#The voltage levels here are estimates; more work needs to be done to determine
#the exact voltages of the brownout threshold levels.
config ESP_BROWNOUT_DET_LVL_SEL_7
bool "2.51V"
config ESP_BROWNOUT_DET_LVL_SEL_6
bool "2.64V"
config ESP_BROWNOUT_DET_LVL_SEL_5
bool "2.76V"
config ESP_BROWNOUT_DET_LVL_SEL_4
bool "2.92V"
config ESP_BROWNOUT_DET_LVL_SEL_3
bool "3.10V"
config ESP_BROWNOUT_DET_LVL_SEL_2
bool "3.27V"
endchoice
config ESP_BROWNOUT_DET_LVL
int
default 2 if ESP_BROWNOUT_DET_LVL_SEL_2
default 3 if ESP_BROWNOUT_DET_LVL_SEL_3
default 4 if ESP_BROWNOUT_DET_LVL_SEL_4
default 5 if ESP_BROWNOUT_DET_LVL_SEL_5
default 6 if ESP_BROWNOUT_DET_LVL_SEL_6
default 7 if ESP_BROWNOUT_DET_LVL_SEL_7
endmenu

View File

@@ -0,0 +1,80 @@
/*
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
The cache has an interrupt that can be raised as soon as an access to a cached
region (flash) is done without the cache being enabled. We use that here
to panic the CPU, which from a debugging perspective is better than grabbing bad
data from the bus.
*/
#include "esp_rom_sys.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_intr_alloc.h"
#include "soc/periph_defs.h"
#include "riscv/interrupt.h"
#include "hal/cache_ll.h"
#include "esp_private/cache_err_int.h"
// TODO: [ESP32H4] IDF-12288 inherited from verification branch, need check
static const char *TAG = "CACHE_ERR";
const char cache_error_msg[] = "Cache access error";
void esp_cache_err_get_panic_info(esp_cache_err_info_t *err_info)
{
if (err_info == NULL) {
return;
}
const uint32_t access_err_status = cache_ll_l1_get_access_error_intr_status(0, CACHE_LL_L1_ACCESS_EVENT_MASK);
/* Return the error string if a cache error is active */
err_info->err_str = access_err_status ? cache_error_msg : NULL;
}
bool esp_cache_err_has_active_err(void)
{
return cache_ll_l1_get_access_error_intr_status(0, CACHE_LL_L1_ACCESS_EVENT_MASK);
}
void esp_cache_err_int_init(void)
{
const uint32_t core_id = 0;
/* Disable cache interrupts if enabled. */
ESP_INTR_DISABLE(ETS_CACHEERR_INUM);
/**
Bind all cache errors to the ETS_CACHEERR_INUM interrupt; the handler then
distinguishes them by type.
*
On the ESP32-H4, the cache is shared but the buses are still distinct,
so bus0 and bus1 share the same cache. This error can occur if a bus
performs a request while the cache is disabled.
*/
esp_rom_route_intr_matrix(core_id, ETS_CACHE_INTR_SOURCE, ETS_CACHEERR_INUM);
/* Set the type and priority of the cache error interrupt. */
esprv_int_set_type(ETS_CACHEERR_INUM, INTR_TYPE_LEVEL);
esprv_int_set_priority(ETS_CACHEERR_INUM, SOC_INTERRUPT_LEVEL_MEDIUM);
ESP_DRAM_LOGV(TAG, "access error intr clr & ena mask is: 0x%x", CACHE_LL_L1_ACCESS_EVENT_MASK);
/* On the hardware side, start by clearing all the bits responsible for cache access error */
cache_ll_l1_clear_access_error_intr(0, CACHE_LL_L1_ACCESS_EVENT_MASK);
/* Then enable cache access error interrupts. */
cache_ll_l1_enable_access_error_intr(0, CACHE_LL_L1_ACCESS_EVENT_MASK);
/* Enable the interrupts for cache error. */
ESP_INTR_ENABLE(ETS_CACHEERR_INUM);
}
int esp_cache_err_get_cpuid(void)
{
// TODO
return 0;
}
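
A minimal sketch of how these helpers could be consumed from a panic path (hypothetical caller for illustration; the real panic handler lives elsewhere in esp_system):

#include "esp_rom_sys.h"
#include "esp_private/cache_err_int.h"

static void print_cache_err_if_any(void)
{
    if (esp_cache_err_has_active_err()) {
        esp_cache_err_info_t err_info;
        esp_cache_err_get_panic_info(&err_info);
        /* err_info.err_str points at "Cache access error" while the error is latched */
        esp_rom_printf("%s on core %d\n", err_info.err_str, esp_cache_err_get_cpuid());
    }
}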

View File

@@ -0,0 +1,178 @@
/*
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include <sys/cdefs.h>
#include <sys/time.h>
#include <sys/param.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_clk_internal.h"
#include "esp32h4/rom/ets_sys.h"
#include "esp32h4/rom/uart.h"
#include "soc/soc.h"
#include "soc/rtc.h"
#include "soc/rtc_periph.h"
#include "soc/i2s_reg.h"
#include "esp_cpu.h"
#include "hal/wdt_hal.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/esp_clk.h"
#include "esp_private/esp_pmu.h"
#include "esp_rom_uart.h"
#include "esp_rom_sys.h"
//TODO: [ESP32H4] IDF-12285 inherited from verification branch, need check
/* Number of cycles to wait from the 32k XTAL oscillator to consider it running.
* Larger values increase startup delay. Smaller values may cause false positive
* detection (i.e. oscillator runs for a few cycles and then stops).
*/
#define SLOW_CLK_CAL_CYCLES CONFIG_RTC_CLK_CAL_CYCLES
#define MHZ (1000000)
static void select_rtc_slow_clk(soc_rtc_slow_clk_src_t rtc_slow_clk_src);
static const char *TAG = "clk";
__attribute__((weak)) void esp_clk_init(void)
{
#if !CONFIG_IDF_ENV_FPGA
pmu_init();
assert(rtc_clk_xtal_freq_get() == RTC_XTAL_FREQ_40M);
rtc_clk_8m_enable(true);
rtc_clk_fast_src_set(SOC_RTC_FAST_CLK_SRC_RC_FAST);
#endif
#ifdef CONFIG_BOOTLOADER_WDT_ENABLE
// The WDT uses the SLOW_CLK clock source, whose frequency can change after select_rtc_slow_clk() runs.
// If the frequency changes from 150 kHz to 32 kHz, the timeout set for the WDT increases by a factor of about 4.7.
// Therefore, for the duration of the frequency change, set a new, lower timeout value (1.6 sec).
// This prevents an excessive delay before reset in case the supply voltage has dropped.
// (If the frequency changes from 150 kHz to 32 kHz, the effective WDT timeout becomes 1.6 sec * 150/32 = 7.5 sec.)
wdt_hal_context_t rtc_wdt_ctx = RWDT_HAL_CONTEXT_DEFAULT();
uint32_t stage_timeout_ticks = (uint32_t)(1600ULL * rtc_clk_slow_freq_get_hz() / 1000ULL);
wdt_hal_write_protect_disable(&rtc_wdt_ctx);
wdt_hal_feed(&rtc_wdt_ctx);
//Bootloader has enabled RTC WDT until now. We're only modifying timeout, so keep the stage and timeout action the same
wdt_hal_config_stage(&rtc_wdt_ctx, WDT_STAGE0, stage_timeout_ticks, WDT_STAGE_ACTION_RESET_RTC);
wdt_hal_write_protect_enable(&rtc_wdt_ctx);
#endif
#if defined(CONFIG_RTC_CLK_SRC_EXT_CRYS)
select_rtc_slow_clk(SOC_RTC_SLOW_CLK_SRC_XTAL32K);
#elif defined(CONFIG_RTC_CLK_SRC_EXT_OSC)
select_rtc_slow_clk(SOC_RTC_SLOW_CLK_SRC_OSC_SLOW);
#elif defined(CONFIG_RTC_CLK_SRC_INT_RC32K)
select_rtc_slow_clk(SOC_RTC_SLOW_CLK_SRC_RC32K);
#else
select_rtc_slow_clk(SOC_RTC_SLOW_CLK_SRC_RC_SLOW);
#endif
#ifdef CONFIG_BOOTLOADER_WDT_ENABLE
// After changing the frequency, the WDT timeout needs to be set for the new frequency.
stage_timeout_ticks = (uint32_t)((uint64_t)CONFIG_BOOTLOADER_WDT_TIME_MS * rtc_clk_slow_freq_get_hz() / 1000);
wdt_hal_write_protect_disable(&rtc_wdt_ctx);
wdt_hal_feed(&rtc_wdt_ctx);
wdt_hal_config_stage(&rtc_wdt_ctx, WDT_STAGE0, stage_timeout_ticks, WDT_STAGE_ACTION_RESET_RTC);
wdt_hal_write_protect_enable(&rtc_wdt_ctx);
#endif
rtc_cpu_freq_config_t old_config, new_config;
rtc_clk_cpu_freq_get_config(&old_config);
const uint32_t old_freq_mhz = old_config.freq_mhz;
const uint32_t new_freq_mhz = CONFIG_ESP_DEFAULT_CPU_FREQ_MHZ;
bool res = rtc_clk_cpu_freq_mhz_to_config(new_freq_mhz, &new_config);
assert(res);
// Wait for UART TX to finish, otherwise some UART output will be lost
// when switching APB frequency
esp_rom_output_tx_wait_idle(CONFIG_ESP_CONSOLE_UART_NUM);
if (res) {
rtc_clk_cpu_freq_set_config(&new_config);
}
// Recalculate the cycle count (ccount) so that time calculations remain correct.
esp_cpu_set_cycle_count((uint64_t)esp_cpu_get_cycle_count() * new_freq_mhz / old_freq_mhz);
}
static void select_rtc_slow_clk(soc_rtc_slow_clk_src_t rtc_slow_clk_src)
{
uint32_t cal_val = 0;
/* number of times to repeat 32k XTAL calibration
* before giving up and switching to the internal RC
*/
int retry_32k_xtal = 3;
do {
if (rtc_slow_clk_src == SOC_RTC_SLOW_CLK_SRC_XTAL32K || rtc_slow_clk_src == SOC_RTC_SLOW_CLK_SRC_OSC_SLOW) {
/* 32k XTAL oscillator needs to be enabled and running before it can
* be used. Hardware doesn't have a direct way of checking if the
* oscillator is running. Here we use rtc_clk_cal function to count
* the number of main XTAL cycles in the given number of 32k XTAL
* oscillator cycles. If the 32k XTAL has not started up, calibration
* will time out, returning 0.
*/
ESP_EARLY_LOGD(TAG, "waiting for 32k oscillator to start up");
rtc_cal_sel_t cal_sel = 0;
if (rtc_slow_clk_src == SOC_RTC_SLOW_CLK_SRC_XTAL32K) {
rtc_clk_32k_enable(true);
cal_sel = RTC_CAL_32K_XTAL;
} else if (rtc_slow_clk_src == SOC_RTC_SLOW_CLK_SRC_OSC_SLOW) {
rtc_clk_32k_enable_external();
cal_sel = RTC_CAL_32K_OSC_SLOW;
}
// When SLOW_CLK_CAL_CYCLES is set to 0, clock calibration will not be performed at startup.
if (SLOW_CLK_CAL_CYCLES > 0) {
cal_val = rtc_clk_cal(cal_sel, SLOW_CLK_CAL_CYCLES);
if (cal_val == 0) {
if (retry_32k_xtal-- > 0) {
continue;
}
ESP_EARLY_LOGW(TAG, "32 kHz clock not found, switching to internal 150 kHz oscillator");
rtc_slow_clk_src = SOC_RTC_SLOW_CLK_SRC_RC_SLOW;
}
}
} else if (rtc_slow_clk_src == SOC_RTC_SLOW_CLK_SRC_RC32K) {
rtc_clk_rc32k_enable(true);
}
rtc_clk_slow_src_set(rtc_slow_clk_src);
if (SLOW_CLK_CAL_CYCLES > 0) {
/* TODO: 32k XTAL oscillator has some frequency drift at startup.
* Improve calibration routine to wait until the frequency is stable.
*/
cal_val = rtc_clk_cal(RTC_CAL_RTC_MUX, SLOW_CLK_CAL_CYCLES);
} else {
const uint64_t cal_dividend = (1ULL << RTC_CLK_CAL_FRACT) * 1000000ULL;
cal_val = (uint32_t)(cal_dividend / rtc_clk_slow_freq_get_hz());
}
} while (cal_val == 0);
ESP_EARLY_LOGD(TAG, "RTC_SLOW_CLK calibration value: %d", cal_val);
esp_clk_slowclk_cal_set(cal_val);
}
void rtc_clk_select_rtc_slow_clk(void)
{
select_rtc_slow_clk(SOC_RTC_SLOW_CLK_SRC_XTAL32K);
}
/* This function is not exposed as an API at this point.
* All peripheral clocks are enabled by default after the chip is powered on.
* This function disables some peripheral clocks when the CPU starts;
* these clocks are re-enabled when the corresponding peripherals are initialized
* and disabled again when they are de-initialized.
*/
__attribute__((weak)) void esp_perip_clk_init(void)
{
ESP_EARLY_LOGW(TAG, "esp_perip_clk_init() has not been implemented yet");
}
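
For reference, the SLOW_CLK_CAL_CYCLES == 0 fallback above simply computes the slow-clock period in microseconds as a fixed-point value (worked numbers, assuming RTC_CLK_CAL_FRACT is 19 as in soc/rtc.h and a 32768 Hz slow clock):

cal_dividend = (1 << 19) * 1000000 = 524288000000
cal_val      = 524288000000 / 32768 = 16000000
i.e. 1e6 / 32768 ≈ 30.52 us per slow-clock cycle, scaled by 2^19.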

View File

@@ -0,0 +1,117 @@
/*
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "esp_system.h"
#include "esp_rom_sys.h"
#include "esp_private/system_internal.h"
#include "soc/rtc_periph.h"
#include "esp32h4/rom/rtc.h"
// TODO: [ESP32H4] IDF-12307 inherited from verification branch, need check
// IDF-11910 need refactor
static void esp_reset_reason_clear_hint(void);
static esp_reset_reason_t s_reset_reason;
static esp_reset_reason_t get_reset_reason(soc_reset_reason_t rtc_reset_reason, esp_reset_reason_t reset_reason_hint)
{
switch (rtc_reset_reason) {
case RESET_REASON_CHIP_POWER_ON:
return ESP_RST_POWERON;
case RESET_REASON_CPU0_SW:
case RESET_REASON_CORE_SW:
if (reset_reason_hint == ESP_RST_PANIC ||
reset_reason_hint == ESP_RST_BROWNOUT ||
reset_reason_hint == ESP_RST_TASK_WDT ||
reset_reason_hint == ESP_RST_INT_WDT) {
return reset_reason_hint;
}
return ESP_RST_SW;
case RESET_REASON_CORE_DEEP_SLEEP:
return ESP_RST_DEEPSLEEP;
case RESET_REASON_CORE_MWDT0:
return ESP_RST_TASK_WDT;
case RESET_REASON_CORE_MWDT1:
return ESP_RST_INT_WDT;
case RESET_REASON_CORE_RTC_WDT:
case RESET_REASON_SYS_RTC_WDT:
case RESET_REASON_SYS_SUPER_WDT:
case RESET_REASON_CPU0_RTC_WDT:
case RESET_REASON_CPU0_MWDT0:
case RESET_REASON_CPU0_MWDT1:
return ESP_RST_WDT;
case RESET_REASON_SYS_BROWN_OUT:
return ESP_RST_BROWNOUT;
case RESET_REASON_CORE_USB_UART:
case RESET_REASON_CORE_USB_JTAG:
return ESP_RST_USB;
default:
return ESP_RST_UNKNOWN;
}
}
static void __attribute__((constructor)) esp_reset_reason_init(void)
{
esp_reset_reason_t hint = esp_reset_reason_get_hint();
s_reset_reason = get_reset_reason(esp_rom_get_reset_reason(PRO_CPU_NUM), hint);
if (hint != ESP_RST_UNKNOWN) {
esp_reset_reason_clear_hint();
}
}
esp_reset_reason_t esp_reset_reason(void)
{
return s_reset_reason;
}
/* Reset reason hint is stored in RTC_RESET_CAUSE_REG, a.k.a. RTC_CNTL_STORE6_REG,
* a.k.a. RTC_ENTRY_ADDR_REG. It is safe to use this register both for the
* deep sleep wake stub entry address and for reset reason hint, since wake stub
* is only used for deep sleep reset, and in this case the reason provided by
* esp_rom_get_reset_reason is unambiguous.
*
* Same layout is used as for RTC_APB_FREQ_REG (a.k.a. RTC_CNTL_STORE5_REG):
* the value is replicated in low and high half-words. In addition to that,
* MSB is set to 1, which doesn't happen when RTC_CNTL_STORE6_REG contains
* deep sleep wake stub address.
*/
#define RST_REASON_BIT 0x80000000
#define RST_REASON_MASK 0x7FFF
#define RST_REASON_SHIFT 16
/* in IRAM, can be called from panic handler */
void IRAM_ATTR esp_reset_reason_set_hint(esp_reset_reason_t hint)
{
assert((hint & (~RST_REASON_MASK)) == 0);
uint32_t val = hint | (hint << RST_REASON_SHIFT) | RST_REASON_BIT;
REG_WRITE(RTC_RESET_CAUSE_REG, val);
}
/* in IRAM, can be called from panic handler */
esp_reset_reason_t esp_reset_reason_get_hint(void)
{
uint32_t reset_reason_hint = REG_READ(RTC_RESET_CAUSE_REG);
uint32_t high = (reset_reason_hint >> RST_REASON_SHIFT) & RST_REASON_MASK;
uint32_t low = reset_reason_hint & RST_REASON_MASK;
if ((reset_reason_hint & RST_REASON_BIT) == 0 || high != low) {
return ESP_RST_UNKNOWN;
}
return (esp_reset_reason_t) low;
}
static inline void esp_reset_reason_clear_hint(void)
{
REG_WRITE(RTC_RESET_CAUSE_REG, 0);
}
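
A worked example of the hint encoding above (illustrative hint value only): for a hint of 0x0004,

val = 0x0004 | (0x0004 << 16) | 0x80000000 = 0x80040004

On read-back the MSB is set and the low and high half-words (masked with 0x7FFF) both equal 0x0004, so the hint is accepted; a deep sleep wake stub address stored in the same register fails the MSB/replication check and the function returns ESP_RST_UNKNOWN.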

View File

@@ -0,0 +1,124 @@
/*
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <string.h>
#include "sdkconfig.h"
#include "esp_system.h"
#include "esp_private/system_internal.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_rom_sys.h"
#include "riscv/rv_utils.h"
#include "esp_rom_uart.h"
#include "soc/gpio_reg.h"
#include "esp_cpu.h"
#include "soc/rtc.h"
#include "esp_private/rtc_clk.h"
#include "soc/rtc_periph.h"
#include "soc/uart_reg.h"
#include "hal/wdt_hal.h"
#include "esp32h4/rom/cache.h"
// TODO: IDF-11911 need refactor
void IRAM_ATTR esp_system_reset_modules_on_exit(void)
{
// Flush any data left in the UART FIFOs before resetting the UART peripherals
for (int i = 0; i < SOC_UART_HP_NUM; ++i) {
if (uart_ll_is_enabled(i)) {
esp_rom_output_tx_wait_idle(i);
}
}
// Assert peripheral resets
SET_PERI_REG_MASK(PCR_MSPI_CONF_REG, PCR_MSPI_RST_EN);
SET_PERI_REG_MASK(PCR_UART0_CONF_REG, PCR_UART0_RST_EN);
SET_PERI_REG_MASK(PCR_UART1_CONF_REG, PCR_UART1_RST_EN);
SET_PERI_REG_MASK(PCR_SYSTIMER_CONF_REG, PCR_SYSTIMER_RST_EN);
SET_PERI_REG_MASK(PCR_GDMA_CONF_REG, PCR_GDMA_RST_EN);
SET_PERI_REG_MASK(PCR_MODEM_CONF_REG, PCR_MODEM_RST_EN);
SET_PERI_REG_MASK(PCR_PWM0_CONF_REG, PCR_PWM0_RST_EN);
SET_PERI_REG_MASK(PCR_PWM1_CONF_REG, PCR_PWM1_RST_EN);
// Release peripheral resets
CLEAR_PERI_REG_MASK(PCR_MSPI_CONF_REG, PCR_MSPI_RST_EN);
CLEAR_PERI_REG_MASK(PCR_UART0_CONF_REG, PCR_UART0_RST_EN);
CLEAR_PERI_REG_MASK(PCR_UART1_CONF_REG, PCR_UART1_RST_EN);
CLEAR_PERI_REG_MASK(PCR_SYSTIMER_CONF_REG, PCR_SYSTIMER_RST_EN);
CLEAR_PERI_REG_MASK(PCR_GDMA_CONF_REG, PCR_GDMA_RST_EN);
CLEAR_PERI_REG_MASK(PCR_MODEM_CONF_REG, PCR_MODEM_RST_EN);
CLEAR_PERI_REG_MASK(PCR_PWM0_CONF_REG, PCR_PWM0_RST_EN);
CLEAR_PERI_REG_MASK(PCR_PWM1_CONF_REG, PCR_PWM1_RST_EN);
}
/* "inner" restart function for after RTOS, interrupts & anything else on this
* core are already stopped. Stalls other core, resets hardware,
* triggers restart.
*/
void IRAM_ATTR esp_restart_noos(void)
{
// Disable interrupts
rv_utils_intr_global_disable();
// Enable RTC watchdog for 1 second
wdt_hal_context_t rtc_wdt_ctx;
wdt_hal_init(&rtc_wdt_ctx, WDT_RWDT, 0, false);
uint32_t stage_timeout_ticks = (uint32_t)(1000ULL * rtc_clk_slow_freq_get_hz() / 1000ULL);
wdt_hal_write_protect_disable(&rtc_wdt_ctx);
wdt_hal_config_stage(&rtc_wdt_ctx, WDT_STAGE0, stage_timeout_ticks, WDT_STAGE_ACTION_RESET_SYSTEM);
wdt_hal_config_stage(&rtc_wdt_ctx, WDT_STAGE1, stage_timeout_ticks, WDT_STAGE_ACTION_RESET_RTC);
//Enable flash boot mode so that flash booting after restart is protected by the RTC WDT.
wdt_hal_set_flashboot_en(&rtc_wdt_ctx, true);
wdt_hal_write_protect_enable(&rtc_wdt_ctx);
const uint32_t core_id = esp_cpu_get_core_id();
#if !CONFIG_FREERTOS_UNICORE
const uint32_t other_core_id = (core_id == 0) ? 1 : 0;
esp_cpu_reset(other_core_id);
esp_cpu_stall(other_core_id);
#endif
// Disable TG0/TG1 watchdogs
wdt_hal_context_t wdt0_context = {.inst = WDT_MWDT0, .mwdt_dev = &TIMERG0};
wdt_hal_write_protect_disable(&wdt0_context);
wdt_hal_disable(&wdt0_context);
wdt_hal_write_protect_enable(&wdt0_context);
wdt_hal_context_t wdt1_context = {.inst = WDT_MWDT1, .mwdt_dev = &TIMERG1};
wdt_hal_write_protect_disable(&wdt1_context);
wdt_hal_disable(&wdt1_context);
wdt_hal_write_protect_enable(&wdt1_context);
// Disable cache
Cache_Disable_Cache(CACHE_MAP_ALL);
esp_system_reset_modules_on_exit();
// Set the CPU back to the XTAL clock source, as after a hard reset, but keep the BBPLL on so that USB Serial JTAG can still log during the 1st stage bootloader.
#if !CONFIG_IDF_ENV_FPGA
rtc_clk_cpu_set_to_default_config();
#endif
// Reset CPUs
if (core_id == 0) {
// Running on PRO CPU: APP CPU is stalled. Can reset both CPUs.
#if !CONFIG_FREERTOS_UNICORE
esp_cpu_reset(1);
#endif
esp_cpu_reset(0);
}
#if !CONFIG_FREERTOS_UNICORE
else {
// Running on APP CPU: need to reset PRO CPU and unstall it,
// then reset APP CPU
esp_cpu_reset(0);
esp_cpu_unstall(0);
esp_cpu_reset(1);
}
#endif
while (true) {
;
}
}
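
For reference, the one-second RWDT arm above works out to stage_timeout_ticks = 1000 ms * rtc_clk_slow_freq_get_hz() / 1000 = rtc_clk_slow_freq_get_hz() ticks, i.e. roughly 150000 ticks with the ~150 kHz internal RC slow clock (or about 32768 ticks with a 32.768 kHz source), which is one second either way.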

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -8,6 +8,7 @@
#include "soc/soc.h"
#include "soc/soc_caps.h"
#include "soc/rtc_cntl_reg.h"
#include "soc/dport_reg.h"
#include "esp_attr.h"
#ifdef __cplusplus
@@ -63,6 +64,37 @@ FORCE_INLINE_ATTR void cpu_utility_ll_unstall_cpu(uint32_t cpu_no)
int rtc_cntl_c1 = (cpu_no == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1);
}
FORCE_INLINE_ATTR void cpu_utility_ll_enable_debug(uint32_t cpu_no)
{
if (cpu_no == 0) {
DPORT_REG_SET_BIT(DPORT_PRO_CPU_RECORD_CTRL_REG, DPORT_PRO_CPU_PDEBUG_ENABLE);
} else {
DPORT_REG_SET_BIT(DPORT_APP_CPU_RECORD_CTRL_REG, DPORT_APP_CPU_PDEBUG_ENABLE);
}
}
FORCE_INLINE_ATTR void cpu_utility_ll_enable_record(uint32_t cpu_no)
{
if (cpu_no == 0) {
DPORT_REG_SET_BIT(DPORT_PRO_CPU_RECORD_CTRL_REG, DPORT_PRO_CPU_RECORD_ENABLE);
DPORT_REG_CLR_BIT(DPORT_PRO_CPU_RECORD_CTRL_REG, DPORT_PRO_CPU_RECORD_ENABLE);
} else {
DPORT_REG_SET_BIT(DPORT_APP_CPU_RECORD_CTRL_REG, DPORT_APP_CPU_RECORD_ENABLE);
DPORT_REG_CLR_BIT(DPORT_APP_CPU_RECORD_CTRL_REG, DPORT_APP_CPU_RECORD_ENABLE);
}
}
FORCE_INLINE_ATTR void cpu_utility_ll_enable_clock_and_reset_app_cpu(void)
{
if (!DPORT_GET_PERI_REG_MASK(DPORT_APPCPU_CTRL_B_REG, DPORT_APPCPU_CLKGATE_EN)) {
DPORT_SET_PERI_REG_MASK(DPORT_APPCPU_CTRL_B_REG, DPORT_APPCPU_CLKGATE_EN);
DPORT_CLEAR_PERI_REG_MASK(DPORT_APPCPU_CTRL_C_REG, DPORT_APPCPU_RUNSTALL);
DPORT_SET_PERI_REG_MASK(DPORT_APPCPU_CTRL_A_REG, DPORT_APPCPU_RESETTING);
DPORT_CLEAR_PERI_REG_MASK(DPORT_APPCPU_CTRL_A_REG, DPORT_APPCPU_RESETTING);
}
}
#endif // SOC_CPU_CORES_NUM > 1
#ifdef __cplusplus

View File

@@ -0,0 +1,56 @@
/*
* SPDX-FileCopyrightText: 2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#include "soc/soc.h"
#include "soc/lp_aon_reg.h"
#include "soc/lp_aon_struct.h"
#include "soc/pcr_reg.h"
#include "esp_attr.h"
//TODO: [ESP32H4] IDF-12484, need check
#ifdef __cplusplus
extern "C" {
#endif
FORCE_INLINE_ATTR void cpu_utility_ll_reset_cpu(uint32_t cpu_no)
{
if (cpu_no == 0) {
LP_AON.cpucore_cfg.aon_cpu_core0_sw_reset = 1;
} else {
LP_AON.cpucore_cfg.aon_cpu_core1_sw_reset = 1;
}
}
FORCE_INLINE_ATTR uint32_t cpu_utility_ll_wait_mode(void)
{
return REG_GET_BIT(PCR_CPU_WAITI_CONF_REG, PCR_CPU0_WAIT_MODE_FORCE_ON);
}
FORCE_INLINE_ATTR void cpu_utility_ll_enable_debug(uint32_t cpu_no)
{
// TODO
}
FORCE_INLINE_ATTR void cpu_utility_ll_enable_record(uint32_t cpu_no)
{
// TODO
}
FORCE_INLINE_ATTR void cpu_utility_ll_enable_clock_and_reset_app_cpu(void)
{
if (!REG_GET_BIT(PCR_CORE1_CONF_REG, PCR_CORE1_CLK_EN)) {
REG_SET_BIT(PCR_CORE1_CONF_REG, PCR_CORE1_CLK_EN);
}
if (REG_GET_BIT(PCR_CORE1_CONF_REG, PCR_CORE1_RST_EN)) {
REG_CLR_BIT(PCR_CORE1_CONF_REG, PCR_CORE1_RST_EN);
}
}
#ifdef __cplusplus
}
#endif

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -10,6 +10,8 @@
#include "soc/lp_clkrst_struct.h"
#include "soc/pmu_struct.h"
#include "soc/hp_system_reg.h"
#include "soc/hp_sys_clkrst_reg.h"
#include "soc/assist_debug_reg.h"
#include "esp_attr.h"
#include "hal/misc.h"
@@ -48,6 +50,35 @@ FORCE_INLINE_ATTR void cpu_utility_ll_unstall_cpu(uint32_t cpu_no)
while(REG_GET_BIT(HP_SYSTEM_CPU_CORESTALLED_ST_REG, HP_SYSTEM_REG_CORE1_CORESTALLED_ST));
}
}
FORCE_INLINE_ATTR void cpu_utility_ll_enable_debug(uint32_t cpu_no)
{
if (cpu_no == 0) {
REG_SET_BIT(ASSIST_DEBUG_CORE_0_RCD_EN_REG, ASSIST_DEBUG_CORE_0_RCD_PDEBUGEN);
} else {
REG_SET_BIT(ASSIST_DEBUG_CORE_1_RCD_EN_REG, ASSIST_DEBUG_CORE_1_RCD_PDEBUGEN);
}
}
FORCE_INLINE_ATTR void cpu_utility_ll_enable_record(uint32_t cpu_no)
{
if (cpu_no == 0) {
REG_SET_BIT(ASSIST_DEBUG_CORE_0_RCD_EN_REG, ASSIST_DEBUG_CORE_0_RCD_RECORDEN);
} else {
REG_SET_BIT(ASSIST_DEBUG_CORE_1_RCD_EN_REG, ASSIST_DEBUG_CORE_1_RCD_RECORDEN);
}
}
FORCE_INLINE_ATTR void cpu_utility_ll_enable_clock_and_reset_app_cpu(void)
{
if (!REG_GET_BIT(HP_SYS_CLKRST_SOC_CLK_CTRL0_REG, HP_SYS_CLKRST_REG_CORE1_CPU_CLK_EN)) {
REG_SET_BIT(HP_SYS_CLKRST_SOC_CLK_CTRL0_REG, HP_SYS_CLKRST_REG_CORE1_CPU_CLK_EN);
}
if (REG_GET_BIT(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_CORE1_GLOBAL)) {
REG_CLR_BIT(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_CORE1_GLOBAL);
}
}
#endif // SOC_CPU_CORES_NUM > 1
FORCE_INLINE_ATTR uint32_t cpu_utility_ll_wait_mode(void)

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2024 Espressif Systems (Shanghai) CO LTD
* SPDX-FileCopyrightText: 2024-2025 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -7,7 +7,9 @@
#pragma once
#include "soc/soc.h"
#include "soc/soc_caps.h"
#include "soc/system_reg.h"
#include "soc/rtc_cntl_reg.h"
#include "soc/assist_debug_reg.h"
#include "esp_attr.h"
#ifdef __cplusplus
@@ -63,6 +65,35 @@ FORCE_INLINE_ATTR void cpu_utility_ll_unstall_cpu(uint32_t cpu_no)
int rtc_cntl_c1 = (cpu_no == 0) ? RTC_CNTL_SW_STALL_PROCPU_C1_M : RTC_CNTL_SW_STALL_APPCPU_C1_M;
CLEAR_PERI_REG_MASK(RTC_CNTL_SW_CPU_STALL_REG, rtc_cntl_c1);
}
FORCE_INLINE_ATTR void cpu_utility_ll_enable_debug(uint32_t cpu_no)
{
if (cpu_no == 0) {
REG_WRITE(ASSIST_DEBUG_CORE_0_RCD_PDEBUGENABLE_REG, 1);
} else {
REG_WRITE(ASSIST_DEBUG_CORE_1_RCD_PDEBUGENABLE_REG, 1);
}
}
FORCE_INLINE_ATTR void cpu_utility_ll_enable_record(uint32_t cpu_no)
{
if (cpu_no == 0) {
REG_WRITE(ASSIST_DEBUG_CORE_0_RCD_RECORDING_REG, 1);
} else {
REG_WRITE(ASSIST_DEBUG_CORE_1_RCD_RECORDING_REG, 1);
}
}
FORCE_INLINE_ATTR void cpu_utility_ll_enable_clock_and_reset_app_cpu(void)
{
if (!REG_GET_BIT(SYSTEM_CORE_1_CONTROL_0_REG, SYSTEM_CONTROL_CORE_1_CLKGATE_EN)) {
REG_SET_BIT(SYSTEM_CORE_1_CONTROL_0_REG, SYSTEM_CONTROL_CORE_1_CLKGATE_EN);
REG_CLR_BIT(SYSTEM_CORE_1_CONTROL_0_REG, SYSTEM_CONTROL_CORE_1_RUNSTALL);
REG_SET_BIT(SYSTEM_CORE_1_CONTROL_0_REG, SYSTEM_CONTROL_CORE_1_RESETING);
REG_CLR_BIT(SYSTEM_CORE_1_CONTROL_0_REG, SYSTEM_CONTROL_CORE_1_RESETING);
}
}
#endif // SOC_CPU_CORES_NUM > 1
#ifdef __cplusplus