feat(esp_system): base support on p4

Armando
2023-07-21 12:36:57 +08:00
committed by Armando (Dou Yiwen)
parent fd096c012d
commit a336b94527
17 changed files with 1306 additions and 11 deletions

View File

@@ -101,6 +101,7 @@ menu "ESP System Settings"
default y if IDF_TARGET_ESP32S3
default y if IDF_TARGET_ESP32C6
default n if IDF_TARGET_ESP32H2 # IDF-5667
default y if IDF_TARGET_ESP32P4
depends on SOC_RTC_FAST_MEM_SUPPORTED
config ESP_SYSTEM_ALLOW_RTC_FAST_MEM_AS_HEAP
@@ -316,6 +317,7 @@ menu "ESP System Settings"
default 20 if IDF_TARGET_ESP32C2
default 21 if IDF_TARGET_ESP32C3
default 16 if IDF_TARGET_ESP32C6
default 37 if IDF_TARGET_ESP32P4
default 24 if IDF_TARGET_ESP32H2
default 43
help
@@ -333,6 +335,7 @@ menu "ESP System Settings"
default 19 if IDF_TARGET_ESP32C2
default 20 if IDF_TARGET_ESP32C3
default 17 if IDF_TARGET_ESP32C6
default 38 if IDF_TARGET_ESP32P4
default 23 if IDF_TARGET_ESP32H2
default 44
help
@@ -596,6 +599,7 @@ menu "IPC (Inter-Processor Call)"
config ESP_IPC_ISR_ENABLE
bool
default n if IDF_TARGET_ESP32P4 # TODO: IDF-7769
default y if !FREERTOS_UNICORE
help
The IPC ISR feature is similar to the IPC feature except that the callback function is executed in the
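For orientation, a hedged sketch of the ordinary (non-ISR) IPC API from esp_ipc.h, which runs a callback in a task context on the chosen core; the IPC ISR feature gated here runs its callback from a high-priority interrupt instead, and is left disabled on ESP32-P4 pending IDF-7769. The example function names are illustrative only and assume a multicore build.

#include <stdint.h>
#include "esp_err.h"
#include "esp_ipc.h"
#include "esp_cpu.h"

static void ipc_callback(void *arg)
{
    /* Runs on the core given to esp_ipc_call_blocking(), in the IPC task's context */
    *(uint32_t *)arg = esp_cpu_get_core_id();
}

void example_run_on_core1(void)
{
    uint32_t ran_on = 0;
    /* Blocks until core 1 has finished executing the callback */
    ESP_ERROR_CHECK(esp_ipc_call_blocking(1, ipc_callback, &ran_on));
    (void)ran_on; /* == 1 on a dual-core target */
}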

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -24,6 +24,9 @@
#else
#include "soc/system_reg.h"
#endif
#if CONFIG_IDF_TARGET_ESP32P4
#include "soc/hp_system_reg.h"
#endif
#define REASON_YIELD BIT(0)
#define REASON_FREQ_SWITCH BIT(1)
@@ -66,6 +69,12 @@ static void IRAM_ATTR esp_crosscore_isr(void *arg) {
} else {
WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_1_REG, 0);
}
#elif CONFIG_IDF_TARGET_ESP32P4
if (esp_cpu_get_core_id() == 0) {
WRITE_PERI_REG(HP_SYSTEM_CPU_INT_FROM_CPU_0_REG, 0);
} else {
WRITE_PERI_REG(HP_SYSTEM_CPU_INT_FROM_CPU_1_REG, 0);
}
#elif CONFIG_IDF_TARGET_ARCH_RISCV
WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_0_REG, 0);
#endif
@@ -147,6 +156,12 @@ static void IRAM_ATTR esp_crosscore_int_send(int core_id, uint32_t reason_mask)
} else {
WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_1_REG, SYSTEM_CPU_INTR_FROM_CPU_1);
}
#elif CONFIG_IDF_TARGET_ESP32P4
if (core_id==0) {
WRITE_PERI_REG(HP_SYSTEM_CPU_INT_FROM_CPU_0_REG, HP_SYSTEM_CPU_INT_FROM_CPU_0);
} else {
WRITE_PERI_REG(HP_SYSTEM_CPU_INT_FROM_CPU_1_REG, HP_SYSTEM_CPU_INT_FROM_CPU_1);
}
#elif CONFIG_IDF_TARGET_ARCH_RISCV
WRITE_PERI_REG(SYSTEM_CPU_INTR_FROM_CPU_0_REG, SYSTEM_CPU_INTR_FROM_CPU_0);
#endif
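For readers new to this file, the ESP32-P4 branches added above follow the same trigger/acknowledge pattern as the other dual-core targets, just with the HP_SYSTEM register names. A condensed, hedged sketch of that pattern (register names are the ones used in the hunks above; the helper functions are illustrative, not part of esp_crosscore_int.c):

#include "soc/soc.h"              /* WRITE_PERI_REG */
#include "soc/hp_system_reg.h"
#include "esp_cpu.h"

/* Illustrative helper: raise the cross-core software interrupt on the given core */
static void p4_crosscore_trigger(int target_core)
{
    if (target_core == 0) {
        WRITE_PERI_REG(HP_SYSTEM_CPU_INT_FROM_CPU_0_REG, HP_SYSTEM_CPU_INT_FROM_CPU_0);
    } else {
        WRITE_PERI_REG(HP_SYSTEM_CPU_INT_FROM_CPU_1_REG, HP_SYSTEM_CPU_INT_FROM_CPU_1);
    }
}

/* Illustrative helper: acknowledge the interrupt on the core the ISR runs on */
static void p4_crosscore_ack(void)
{
    if (esp_cpu_get_core_id() == 0) {
        WRITE_PERI_REG(HP_SYSTEM_CPU_INT_FROM_CPU_0_REG, 0);
    } else {
        WRITE_PERI_REG(HP_SYSTEM_CPU_INT_FROM_CPU_1_REG, 0);
    }
}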

View File

@@ -24,6 +24,8 @@
#include "esp_private/esp_pmu.h"
#elif CONFIG_IDF_TARGET_ESP32H2
#include "esp32h2/rom/rtc.h"
#elif CONFIG_IDF_TARGET_ESP32P4
#include "esp32p4/rom/rtc.h"
#endif
#include "esp_log.h"
#include "esp_rom_sys.h"

View File

@@ -20,7 +20,7 @@
extern "C" {
#endif
#if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S3 || CONFIG_IDF_TARGET_ESP32S2 #if CONFIG_IDF_TARGET_ESP32 || CONFIG_IDF_TARGET_ESP32S3 || CONFIG_IDF_TARGET_ESP32S2 || CONFIG_IDF_TARGET_ESP32P4
/**
* This macro also helps users switching between spinlock declarations/definitions for multi-/single core environments
* if the macros below aren't sufficient.
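With ESP32-P4 added to the multicore branch above, the same spinlock API now covers it; a minimal hedged usage sketch, assuming the spinlock_acquire()/spinlock_release() helpers and the SPINLOCK_INITIALIZER / SPINLOCK_WAIT_FOREVER macros provided by spinlock.h (function and variable names are illustrative):

#include <stdint.h>
#include "spinlock.h"

static spinlock_t s_counter_lock = SPINLOCK_INITIALIZER;
static volatile uint32_t s_counter;

void example_increment_shared_counter(void)
{
    spinlock_acquire(&s_counter_lock, SPINLOCK_WAIT_FOREVER);
    s_counter++;                        /* protected against the other core */
    spinlock_release(&s_counter_lock);
}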

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/

View File

@@ -0,0 +1,131 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/**
* ESP32-P4 Linker Script Memory Layout
* This file describes the memory layout (memory blocks) by virtual memory addresses.
* This linker script is passed through the C preprocessor to include configuration options.
* Please use preprocessor features sparingly!
* Restrict to simple macros with numeric values, and/or #if/#endif blocks.
*/
#include "sdkconfig.h"
#include "ld.common"
/**
Physical memory is mapped twice into the virtual address space (IRAM and DRAM).
* `I_D_SRAM_OFFSET` is the offset between the two locations of the same physical memory
*/
#define SRAM_IRAM_START 0x4ff00000
#define SRAM_DRAM_START 0x4ff00000
#define I_D_SRAM_OFFSET (SRAM_IRAM_START - SRAM_DRAM_START)
#define SRAM_DRAM_END 0x4ff30bd0 - I_D_SRAM_OFFSET /* 2nd stage bootloader iram_loader_seg start address */
#define SRAM_IRAM_ORG (SRAM_IRAM_START)
#define SRAM_DRAM_ORG (SRAM_DRAM_START)
#define I_D_SRAM_SIZE SRAM_DRAM_END - SRAM_DRAM_ORG
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
/**
* TODO: IDF-7890
*/
#define IDROM_SEG_SIZE (CONFIG_MMU_PAGE_SIZE << 10)
#endif
#define DRAM0_0_SEG_LEN I_D_SRAM_SIZE
MEMORY
{
/**
* All these values assume the flash cache is on, and have the blocks this uses subtracted from the length
* of the various regions. The 'data access port' dram/drom regions map to the same iram/irom regions but
are connected to the data port of the CPU and allow e.g. byte-wise access.
*/
/* TCM */
tcm_idram_seg (RX) : org = 0x30100000, len = 0x2000
/* IRAM for PRO CPU. */
iram0_0_seg (RX) : org = SRAM_IRAM_ORG, len = I_D_SRAM_SIZE
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
/* Flash mapped instruction data */
irom_seg (RX) : org = 0x40000020, len = IDROM_SEG_SIZE - 0x20
/**
* (0x20 offset above is a convenience for the app binary image generation.
* Flash cache has 64KB pages. The .bin file which is flashed to the chip
* has a 0x18 byte file header, and each segment has a 0x08 byte segment
* header. Setting this offset makes it simple to meet the flash cache MMU's
* constraint that (paddr % 64KB == vaddr % 64KB).)
*/
#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS
/**
* Shared data RAM, excluding memory reserved for ROM bss/data/stack.
* Enabling Bluetooth & Trace Memory features in menuconfig will decrease the amount of RAM available.
*/
dram0_0_seg (RW) : org = SRAM_DRAM_ORG, len = DRAM0_0_SEG_LEN
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
/* Flash mapped constant data */
drom_seg (R) : org = 0x40000020, len = IDROM_SEG_SIZE - 0x20
/* (See irom_seg for meaning of 0x20 offset in the above.) */
#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS
/**
* LP RAM (RWX). Persists over deep sleep. // TODO: IDF-5667
*/
#if CONFIG_ULP_COPROC_ENABLED
lp_ram_seg(RW) : org = 0x50108000 + CONFIG_ULP_COPROC_RESERVE_MEM,
len = 0x8000 - CONFIG_ULP_COPROC_RESERVE_MEM
#else
lp_ram_seg(RW) : org = 0x50108000 , len = 0x8000
#endif // CONFIG_ULP_COPROC_ENABLED
}
/* Heap ends at top of dram0_0_seg */
_heap_end = 0x50000000;
_data_seg_org = ORIGIN(rtc_data_seg);
/**
The lines below define location aliases for the .rtc.data section.
P4 has no distinct LP (RTC) fast and slow memory sections; instead, there is a unified LP_RAM section.
Thus, the following region segments are not configurable like on other targets.
*/
REGION_ALIAS("rtc_iram_seg", lp_ram_seg );
REGION_ALIAS("rtc_data_seg", rtc_iram_seg );
REGION_ALIAS("rtc_slow_seg", rtc_iram_seg );
REGION_ALIAS("rtc_data_location", rtc_iram_seg );
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
REGION_ALIAS("default_code_seg", irom_seg);
#else
REGION_ALIAS("default_code_seg", iram0_0_seg);
#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
REGION_ALIAS("default_rodata_seg", drom_seg);
#else
REGION_ALIAS("default_rodata_seg", dram0_0_seg);
#endif // CONFIG_APP_BUILD_USE_FLASH_SECTIONS
/**
* If rodata default segment is placed in `drom_seg`, then flash's first rodata section must
* also be first in the segment.
*/
#if CONFIG_APP_BUILD_USE_FLASH_SECTIONS
ASSERT(_flash_rodata_dummy_start == ORIGIN(default_rodata_seg),
".flash_rodata_dummy section must be placed at the beginning of the rodata segment.")
#endif
#if CONFIG_ESP_SYSTEM_USE_EH_FRAME
ASSERT ((__eh_frame_end > __eh_frame), "Error: eh_frame size is null!");
ASSERT ((__eh_frame_hdr_end > __eh_frame_hdr), "Error: eh_frame_hdr size is null!");
#endif
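A quick, hedged cross-check of the SRAM_* constants defined at the top of this script; the 0x4ff30bd0 end address is the second-stage bootloader iram_loader_seg start quoted in the comment above, and IRAM/DRAM share one virtual alias on the P4, so the I/D offset is zero:

/* Mirrors the linker constants above so the derived size can be checked in C */
#define SRAM_IRAM_START 0x4ff00000u
#define SRAM_DRAM_START 0x4ff00000u
#define I_D_SRAM_OFFSET (SRAM_IRAM_START - SRAM_DRAM_START)
#define SRAM_DRAM_END   (0x4ff30bd0u - I_D_SRAM_OFFSET)
#define I_D_SRAM_SIZE   (SRAM_DRAM_END - SRAM_DRAM_START)

_Static_assert(I_D_SRAM_OFFSET == 0, "same physical SRAM, single virtual alias");
_Static_assert(I_D_SRAM_SIZE == 0x30bd0, "0x30bd0 bytes, roughly 195 KB of app RAM");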

View File

@@ -0,0 +1,441 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/* Default entry point */
ENTRY(call_start_cpu0);
SECTIONS
{
/**
* RTC fast memory holds RTC wake stub code,
* including from any source file named rtc_wake_stub*.c
*/
.rtc.text :
{
. = ALIGN(4);
_rtc_fast_start = ABSOLUTE(.);
mapping[rtc_text]
*rtc_wake_stub*.*(.literal .text .literal.* .text.*)
*(.rtc_text_end_test)
/* 16B padding for possible CPU prefetch and 4B alignment for PMS split lines */
. += _esp_memprot_prefetch_pad_size;
. = ALIGN(4);
_rtc_text_end = ABSOLUTE(.);
} > lp_ram_seg
/**
* This section located in RTC FAST Memory area.
* It holds data marked with RTC_FAST_ATTR attribute.
* See the file "esp_attr.h" for more information.
*/
.rtc.force_fast :
{
. = ALIGN(4);
_rtc_force_fast_start = ABSOLUTE(.);
mapping[rtc_force_fast]
*(.rtc.force_fast .rtc.force_fast.*)
. = ALIGN(4) ;
_rtc_force_fast_end = ABSOLUTE(.);
} > lp_ram_seg
/**
* RTC data section holds RTC wake stub
* data/rodata, including from any source file
* named rtc_wake_stub*.c and the data marked with
* RTC_DATA_ATTR, RTC_RODATA_ATTR attributes.
*/
.rtc.data :
{
_rtc_data_start = ABSOLUTE(.);
mapping[rtc_data]
*rtc_wake_stub*.*(.data .rodata .data.* .rodata.* .srodata.*)
_rtc_data_end = ABSOLUTE(.);
} > lp_ram_seg
/* RTC bss, from any source file named rtc_wake_stub*.c */
.rtc.bss (NOLOAD) :
{
_rtc_bss_start = ABSOLUTE(.);
*rtc_wake_stub*.*(.bss .bss.* .sbss .sbss.*)
*rtc_wake_stub*.*(COMMON)
mapping[rtc_bss]
_rtc_bss_end = ABSOLUTE(.);
} > lp_ram_seg
/**
* This section holds data that should not be initialized at power up
* and will be retained during deep sleep.
* User data marked with RTC_NOINIT_ATTR will be placed
* into this section. See the file "esp_attr.h" for more information.
*/
.rtc_noinit (NOLOAD):
{
. = ALIGN(4);
_rtc_noinit_start = ABSOLUTE(.);
*(.rtc_noinit .rtc_noinit.*)
. = ALIGN(4) ;
_rtc_noinit_end = ABSOLUTE(.);
} > lp_ram_seg
/**
* This section located in RTC SLOW Memory area.
* It holds data marked with RTC_SLOW_ATTR attribute.
* See the file "esp_attr.h" for more information.
*/
.rtc.force_slow :
{
. = ALIGN(4);
_rtc_force_slow_start = ABSOLUTE(.);
*(.rtc.force_slow .rtc.force_slow.*)
. = ALIGN(4) ;
_rtc_force_slow_end = ABSOLUTE(.);
} > lp_ram_seg
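The .rtc.* rules above collect code and data from files named rtc_wake_stub*.c plus anything tagged with the RTC_* attribute macros, and on the P4 they all land in the unified lp_ram_seg. A hedged sketch of the kind of source those input-section patterns are written for; the attribute macros and the esp_wake_deep_sleep() override are the standard ones from esp_attr.h/esp_sleep.h, and whether the P4 deep-sleep path exercises them yet is not covered by this commit:

/* Would live in a file named e.g. rtc_wake_stub_example.c so that its
 * .text/.data/.bss match the *rtc_wake_stub*.* patterns above. */
#include <stdint.h>
#include "esp_attr.h"
#include "esp_sleep.h"

RTC_DATA_ATTR static uint32_t s_wake_count;   /* retained across deep sleep (.rtc.data) */

/* Overrides the weak default; runs from LP RAM right after deep-sleep wakeup,
 * before the main application is loaded. */
void RTC_IRAM_ATTR esp_wake_deep_sleep(void)
{
    esp_default_wake_deep_sleep();
    s_wake_count++;
}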
/* Get size of rtc slow data based on rtc_data_location alias */
_rtc_slow_length = (ORIGIN(rtc_slow_seg) == ORIGIN(rtc_data_location))
? (_rtc_force_slow_end - _rtc_data_start)
: (_rtc_force_slow_end - _rtc_force_slow_start);
_rtc_fast_length = (ORIGIN(rtc_slow_seg) == ORIGIN(rtc_data_location))
? (_rtc_force_fast_end - _rtc_fast_start)
: (_rtc_noinit_end - _rtc_fast_start);
ASSERT((_rtc_slow_length <= LENGTH(rtc_slow_seg)),
"RTC_SLOW segment data does not fit.")
ASSERT((_rtc_fast_length <= LENGTH(rtc_data_seg)),
"RTC_FAST segment data does not fit.")
.tcm.text :
{
/* Code marked as running out of TCM */
_tcm_text_start = ABSOLUTE(.);
mapping[tcm_text]
_tcm_text_end = ABSOLUTE(.);
} > tcm_idram_seg
.tcm.data :
{
_tcm_data_start = ABSOLUTE(.);
mapping[tcm_data]
_tcm_data_end = ABSOLUTE(.);
. = ALIGN(4);
} > tcm_idram_seg
.iram0.text :
{
_iram_start = ABSOLUTE(.);
/* Vectors go to start of IRAM */
ASSERT(ABSOLUTE(.) % 0x100 == 0, "vector address must be 256 byte aligned");
KEEP(*(.exception_vectors.text));
. = ALIGN(4);
_invalid_pc_placeholder = ABSOLUTE(.);
/* Code marked as running out of IRAM */
_iram_text_start = ABSOLUTE(.);
mapping[iram0_text]
} > iram0_0_seg
/* Marks the end of IRAM code segment */
.iram0.text_end (NOLOAD) :
{
/* ESP32-C6 memprot requires 16B padding for possible CPU prefetch and 512B alignment for PMS split lines */
. += _esp_memprot_prefetch_pad_size;
. = ALIGN(_esp_memprot_align_size);
/* iram_end_test section exists for use by memprot unit tests only */
*(.iram_end_test)
_iram_text_end = ABSOLUTE(.);
} > iram0_0_seg
.iram0.data :
{
. = ALIGN(16);
_iram_data_start = ABSOLUTE(.);
mapping[iram0_data]
_iram_data_end = ABSOLUTE(.);
} > iram0_0_seg
.iram0.bss (NOLOAD) :
{
. = ALIGN(16);
_iram_bss_start = ABSOLUTE(.);
mapping[iram0_bss]
_iram_bss_end = ABSOLUTE(.);
. = ALIGN(16);
_iram_end = ABSOLUTE(.);
} > iram0_0_seg
/**
* This section is required to skip .iram0.text area because iram0_0_seg and
* dram0_0_seg reflect the same address space on different buses.
*/
.dram0.dummy (NOLOAD):
{
. = ORIGIN(dram0_0_seg) + _iram_end - _iram_start;
} > dram0_0_seg
.dram0.data :
{
_data_start = ABSOLUTE(.);
*(.gnu.linkonce.d.*)
*(.data1)
__global_pointer$ = . + 0x800;
*(.sdata)
*(.sdata.*)
*(.gnu.linkonce.s.*)
*(.gnu.linkonce.s2.*)
*(.jcr)
mapping[dram0_data]
_data_end = ABSOLUTE(.);
. = ALIGN(4);
} > dram0_0_seg
/**
* This section holds data that should not be initialized at power up.
* The section located in Internal SRAM memory region. The macro _NOINIT
* can be used as attribute to place data into this section.
* See the "esp_attr.h" file for more information.
*/
.noinit (NOLOAD):
{
. = ALIGN(4);
_noinit_start = ABSOLUTE(.);
*(.noinit .noinit.*)
. = ALIGN(4) ;
_noinit_end = ABSOLUTE(.);
} > dram0_0_seg
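Like the RTC sections, the .noinit rule above is driven by an attribute macro; a minimal hedged example of data that ends up in this output section, using the __NOINIT_ATTR macro from esp_attr.h (the names are illustrative):

#include <stdint.h>
#include "esp_attr.h"

/* Placed in .noinit: keeps its value across a software restart (RAM content is
 * preserved and startup skips initialization), but not across power loss. */
__NOINIT_ATTR static uint32_t s_reboot_scratch;

uint32_t example_get_reboot_scratch(void)
{
    return s_reboot_scratch;
}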
/* Shared RAM */
.dram0.bss (NOLOAD) :
{
. = ALIGN (8);
_bss_start = ABSOLUTE(.);
mapping[dram0_bss]
*(.dynsbss)
*(.sbss)
*(.sbss.*)
*(.gnu.linkonce.sb.*)
*(.scommon)
*(.sbss2)
*(.sbss2.*)
*(.gnu.linkonce.sb2.*)
*(.dynbss)
*(.share.mem)
*(.gnu.linkonce.b.*)
. = ALIGN (8);
_bss_end = ABSOLUTE(.);
} > dram0_0_seg
ASSERT(((_bss_end - ORIGIN(dram0_0_seg)) <= LENGTH(dram0_0_seg)), "DRAM segment data does not fit.")
.flash.text :
{
_stext = .;
_instruction_reserved_start = ABSOLUTE(.); /* This symbol marks the flash.text start; the mmu driver can use it to maintain the virtual address */
_text_start = ABSOLUTE(.);
mapping[flash_text]
*(.stub .gnu.warning .gnu.linkonce.literal.* .gnu.linkonce.t.*.literal .gnu.linkonce.t.*)
*(.irom0.text) /* catch stray ICACHE_RODATA_ATTR */
*(.fini.literal)
*(.fini)
*(.gnu.version)
/** The CPU will try to prefetch up to 16 bytes
of instructions. This means that any configuration (e.g. MMU, PMS) must allow
safe access to up to 16 bytes after the last real instruction; add
dummy bytes to ensure this
*/
. += _esp_flash_mmap_prefetch_pad_size;
_text_end = ABSOLUTE(.);
_instruction_reserved_end = ABSOLUTE(.); /* This symbol marks the flash.text end; the mmu driver can use it to maintain the virtual address */
_etext = .;
/**
* Similar to _iram_start, this symbol goes here so it is
* resolved by addr2line in preference to the first symbol in
* the flash.text segment.
*/
_flash_cache_start = ABSOLUTE(0);
} > default_code_seg
/**
* This dummy section represents the .flash.text section but in default_rodata_seg.
* Thus, it must have its alignment and (at least) its size.
*/
.flash_rodata_dummy (NOLOAD):
{
_flash_rodata_dummy_start = .;
/* Start at the same alignment constraint as .flash.text */
. = ALIGN(ALIGNOF(.flash.text));
/* Create an empty gap as big as .flash.text section */
. = . + SIZEOF(.flash.text);
/* Prepare the alignment of the section above. A few bytes (0x20) must be
added for the mapping header. */
. = ALIGN(_esp_mmu_block_size) + 0x20;
} > default_rodata_seg
.flash.appdesc : ALIGN(0x10)
{
_rodata_reserved_start = ABSOLUTE(.); /* This symbol marks the flash.rodata start; the mmu driver can use it to maintain the virtual address */
_rodata_start = ABSOLUTE(.);
*(.rodata_desc .rodata_desc.*) /* Should be the first. App version info. DO NOT PUT ANYTHING BEFORE IT! */
*(.rodata_custom_desc .rodata_custom_desc.*) /* Should be the second. Custom app version info. DO NOT PUT ANYTHING BEFORE IT! */
/* Create an empty gap within this section. Thanks to this, the end of this
* section will match .flash.rodata's begin address. Thus, both sections
* will be merged when creating the final bin image. */
. = ALIGN(ALIGNOF(.flash.rodata));
} > default_rodata_seg
.flash.rodata : ALIGN(0x10)
{
_flash_rodata_start = ABSOLUTE(.);
mapping[flash_rodata]
*(.irom1.text) /* catch stray ICACHE_RODATA_ATTR */
*(.gnu.linkonce.r.*)
*(.rodata1)
__XT_EXCEPTION_TABLE_ = ABSOLUTE(.);
*(.xt_except_table)
*(.gcc_except_table .gcc_except_table.*)
*(.gnu.linkonce.e.*)
*(.gnu.version_r)
. = (. + 7) & ~ 3;
/*
* C++ constructor and destructor tables
* Don't include anything from crtbegin.o or crtend.o, as IDF doesn't use toolchain crt.
*
* RISC-V gcc is configured with --enable-initfini-array so it emits an .init_array section instead.
* But the init_priority sections will be sorted for iteration in ascending order during startup.
* The rest of the init_array sections is sorted for iteration in descending order during startup, however.
* Hence a different section is generated for the init_priority functions which is iterated in
* ascending order during startup. The corresponding code can be found in startup.c.
*/
__init_priority_array_start = ABSOLUTE(.);
KEEP (*(EXCLUDE_FILE (*crtend.* *crtbegin.*) .init_array.*))
__init_priority_array_end = ABSOLUTE(.);
__init_array_start = ABSOLUTE(.);
KEEP (*(EXCLUDE_FILE (*crtend.* *crtbegin.*) .init_array))
__init_array_end = ABSOLUTE(.);
KEEP (*crtbegin.*(.dtors))
KEEP (*(EXCLUDE_FILE (*crtend.*) .dtors))
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
/* C++ exception handlers table: */
__XT_EXCEPTION_DESCS_ = ABSOLUTE(.);
*(.xt_except_desc)
*(.gnu.linkonce.h.*)
__XT_EXCEPTION_DESCS_END__ = ABSOLUTE(.);
*(.xt_except_desc_end)
*(.dynamic)
*(.gnu.version_d)
/* Addresses of memory regions reserved via SOC_RESERVE_MEMORY_REGION() */
soc_reserved_memory_region_start = ABSOLUTE(.);
KEEP (*(.reserved_memory_address))
soc_reserved_memory_region_end = ABSOLUTE(.);
/* System init functions registered via ESP_SYSTEM_INIT_FN */
_esp_system_init_fn_array_start = ABSOLUTE(.);
KEEP (*(SORT_BY_INIT_PRIORITY(.esp_system_init_fn.*)))
_esp_system_init_fn_array_end = ABSOLUTE(.);
_rodata_end = ABSOLUTE(.);
/* Literals are also RO data. */
_lit4_start = ABSOLUTE(.);
*(*.lit4)
*(.lit4.*)
*(.gnu.linkonce.lit4.*)
_lit4_end = ABSOLUTE(.);
. = ALIGN(4);
_thread_local_start = ABSOLUTE(.);
*(.tdata)
*(.tdata.*)
*(.tbss)
*(.tbss.*)
_thread_local_end = ABSOLUTE(.);
. = ALIGN(ALIGNOF(.eh_frame));
} > default_rodata_seg
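The .init_array sorting described in the comment above is easiest to see with GCC constructor priorities; a small hedged illustration (priority values and names are arbitrary examples):

static int s_init_order[2];
static int s_init_count;

/* GCC emits these into .init_array.NNNNN; the KEEP(... .init_array.*) rule above
 * keeps them, and startup iterates them in ascending priority order. */
static void __attribute__((constructor(101))) early_ctor(void) { s_init_order[s_init_count++] = 101; }
static void __attribute__((constructor(102))) later_ctor(void) { s_init_order[s_init_count++] = 102; }

/* After startup, s_init_order == {101, 102}: prioritized constructors run first,
 * followed by the unprioritized .init_array entries collected just below them. */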
/* This section shall be at least aligned on 4 */
.eh_frame : ALIGN(8)
{
__eh_frame = ABSOLUTE(.);
KEEP (*(.eh_frame))
__eh_frame_end = ABSOLUTE(.);
/* Guarantee that this section and the next one will be merged by making
* them adjacent. */
. = ALIGN(ALIGNOF(.eh_frame_hdr));
} > default_rodata_seg
/* To avoid any exception in C++ exception frame unwinding code, this section
* shall be aligned on 8. */
.eh_frame_hdr : ALIGN(8)
{
__eh_frame_hdr = ABSOLUTE(.);
KEEP (*(.eh_frame_hdr))
__eh_frame_hdr_end = ABSOLUTE(.);
} > default_rodata_seg
/*
This section is a place where we dump all the rodata that isn't used at runtime,
so as to avoid increasing the binary size
*/
.flash.rodata_noload (NOLOAD) :
{
/*
This symbol marks the flash.rodata end; the mmu driver can use it to maintain the virtual address.
We don't need to include the noload rodata in this section.
*/
_rodata_reserved_end = ABSOLUTE(.);
. = ALIGN (4);
mapping[rodata_noload]
} > default_rodata_seg
/* Marks the end of data, bss and possibly rodata */
.dram0.heap_start (NOLOAD) :
{
. = ALIGN (16);
_heap_start = ABSOLUTE(.);
} > dram0_0_seg
}
ASSERT(((_iram_end - ORIGIN(iram0_0_seg)) <= LENGTH(iram0_0_seg)),
"IRAM0 segment data does not fit.")
ASSERT(((_heap_start - ORIGIN(dram0_0_seg)) <= LENGTH(dram0_0_seg)),
"DRAM segment data does not fit.")

View File

@@ -61,6 +61,11 @@
#include "esp32c2/rom/cache.h"
#include "esp32c2/rom/rtc.h"
#include "esp32c2/rom/secure_boot.h"
#elif CONFIG_IDF_TARGET_ESP32P4
#include "esp32p4/rtc.h"
#include "soc/hp_sys_clkrst_reg.h"
#include "soc/interrupt_core0_reg.h"
#include "soc/interrupt_core1_reg.h"
#endif
#include "esp_private/esp_mmu_map_private.h"
@@ -163,6 +168,28 @@ void startup_resume_other_cores(void)
void IRAM_ATTR call_start_cpu1(void)
{
#ifdef __riscv
// Configure the global pointer register
// (This should be the first thing IDF app does, as any other piece of code could be
// relaxed by the linker to access something relative to __global_pointer$)
__asm__ __volatile__ (
".option push\n"
".option norelax\n"
"la gp, __global_pointer$\n"
".option pop"
);
#endif //#ifdef __riscv
#if CONFIG_IDF_TARGET_ESP32P4
//TODO: IDF-7770
//set mstatus.fs=2'b01, floating-point unit in the initialization state
asm volatile(
"li t0, 0x2000\n"
"csrrs t0, mstatus, t0\n"
:::"t0"
);
#endif //#if CONFIG_IDF_TARGET_ESP32P4
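/* Hedged note on the 0x2000 constant in the csrrs sequence above: per the RISC-V
 * privileged spec, mstatus.FS occupies bits [14:13] (00 = Off, 01 = Initial,
 * 10 = Clean, 11 = Dirty), so setting bit 13 switches the FPU from Off to the
 * Initial state and floating-point instructions stop trapping. The defines below
 * only restate that arithmetic and are illustrative, not used elsewhere. */
#define MSTATUS_FS_SHIFT   13
#define MSTATUS_FS_INITIAL (1u << MSTATUS_FS_SHIFT)   /* == 0x2000, the value loaded into t0 */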
#if SOC_BRANCH_PREDICTOR_SUPPORTED
esp_cpu_branch_prediction_enable();
#endif //#if SOC_BRANCH_PREDICTOR_SUPPORTED
@@ -188,6 +215,8 @@ void IRAM_ATTR call_start_cpu1(void)
#if CONFIG_IDF_TARGET_ESP32
DPORT_REG_SET_BIT(DPORT_APP_CPU_RECORD_CTRL_REG, DPORT_APP_CPU_PDEBUG_ENABLE | DPORT_APP_CPU_RECORD_ENABLE);
DPORT_REG_CLR_BIT(DPORT_APP_CPU_RECORD_CTRL_REG, DPORT_APP_CPU_RECORD_ENABLE);
#elif CONFIG_IDF_TARGET_ESP32P4
//TODO: IDF-7688
#else
REG_WRITE(ASSIST_DEBUG_CORE_1_RCD_PDEBUGENABLE_REG, 1);
REG_WRITE(ASSIST_DEBUG_CORE_1_RCD_RECORDING_REG, 1);
@@ -259,6 +288,13 @@ static void start_other_core(void)
REG_SET_BIT(SYSTEM_CORE_1_CONTROL_0_REG, SYSTEM_CONTROL_CORE_1_RESETING);
REG_CLR_BIT(SYSTEM_CORE_1_CONTROL_0_REG, SYSTEM_CONTROL_CORE_1_RESETING);
}
#elif CONFIG_IDF_TARGET_ESP32P4
if (!REG_GET_BIT(HP_SYS_CLKRST_SOC_CLK_CTRL0_REG, HP_SYS_CLKRST_REG_CORE1_CPU_CLK_EN)) {
REG_SET_BIT(HP_SYS_CLKRST_SOC_CLK_CTRL0_REG, HP_SYS_CLKRST_REG_CORE1_CPU_CLK_EN);
}
if(REG_GET_BIT(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_CORE1_GLOBAL)){
REG_CLR_BIT(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_CORE1_GLOBAL);
}
#endif
ets_set_appcpu_boot_addr((uint32_t)call_start_cpu1);
@@ -269,10 +305,13 @@ static void start_other_core(void)
for (int i = 0; i < SOC_CPU_CORES_NUM; i++) {
cpus_up &= s_cpu_up[i];
}
//TODO: IDF-7891, check mixing logs
esp_rom_delay_us(100);
}
}
#if !CONFIG_IDF_TARGET_ESP32P4
//TODO: IDF-7692
// This function is needed to make the multicore app runnable on a unicore bootloader (built with FREERTOS UNICORE).
// It does some cache settings for other CPUs.
void IRAM_ATTR do_multicore_settings(void)
@@ -303,6 +342,7 @@ void IRAM_ATTR do_multicore_settings(void)
cache_hal_enable(CACHE_TYPE_ALL);
#endif
}
#endif //#if !CONFIG_IDF_TARGET_ESP32P4
#endif // !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE
/*
@@ -336,6 +376,16 @@ void IRAM_ATTR call_start_cpu0(void)
);
#endif
#if CONFIG_IDF_TARGET_ESP32P4
//TODO: IDF-7770
//set mstatus.fs=2'b01, floating-point unit in the initialization state
asm volatile(
"li t0, 0x2000\n"
"csrrs t0, mstatus, t0\n"
:::"t0"
);
#endif //#if CONFIG_IDF_TARGET_ESP32P4
#if SOC_BRANCH_PREDICTOR_SUPPORTED
esp_cpu_branch_prediction_enable();
#endif
@@ -371,8 +421,11 @@ void IRAM_ATTR call_start_cpu0(void)
ESP_EARLY_LOGI(TAG, "Unicore app");
#else
ESP_EARLY_LOGI(TAG, "Multicore app");
#if !CONFIG_IDF_TARGET_ESP32P4
//TODO: IDF-7692
// It helps to fix missed cache settings for other cores. It happens when bootloader is unicore.
do_multicore_settings();
#endif //#if !CONFIG_IDF_TARGET_ESP32P4
#endif
#endif // !CONFIG_APP_BUILD_TYPE_PURE_RAM_APP
@@ -617,10 +670,12 @@ void IRAM_ATTR call_start_cpu0(void)
#endif
#endif
#if !CONFIG_IDF_TARGET_ESP32P4 //TODO: IDF-7529
// Need to unhold the IOs that were hold right before entering deep sleep, which are used as wakeup pins
if (rst_reas[0] == RESET_REASON_CORE_DEEP_SLEEP) {
esp_deep_sleep_wakeup_io_reset();
}
#endif //#if !CONFIG_IDF_TARGET_ESP32P4
#if !CONFIG_APP_BUILD_TYPE_PURE_RAM_APP
esp_cache_err_int_init();

View File

@@ -0,0 +1,11 @@
set(srcs "clk.c"
"reset_reason.c"
"system_internal.c"
"cache_err_int.c"
"../../arch/riscv/expression_with_stack.c"
"../../arch/riscv/panic_arch.c"
"../../arch/riscv/debug_stubs.c")
add_prefix(srcs "${CMAKE_CURRENT_LIST_DIR}/" ${srcs})
target_sources(${COMPONENT_LIB} PRIVATE ${srcs})

View File

@@ -0,0 +1,43 @@
menu "Cache config"
choice ESP32P4_L2_CACHE_SIZE
prompt "L2 cache size"
default ESP32P4_L2_CACHE_128KB
help
L2 cache size to be set on application startup.
config ESP32P4_L2_CACHE_128KB
bool "128KB"
config ESP32P4_L2_CACHE_256KB
bool "256KB"
config ESP32P4_L2_CACHE_512KB
bool "512KB"
endchoice
config ESP32P4_L2_CACHE_SIZE
hex
default 0x20000 if ESP32P4_L2_CACHE_128KB
default 0x40000 if ESP32P4_L2_CACHE_256KB
default 0x80000 if ESP32P4_L2_CACHE_512KB
choice ESP32P4_L2_CACHE_LINE_SIZE
prompt "L2 cache line size"
default ESP32P4_L2_CACHE_LINE_64B if ESP32P4_L2_CACHE_128KB
default ESP32P4_L2_CACHE_LINE_64B if ESP32P4_L2_CACHE_256KB
default ESP32P4_L2_CACHE_LINE_128B if ESP32P4_L2_CACHE_512KB
help
L2 cache line size to be set on application startup.
config ESP32P4_L2_CACHE_LINE_64B
bool "64 Bytes"
depends on ESP32P4_L2_CACHE_128KB || ESP32P4_L2_CACHE_256KB
config ESP32P4_L2_CACHE_LINE_128B
bool "128 Bytes"
endchoice
config ESP32P4_L2_CACHE_LINE_SIZE
int
default 64 if ESP32P4_L2_CACHE_LINE_64B
default 128 if ESP32P4_L2_CACHE_LINE_128B
endmenu # Cache config
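A hedged sanity-check sketch of how the derived symbols above surface in C, for the 256 KB selection; the values mirror the defaults in this Kconfig and the CONFIG_ prefixes are the ones Kconfig generates:

#include "sdkconfig.h"

#if CONFIG_ESP32P4_L2_CACHE_256KB
/* CONFIG_ESP32P4_L2_CACHE_SIZE      -> 0x40000 (256 KB)                 */
/* CONFIG_ESP32P4_L2_CACHE_LINE_SIZE -> 64 (default line for 128/256 KB) */
_Static_assert(CONFIG_ESP32P4_L2_CACHE_SIZE == 0x40000, "matches the hex default above");
_Static_assert(CONFIG_ESP32P4_L2_CACHE_LINE_SIZE == 64, "matches the int default above");
#endif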

View File

@@ -0,0 +1,61 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
/*
The cache has an interrupt that can be raised as soon as an access to a cached
region (flash) is done without the cache being enabled. We use that here
to panic the CPU, which from a debugging perspective is better than grabbing bad
data from the bus.
*/
#include "esp_rom_sys.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_intr_alloc.h"
#include "soc/periph_defs.h"
#include "riscv/interrupt.h"
#include "hal/cache_ll.h"
static const char *TAG = "CACHE_ERR";
//TODO: IDF-7515
void esp_cache_err_int_init(void)
{
const uint32_t core_id = 0;
/* Disable cache interrupts if enabled. */
ESP_INTR_DISABLE(ETS_CACHEERR_INUM);
/**
* Bind all cache errors to the ETS_CACHEERR_INUM interrupt. We will deal with
* them in the handler according to their type.
*
* On ESP32P4 boards, the cache is a shared one but buses are still
* distinct. So, we have a bus0 and a bus1 sharing the same cache.
* This error can occur if a bus performs a request but the cache
* is disabled.
*/
esp_rom_route_intr_matrix(core_id, ETS_CACHE_INTR_SOURCE, ETS_CACHEERR_INUM);
/* Set the type and priority to cache error interrupts. */
esprv_intc_int_set_type(ETS_CACHEERR_INUM, INTR_TYPE_LEVEL);
esprv_intc_int_set_priority(ETS_CACHEERR_INUM, SOC_INTERRUPT_LEVEL_MEDIUM);
ESP_DRAM_LOGV(TAG, "access error intr clr & ena mask is: 0x%x", CACHE_LL_L1_ACCESS_EVENT_MASK);
/* On the hardware side, start by clearing all the bits responsible for cache access errors */
cache_ll_l1_clear_access_error_intr(0, CACHE_LL_L1_ACCESS_EVENT_MASK);
/* Then enable cache access error interrupts. */
cache_ll_l1_enable_access_error_intr(0, CACHE_LL_L1_ACCESS_EVENT_MASK);
/* Enable the interrupts for cache error. */
ESP_INTR_ENABLE(ETS_CACHEERR_INUM);
}
int IRAM_ATTR esp_cache_err_get_cpuid(void)
{
//TODO: IDF-7515
//Should return hart ID according to the cache error
return 0;
}

View File

@@ -0,0 +1,277 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <stdint.h>
#include <sys/cdefs.h>
#include <sys/time.h>
#include <sys/param.h>
#include "sdkconfig.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_clk_internal.h"
#include "esp32p4/rom/ets_sys.h"
#include "esp32p4/rom/uart.h"
#include "soc/soc.h"
#include "soc/rtc.h"
#include "soc/rtc_periph.h"
#include "soc/i2s_reg.h"
#include "esp_cpu.h"
#include "hal/wdt_hal.h"
#include "esp_private/esp_modem_clock.h"
#include "esp_private/periph_ctrl.h"
#include "esp_private/esp_clk.h"
#include "esp_private/esp_pmu.h"
#include "esp_rom_uart.h"
#include "esp_rom_sys.h"
/* Number of cycles to wait from the 32k XTAL oscillator to consider it running.
* Larger values increase startup delay. Smaller values may cause false positive
* detection (i.e. oscillator runs for a few cycles and then stops).
*/
#define SLOW_CLK_CAL_CYCLES CONFIG_RTC_CLK_CAL_CYCLES
#define MHZ (1000000)
static void select_rtc_slow_clk(soc_rtc_slow_clk_src_t rtc_slow_clk_src);
static const char *TAG = "clk";
__attribute__((weak)) void esp_clk_init(void)
{
#if SOC_PMU_SUPPORTED
pmu_init();
#endif //SOC_PMU_SUPPORTED
assert(rtc_clk_xtal_freq_get() == RTC_XTAL_FREQ_40M);
rtc_clk_8m_enable(true);
rtc_clk_fast_src_set(SOC_RTC_FAST_CLK_SRC_RC_FAST);
#ifdef CONFIG_BOOTLOADER_WDT_ENABLE
// WDT uses a SLOW_CLK clock source. After select_rtc_slow_clk() is called, the frequency of this source can change.
// If the frequency changes from 150kHz to 32kHz, then the timeout set for the WDT will increase 4.6 times.
// Therefore, for the time of frequency change, set a new lower timeout value (1.6 sec).
// This prevents an excessive delay before resetting in case the supply voltage drops.
// (If the frequency is changed from 150kHz to 32kHz, the WDT timeout will increase to 1.6 sec * 150/32 = 7.5 sec.)
wdt_hal_context_t rtc_wdt_ctx = RWDT_HAL_CONTEXT_DEFAULT();
uint32_t stage_timeout_ticks = (uint32_t)(1600ULL * rtc_clk_slow_freq_get_hz() / 1000ULL);
wdt_hal_write_protect_disable(&rtc_wdt_ctx);
wdt_hal_feed(&rtc_wdt_ctx);
//Bootloader has enabled RTC WDT until now. We're only modifying timeout, so keep the stage and timeout action the same
wdt_hal_config_stage(&rtc_wdt_ctx, WDT_STAGE0, stage_timeout_ticks, WDT_STAGE_ACTION_RESET_RTC);
wdt_hal_write_protect_enable(&rtc_wdt_ctx);
#endif
#if defined(CONFIG_RTC_CLK_SRC_EXT_CRYS)
select_rtc_slow_clk(SOC_RTC_SLOW_CLK_SRC_XTAL32K);
#elif defined(CONFIG_RTC_CLK_SRC_EXT_OSC)
select_rtc_slow_clk(SOC_RTC_SLOW_CLK_SRC_OSC_SLOW);
#elif defined(CONFIG_RTC_CLK_SRC_INT_RC32K)
select_rtc_slow_clk(SOC_RTC_SLOW_CLK_SRC_RC32K);
#else
select_rtc_slow_clk(SOC_RTC_SLOW_CLK_SRC_RC_SLOW);
#endif
#ifdef CONFIG_BOOTLOADER_WDT_ENABLE
// After changing a frequency WDT timeout needs to be set for new frequency.
stage_timeout_ticks = (uint32_t)((uint64_t)CONFIG_BOOTLOADER_WDT_TIME_MS * rtc_clk_slow_freq_get_hz() / 1000);
wdt_hal_write_protect_disable(&rtc_wdt_ctx);
wdt_hal_feed(&rtc_wdt_ctx);
wdt_hal_config_stage(&rtc_wdt_ctx, WDT_STAGE0, stage_timeout_ticks, WDT_STAGE_ACTION_RESET_RTC);
wdt_hal_write_protect_enable(&rtc_wdt_ctx);
#endif
rtc_cpu_freq_config_t old_config, new_config;
rtc_clk_cpu_freq_get_config(&old_config);
const uint32_t old_freq_mhz = old_config.freq_mhz;
const uint32_t new_freq_mhz = CONFIG_ESP_DEFAULT_CPU_FREQ_MHZ;
bool res = rtc_clk_cpu_freq_mhz_to_config(new_freq_mhz, &new_config);
assert(res);
// Wait for UART TX to finish, otherwise some UART output will be lost
// when switching APB frequency
esp_rom_uart_tx_wait_idle(CONFIG_ESP_CONSOLE_UART_NUM);
if (res) {
rtc_clk_cpu_freq_set_config(&new_config);
}
// Recalculate the ccount to make time calculation correct.
esp_cpu_set_cycle_count( (uint64_t)esp_cpu_get_cycle_count() * new_freq_mhz / old_freq_mhz );
}
static void select_rtc_slow_clk(soc_rtc_slow_clk_src_t rtc_slow_clk_src)
{
uint32_t cal_val = 0;
/* number of times to repeat 32k XTAL calibration
* before giving up and switching to the internal RC
*/
int retry_32k_xtal = 3;
do {
if (rtc_slow_clk_src == SOC_RTC_SLOW_CLK_SRC_XTAL32K || rtc_slow_clk_src == SOC_RTC_SLOW_CLK_SRC_OSC_SLOW) {
/* 32k XTAL oscillator needs to be enabled and running before it can
* be used. Hardware doesn't have a direct way of checking if the
* oscillator is running. Here we use rtc_clk_cal function to count
* the number of main XTAL cycles in the given number of 32k XTAL
* oscillator cycles. If the 32k XTAL has not started up, calibration
* will time out, returning 0.
*/
ESP_EARLY_LOGD(TAG, "waiting for 32k oscillator to start up");
rtc_cal_sel_t cal_sel = 0;
if (rtc_slow_clk_src == SOC_RTC_SLOW_CLK_SRC_XTAL32K) {
rtc_clk_32k_enable(true);
cal_sel = RTC_CAL_32K_XTAL;
} else if (rtc_slow_clk_src == SOC_RTC_SLOW_CLK_SRC_OSC_SLOW) {
rtc_clk_32k_enable_external();
cal_sel = RTC_CAL_32K_OSC_SLOW;
}
// When SLOW_CLK_CAL_CYCLES is set to 0, clock calibration will not be performed at startup.
if (SLOW_CLK_CAL_CYCLES > 0) {
cal_val = rtc_clk_cal(cal_sel, SLOW_CLK_CAL_CYCLES);
if (cal_val == 0) {
if (retry_32k_xtal-- > 0) {
continue;
}
ESP_EARLY_LOGW(TAG, "32 kHz clock not found, switching to internal 150 kHz oscillator");
rtc_slow_clk_src = SOC_RTC_SLOW_CLK_SRC_RC_SLOW;
}
}
} else if (rtc_slow_clk_src == SOC_RTC_SLOW_CLK_SRC_RC32K) {
rtc_clk_rc32k_enable(true);
}
rtc_clk_slow_src_set(rtc_slow_clk_src);
if (SLOW_CLK_CAL_CYCLES > 0) {
/* TODO: 32k XTAL oscillator has some frequency drift at startup.
* Improve calibration routine to wait until the frequency is stable.
*/
cal_val = rtc_clk_cal(RTC_CAL_RTC_MUX, SLOW_CLK_CAL_CYCLES);
} else {
const uint64_t cal_dividend = (1ULL << RTC_CLK_CAL_FRACT) * 1000000ULL;
cal_val = (uint32_t) (cal_dividend / rtc_clk_slow_freq_get_hz());
}
} while (cal_val == 0);
ESP_EARLY_LOGD(TAG, "RTC_SLOW_CLK calibration value: %d", cal_val);
esp_clk_slowclk_cal_set(cal_val);
}
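For a sense of scale of cal_val: it stores the slow-clock period in microseconds as a fixed-point value, left-shifted by RTC_CLK_CAL_FRACT. A hedged worked example, assuming RTC_CLK_CAL_FRACT is 19 as on other targets (the constant itself is not part of this diff):

#include <stdint.h>

#define RTC_CLK_CAL_FRACT_ASSUMED 19   /* assumption, mirrors soc/rtc.h on other chips */

static inline uint32_t cal_val_for_freq(uint32_t slow_clk_hz)
{
    const uint64_t cal_dividend = (1ULL << RTC_CLK_CAL_FRACT_ASSUMED) * 1000000ULL;
    return (uint32_t)(cal_dividend / slow_clk_hz);
}

/* cal_val_for_freq(32768)  == 16000000  (30.52 us per cycle, in Q.19 fixed point) */
/* cal_val_for_freq(150000) ==  3495253  (~6.67 us per cycle, internal RC)         */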
void rtc_clk_select_rtc_slow_clk(void)
{
select_rtc_slow_clk(SOC_RTC_SLOW_CLK_SRC_XTAL32K);
}
/* This function is not exposed as an API at this point.
* All peripheral clocks are enabled by default after the chip is powered on.
* This function disables some peripheral clocks when the CPU starts.
* These peripheral clocks are enabled when the peripherals are initialized
* and disabled when they are de-initialized.
*/
__attribute__((weak)) void esp_perip_clk_init(void)
{
modem_clock_domain_pmu_state_icg_map_init();
ESP_EARLY_LOGW(TAG, "esp_perip_clk_init() has not been implemented yet");
#if 0 // TODO: IDF-5658
uint32_t common_perip_clk, hwcrypto_perip_clk, wifi_bt_sdio_clk = 0;
uint32_t common_perip_clk1 = 0;
soc_reset_reason_t rst_reason = esp_rom_get_reset_reason(0);
/* For reason that only reset CPU, do not disable the clocks
* that have been enabled before reset.
*/
if (rst_reason == RESET_REASON_CPU0_MWDT0 || rst_reason == RESET_REASON_CPU0_SW ||
rst_reason == RESET_REASON_CPU0_RTC_WDT || rst_reason == RESET_REASON_CPU0_MWDT1) {
common_perip_clk = ~READ_PERI_REG(SYSTEM_PERIP_CLK_EN0_REG);
hwcrypto_perip_clk = ~READ_PERI_REG(SYSTEM_PERIP_CLK_EN1_REG);
wifi_bt_sdio_clk = ~READ_PERI_REG(SYSTEM_WIFI_CLK_EN_REG);
} else {
common_perip_clk = SYSTEM_WDG_CLK_EN |
SYSTEM_I2S0_CLK_EN |
#if CONFIG_ESP_CONSOLE_UART_NUM != 0
SYSTEM_UART_CLK_EN |
#endif
#if CONFIG_ESP_CONSOLE_UART_NUM != 1
SYSTEM_UART1_CLK_EN |
#endif
SYSTEM_SPI2_CLK_EN |
SYSTEM_I2C_EXT0_CLK_EN |
SYSTEM_UHCI0_CLK_EN |
SYSTEM_RMT_CLK_EN |
SYSTEM_LEDC_CLK_EN |
SYSTEM_TIMERGROUP1_CLK_EN |
SYSTEM_SPI3_CLK_EN |
SYSTEM_SPI4_CLK_EN |
SYSTEM_TWAI_CLK_EN |
SYSTEM_I2S1_CLK_EN |
SYSTEM_SPI2_DMA_CLK_EN |
SYSTEM_SPI3_DMA_CLK_EN;
common_perip_clk1 = 0;
hwcrypto_perip_clk = SYSTEM_CRYPTO_AES_CLK_EN |
SYSTEM_CRYPTO_SHA_CLK_EN |
SYSTEM_CRYPTO_RSA_CLK_EN;
wifi_bt_sdio_clk = SYSTEM_WIFI_CLK_WIFI_EN |
SYSTEM_WIFI_CLK_BT_EN_M |
SYSTEM_WIFI_CLK_UNUSED_BIT5 |
SYSTEM_WIFI_CLK_UNUSED_BIT12;
}
//Reset the communication peripherals like I2C, SPI, UART, I2S and bring them to known state.
common_perip_clk |= SYSTEM_I2S0_CLK_EN |
#if CONFIG_ESP_CONSOLE_UART_NUM != 0
SYSTEM_UART_CLK_EN |
#endif
#if CONFIG_ESP_CONSOLE_UART_NUM != 1
SYSTEM_UART1_CLK_EN |
#endif
SYSTEM_SPI2_CLK_EN |
SYSTEM_I2C_EXT0_CLK_EN |
SYSTEM_UHCI0_CLK_EN |
SYSTEM_RMT_CLK_EN |
SYSTEM_UHCI1_CLK_EN |
SYSTEM_SPI3_CLK_EN |
SYSTEM_SPI4_CLK_EN |
SYSTEM_I2C_EXT1_CLK_EN |
SYSTEM_I2S1_CLK_EN |
SYSTEM_SPI2_DMA_CLK_EN |
SYSTEM_SPI3_DMA_CLK_EN;
common_perip_clk1 = 0;
/* Change I2S clock to audio PLL first. Because if I2S uses 160MHz clock,
* the current is not reduced when the I2S clock is disabled.
*/
// TOCK(check replacement)
// REG_SET_FIELD(I2S_CLKM_CONF_REG(0), I2S_CLK_SEL, I2S_CLK_AUDIO_PLL);
// REG_SET_FIELD(I2S_CLKM_CONF_REG(1), I2S_CLK_SEL, I2S_CLK_AUDIO_PLL);
/* Disable some peripheral clocks. */
CLEAR_PERI_REG_MASK(SYSTEM_PERIP_CLK_EN0_REG, common_perip_clk);
SET_PERI_REG_MASK(SYSTEM_PERIP_RST_EN0_REG, common_perip_clk);
CLEAR_PERI_REG_MASK(SYSTEM_PERIP_CLK_EN1_REG, common_perip_clk1);
SET_PERI_REG_MASK(SYSTEM_PERIP_RST_EN1_REG, common_perip_clk1);
/* Disable hardware crypto clocks. */
CLEAR_PERI_REG_MASK(SYSTEM_PERIP_CLK_EN1_REG, hwcrypto_perip_clk);
SET_PERI_REG_MASK(SYSTEM_PERIP_RST_EN1_REG, hwcrypto_perip_clk);
/* Disable WiFi/BT/SDIO clocks. */
CLEAR_PERI_REG_MASK(SYSTEM_WIFI_CLK_EN_REG, wifi_bt_sdio_clk);
SET_PERI_REG_MASK(SYSTEM_WIFI_CLK_EN_REG, SYSTEM_WIFI_CLK_EN);
/* Set WiFi light sleep clock source to RTC slow clock */
REG_SET_FIELD(SYSTEM_BT_LPCK_DIV_INT_REG, SYSTEM_BT_LPCK_DIV_NUM, 0);
CLEAR_PERI_REG_MASK(SYSTEM_BT_LPCK_DIV_FRAC_REG, SYSTEM_LPCLK_SEL_8M);
SET_PERI_REG_MASK(SYSTEM_BT_LPCK_DIV_FRAC_REG, SYSTEM_LPCLK_SEL_RTC_SLOW);
/* Enable RNG clock. */
periph_module_enable(PERIPH_RNG_MODULE);
#endif
}

View File

@@ -0,0 +1,110 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "esp_system.h"
#include "esp_rom_sys.h"
#include "esp_private/system_internal.h"
#include "soc/rtc_periph.h"
#include "esp32p4/rom/rtc.h"
static void esp_reset_reason_clear_hint(void);
static esp_reset_reason_t s_reset_reason;
static esp_reset_reason_t get_reset_reason(soc_reset_reason_t rtc_reset_reason, esp_reset_reason_t reset_reason_hint)
{
switch (rtc_reset_reason) {
case RESET_REASON_CHIP_POWER_ON:
return ESP_RST_POWERON;
case RESET_REASON_CPU0_SW:
case RESET_REASON_CORE_SW:
if (reset_reason_hint == ESP_RST_PANIC ||
reset_reason_hint == ESP_RST_BROWNOUT ||
reset_reason_hint == ESP_RST_TASK_WDT ||
reset_reason_hint == ESP_RST_INT_WDT) {
return reset_reason_hint;
}
return ESP_RST_SW;
case RESET_REASON_CORE_DEEP_SLEEP:
return ESP_RST_DEEPSLEEP;
case RESET_REASON_CORE_MWDT0:
return ESP_RST_TASK_WDT;
case RESET_REASON_CORE_MWDT1:
return ESP_RST_INT_WDT;
case RESET_REASON_CORE_RTC_WDT:
case RESET_REASON_SYS_RTC_WDT:
case RESET_REASON_SYS_SUPER_WDT:
case RESET_REASON_CPU0_RTC_WDT:
case RESET_REASON_CPU0_MWDT0:
case RESET_REASON_CPU0_MWDT1:
return ESP_RST_WDT;
case RESET_REASON_SYS_BROWN_OUT:
return ESP_RST_BROWNOUT;
default:
return ESP_RST_UNKNOWN;
}
}
static void __attribute__((constructor)) esp_reset_reason_init(void)
{
esp_reset_reason_t hint = esp_reset_reason_get_hint();
s_reset_reason = get_reset_reason(esp_rom_get_reset_reason(PRO_CPU_NUM), hint);
if (hint != ESP_RST_UNKNOWN) {
esp_reset_reason_clear_hint();
}
}
esp_reset_reason_t esp_reset_reason(void)
{
return s_reset_reason;
}
/* Reset reason hint is stored in RTC_RESET_CAUSE_REG, a.k.a. RTC_CNTL_STORE6_REG,
* a.k.a. RTC_ENTRY_ADDR_REG. It is safe to use this register both for the
* deep sleep wake stub entry address and for reset reason hint, since wake stub
* is only used for deep sleep reset, and in this case the reason provided by
* esp_rom_get_reset_reason is unambiguous.
*
* Same layout is used as for RTC_APB_FREQ_REG (a.k.a. RTC_CNTL_STORE5_REG):
* the value is replicated in low and high half-words. In addition to that,
* MSB is set to 1, which doesn't happen when RTC_CNTL_STORE6_REG contains
* deep sleep wake stub address.
*/
#define RST_REASON_BIT 0x80000000
#define RST_REASON_MASK 0x7FFF
#define RST_REASON_SHIFT 16
/* in IRAM, can be called from panic handler */
void IRAM_ATTR esp_reset_reason_set_hint(esp_reset_reason_t hint)
{
assert((hint & (~RST_REASON_MASK)) == 0);
uint32_t val = hint | (hint << RST_REASON_SHIFT) | RST_REASON_BIT;
REG_WRITE(RTC_RESET_CAUSE_REG, val);
}
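To make the encoding concrete, a hedged worked example for a hypothetical hint value of 4: val = 0x4 | (0x4 << 16) | 0x80000000 = 0x80040004, which passes both checks in esp_reset_reason_get_hint() below (MSB set, low half-word equal to high half-word), whereas a deep sleep wake stub address stored in the same register fails them.

/* Worked example of the encoding implemented above (hypothetical hint value 4) */
_Static_assert((0x4u | (0x4u << RST_REASON_SHIFT) | RST_REASON_BIT) == 0x80040004u,
               "a hint of 4 is stored as 0x80040004");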
/* in IRAM, can be called from panic handler */
esp_reset_reason_t IRAM_ATTR esp_reset_reason_get_hint(void)
{
uint32_t reset_reason_hint = REG_READ(RTC_RESET_CAUSE_REG);
uint32_t high = (reset_reason_hint >> RST_REASON_SHIFT) & RST_REASON_MASK;
uint32_t low = reset_reason_hint & RST_REASON_MASK;
if ((reset_reason_hint & RST_REASON_BIT) == 0 || high != low) {
return ESP_RST_UNKNOWN;
}
return (esp_reset_reason_t) low;
}
static inline void esp_reset_reason_clear_hint(void)
{
REG_WRITE(RTC_RESET_CAUSE_REG, 0);
}

View File

@@ -0,0 +1,143 @@
/*
* SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <string.h>
#include "sdkconfig.h"
#include "esp_system.h"
#include "esp_private/system_internal.h"
#include "esp_attr.h"
#include "esp_log.h"
#include "esp_rom_sys.h"
#include "riscv/rv_utils.h"
#include "esp_rom_uart.h"
#include "soc/gpio_reg.h"
#include "esp_cpu.h"
#include "soc/rtc.h"
#include "esp_private/rtc_clk.h"
#include "soc/rtc_periph.h"
#include "soc/uart_reg.h"
#include "hal/wdt_hal.h"
#include "esp_private/cache_err_int.h"
#include "esp32p4/rom/cache.h"
#include "esp32p4/rom/rtc.h"
#include "soc/hp_sys_clkrst_reg.h"
#include "soc/lp_clkrst_reg.h"
#include "soc/hp_system_reg.h"
void IRAM_ATTR esp_system_reset_modules_on_exit(void)
{
// Flush any data left in UART FIFOs
esp_rom_uart_tx_wait_idle(0);
esp_rom_uart_tx_wait_idle(1);
// Set Peripheral clk rst
SET_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_TIMERGRP0);
SET_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_TIMERGRP1);
SET_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_STIMER);
SET_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_DUAL_MSPI_AXI);
SET_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_MSPI_AXI);
SET_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_UART0_CORE);
SET_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_UART1_CORE);
SET_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_UART2_CORE);
SET_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_UART3_CORE);
SET_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_UART4_CORE);
SET_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_GDMA);
// Clear Peripheral clk rst
CLEAR_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_TIMERGRP0);
CLEAR_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_TIMERGRP1);
CLEAR_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_STIMER);
CLEAR_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_DUAL_MSPI_AXI);
CLEAR_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_MSPI_AXI);
CLEAR_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_UART0_CORE);
CLEAR_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_UART1_CORE);
CLEAR_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_UART2_CORE);
CLEAR_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_UART3_CORE);
CLEAR_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN1_REG, HP_SYS_CLKRST_REG_RST_EN_UART4_CORE);
CLEAR_PERI_REG_MASK(HP_SYS_CLKRST_HP_RST_EN0_REG, HP_SYS_CLKRST_REG_RST_EN_GDMA);
}
/* "inner" restart function for after RTOS, interrupts & anything else on this
* core are already stopped. Stalls other core, resets hardware,
* triggers restart.
*/
void IRAM_ATTR esp_restart_noos(void)
{
// Disable interrupts
rv_utils_intr_global_disable();
// Enable RTC watchdog for 1 second
wdt_hal_context_t rtc_wdt_ctx;
wdt_hal_init(&rtc_wdt_ctx, WDT_RWDT, 0, false);
uint32_t stage_timeout_ticks = (uint32_t)(1000ULL * rtc_clk_slow_freq_get_hz() / 1000ULL);
wdt_hal_write_protect_disable(&rtc_wdt_ctx);
wdt_hal_config_stage(&rtc_wdt_ctx, WDT_STAGE0, stage_timeout_ticks, WDT_STAGE_ACTION_RESET_SYSTEM);
wdt_hal_config_stage(&rtc_wdt_ctx, WDT_STAGE1, stage_timeout_ticks, WDT_STAGE_ACTION_RESET_RTC);
//Enable flash boot mode so that flash booting after restart is protected by the RTC WDT.
wdt_hal_set_flashboot_en(&rtc_wdt_ctx, true);
wdt_hal_write_protect_enable(&rtc_wdt_ctx);
const uint32_t core_id = esp_cpu_get_core_id();
#if !CONFIG_FREERTOS_UNICORE
const uint32_t other_core_id = (core_id == 0) ? 1 : 0;
esp_cpu_reset(other_core_id);
esp_cpu_stall(other_core_id);
#endif
// Disable TG0/TG1 watchdogs
wdt_hal_context_t wdt0_context = {.inst = WDT_MWDT0, .mwdt_dev = &TIMERG0};
wdt_hal_write_protect_disable(&wdt0_context);
wdt_hal_disable(&wdt0_context);
wdt_hal_write_protect_enable(&wdt0_context);
wdt_hal_context_t wdt1_context = {.inst = WDT_MWDT1, .mwdt_dev = &TIMERG1};
wdt_hal_write_protect_disable(&wdt1_context);
wdt_hal_disable(&wdt1_context);
wdt_hal_write_protect_enable(&wdt1_context);
// Disable cache
Cache_Disable_L2_Cache();
esp_system_reset_modules_on_exit();
// Set CPU back to XTAL source, no PLL, same as hard reset
#if !CONFIG_IDF_ENV_FPGA
rtc_clk_cpu_freq_set_xtal();
#endif
#if !CONFIG_FREERTOS_UNICORE
// clear entry point for APP CPU
ets_set_appcpu_boot_addr(0);
#endif
#if CONFIG_SPIRAM_INSTRUCTIONS_RODATA
//TODO: IDF-7556
// disable remap if enabled in menuconfig
REG_CLR_BIT(HP_SYS_HP_PSRAM_FLASH_ADDR_INTERCHANGE_REG, HP_SYS_HP_PSRAM_FLASH_ADDR_INTERCHANGE_DMA | HP_SYS_HP_PSRAM_FLASH_ADDR_INTERCHANGE_CPU);
#endif
// Reset CPUs
if (core_id == 0) {
// Running on PRO CPU: APP CPU is stalled. Can reset both CPUs.
#if !CONFIG_FREERTOS_UNICORE
esp_cpu_reset(1);
#endif
esp_cpu_reset(0);
}
#if !CONFIG_FREERTOS_UNICORE
else {
// Running on APP CPU: need to reset PRO CPU and unstall it,
// then reset APP CPU
esp_cpu_reset(0);
esp_cpu_unstall(0);
esp_cpu_reset(1);
}
#endif
while (true) {
;
}
}

View File

@@ -27,6 +27,8 @@
#include "esp32c6/rtc.h"
#elif CONFIG_IDF_TARGET_ESP32H2
#include "esp32h2/rtc.h"
#elif CONFIG_IDF_TARGET_ESP32P4
#include "esp32p4/rtc.h"
#endif
#include "esp_private/startup_internal.h"

View File

@@ -1,5 +1,5 @@
/*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/

View File

@@ -61,9 +61,9 @@ typedef enum {
ETS_RMT_INTR_SOURCE,
ETS_I2C0_INTR_SOURCE,
ETS_I2C1_INTR_SOURCE,
ETS_TIMERGROUP0_T0_INTR_SOURCE, ETS_TG0_WDT_LEVEL_INTR_SOURCE,
ETS_TIMERGROUP0_T1_INTR_SOURCE, ETS_TG1_WDT_LEVEL_INTR_SOURCE,
ETS_TIMERGROUP0_WDT_INTR_SOURCE, ETS_TG0_WDT_INTR_SOURCE,
ETS_TIMERGROUP1_T0_INTR_SOURCE,
ETS_TIMERGROUP1_T1_INTR_SOURCE,
ETS_TIMERGROUP1_WDT_INTR_SOURCE,
@@ -96,10 +96,10 @@ typedef enum {
ETS_GPIO_INTR2_SOURCE,
ETS_GPIO_INTR3_SOURCE,
ETS_GPIO_PAD_COMP_INTR_SOURCE,
ETS_CPU_INT_FROM_CPU0_INTR_SOURCE, ETS_FROM_CPU_INTR0_SOURCE,
ETS_CPU_INT_FROM_CPU1_INTR_SOURCE, ETS_FROM_CPU_INTR1_SOURCE,
ETS_CPU_INT_FROM_CPU2_INTR_SOURCE, ETS_FROM_CPU_INTR2_SOURCE,
ETS_CPU_INT_FROM_CPU3_INTR_SOURCE, ETS_FROM_CPU_INTR3_SOURCE,
ETS_CACHE_INTR_SOURCE,
ETS_MSPI_INTR_SOURCE,
ETS_CSI_BRIDGE_INTR_SOURCE,