fix(esp32p4): Fixed interrupt handling to use the CLIC controller

Omar Chebib
2023-08-14 15:44:24 +08:00
parent eb8883cc20
commit 8ca191e4c1
27 changed files with 822 additions and 510 deletions

View File

@@ -156,21 +156,32 @@ void esp_cpu_wait_for_intr(void)
#if SOC_CPU_HAS_FLEXIBLE_INTC #if SOC_CPU_HAS_FLEXIBLE_INTC
#if SOC_INT_CLIC_SUPPORTED
static bool is_intr_num_resv(int ext_intr_num) {
/* On targets that use the CLIC as their interrupt controller, the first 16 lines (0..15) are reserved for software
 * interrupts; all the other lines, starting from 16, can be used by external peripherals.
 * In the case of this function, the parameter only refers to the external peripheral index, so if
 * `ext_intr_num` is 0, it refers to interrupt index 16.
*
* Only interrupt line 6 is reserved at the moment since it is used for disabling interrupts */
return ext_intr_num == 6;
}
#else // !SOC_INT_CLIC_SUPPORTED
static bool is_intr_num_resv(int intr_num) static bool is_intr_num_resv(int intr_num)
{ {
// Workaround to reserve interrupt number 1 for Wi-Fi, 5,8 for Bluetooth, 6 for "permanently disabled interrupt" // Workaround to reserve interrupt number 1 for Wi-Fi, 5,8 for Bluetooth, 6 for "permanently disabled interrupt"
// [TODO: IDF-2465] // [TODO: IDF-2465]
uint32_t reserved = BIT(1) | BIT(5) | BIT(6) | BIT(8); uint32_t reserved = BIT(1) | BIT(5) | BIT(6) | BIT(8);
// int_num 0,3,4,7 are inavaliable for PULP cpu // int_num 0,3,4,7 are unavailable for PULP cpu
#if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2// TODO: IDF-5728 replace with a better macro name #if CONFIG_IDF_TARGET_ESP32C6 || CONFIG_IDF_TARGET_ESP32H2// TODO: IDF-5728 replace with a better macro name
reserved |= BIT(0) | BIT(3) | BIT(4) | BIT(7); reserved |= BIT(0) | BIT(3) | BIT(4) | BIT(7);
#endif #endif
#if SOC_INT_CLIC_SUPPORTED
//TODO: IDF-7795
return false;
#endif
if (reserved & BIT(intr_num)) { if (reserved & BIT(intr_num)) {
return true; return true;
} }
@@ -185,6 +196,8 @@ static bool is_intr_num_resv(int intr_num)
return destination != (intptr_t)&_interrupt_handler; return destination != (intptr_t)&_interrupt_handler;
} }
#endif // SOC_INT_CLIC_SUPPORTED
void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret) void esp_cpu_intr_get_desc(int core_id, int intr_num, esp_cpu_intr_desc_t *intr_desc_ret)
{ {
intr_desc_ret->priority = 1; //Todo: We should make this -1 intr_desc_ret->priority = 1; //Todo: We should make this -1
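
Relating to the CLIC reservation comment in is_intr_num_resv() above, a minimal C sketch of the external-index mapping; the constant and helper names below are illustrative only, mirroring the CLIC_EXT_INTR_NUM_OFFSET used later in this commit:

#include <stdbool.h>

/* Illustrative only: on CLIC targets the first 16 lines are internal, so an
 * external peripheral interrupt index N maps to CLIC interrupt line 16 + N. */
#define EXAMPLE_CLIC_EXT_INTR_NUM_OFFSET 16

static inline int example_ext_to_clic_line(int ext_intr_num)
{
    return EXAMPLE_CLIC_EXT_INTR_NUM_OFFSET + ext_intr_num;
}

/* External index 6 is the line reserved for disabling interrupts, i.e. CLIC line 22. */
static inline bool example_is_ext_intr_reserved(int ext_intr_num)
{
    return ext_intr_num == 6;
}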

View File

@@ -233,7 +233,7 @@ FORCE_INLINE_ATTR void esp_cpu_intr_set_ivt_addr(const void *ivt_addr)
#endif #endif
} }
#if CONFIG_IDF_TARGET_ESP32P4 #if SOC_INT_CLIC_SUPPORTED
//TODO: IDF-7863 //TODO: IDF-7863
//"MTVT is only implemented in RISC-V arch" //"MTVT is only implemented in RISC-V arch"
/** /**
@@ -245,7 +245,7 @@ FORCE_INLINE_ATTR void esp_cpu_intr_set_mtvt_addr(const void *mtvt_addr)
{ {
rv_utils_set_mtvt((uint32_t)mtvt_addr); rv_utils_set_mtvt((uint32_t)mtvt_addr);
} }
#endif //#if CONFIG_IDF_TARGET_ESP32P4 #endif //#if SOC_INT_CLIC_SUPPORTED
#if SOC_CPU_HAS_FLEXIBLE_INTC #if SOC_CPU_HAS_FLEXIBLE_INTC
/** /**

View File

@@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@@ -30,6 +30,9 @@
#include "esp_ipc.h" #include "esp_ipc.h"
#endif #endif
/* For targets that use a CLIC as their interrupt controller, CPU_INT_LINES_COUNT represents the number of external interrupt lines */
#define CPU_INT_LINES_COUNT 32
static const char* TAG = "intr_alloc"; static const char* TAG = "intr_alloc";
#define ETS_INTERNAL_TIMER0_INTR_NO 6 #define ETS_INTERNAL_TIMER0_INTR_NO 6
@@ -161,7 +164,7 @@ static vector_desc_t *get_desc_for_int(int intno, int cpu)
} }
} }
//Returns a vector_desc entry for an source, the cpu parameter is used to tell GPIO_INT and GPIO_NMI from different CPUs //Returns a vector_desc entry for a source, the cpu parameter is used to tell GPIO_INT and GPIO_NMI from different CPUs
static vector_desc_t * find_desc_for_source(int source, int cpu) static vector_desc_t * find_desc_for_source(int source, int cpu)
{ {
vector_desc_t *vd = vector_desc_head; vector_desc_t *vd = vector_desc_head;
@@ -326,11 +329,11 @@ static int get_available_int(int flags, int cpu, int force, int source)
vector_desc_t *vd = find_desc_for_source(source, cpu); vector_desc_t *vd = find_desc_for_source(source, cpu);
if (vd) { if (vd) {
// if existing vd found, don't need to search any more. // if existing vd found, don't need to search any more.
ALCHLOG("get_avalible_int: existing vd found. intno: %d", vd->intno); ALCHLOG("get_available_int: existing vd found. intno: %d", vd->intno);
if ( force != -1 && force != vd->intno ) { if ( force != -1 && force != vd->intno ) {
ALCHLOG("get_avalible_int: intr forced but not matach existing. existing intno: %d, force: %d", vd->intno, force); ALCHLOG("get_available_int: intr forced but does not match existing. existing intno: %d, force: %d", vd->intno, force);
} else if (!is_vect_desc_usable(vd, flags, cpu, force)) { } else if (!is_vect_desc_usable(vd, flags, cpu, force)) {
ALCHLOG("get_avalible_int: existing vd invalid."); ALCHLOG("get_available_int: existing vd invalid.");
} else { } else {
best = vd->intno; best = vd->intno;
} }
@@ -348,14 +351,14 @@ static int get_available_int(int flags, int cpu, int force, int source)
if (is_vect_desc_usable(vd, flags, cpu, force)) { if (is_vect_desc_usable(vd, flags, cpu, force)) {
best = vd->intno; best = vd->intno;
} else { } else {
ALCHLOG("get_avalible_int: forced vd invalid."); ALCHLOG("get_avalaible_int: forced vd invalid.");
} }
return best; return best;
} }
ALCHLOG("get_free_int: start looking. Current cpu: %d", cpu); ALCHLOG("get_free_int: start looking. Current cpu: %d", cpu);
//No allocated handlers as well as forced intr, iterate over the 32 possible interrupts /* No allocated handler and no forced interrupt: iterate over all the CPU interrupt lines */
for (x = 0; x < 32; x++) { for (x = 0; x < CPU_INT_LINES_COUNT; x++) {
//Grab the vector_desc for this vector. //Grab the vector_desc for this vector.
vd = find_desc_for_int(x, cpu); vd = find_desc_for_int(x, cpu);
if (vd == NULL) { if (vd == NULL) {
@@ -811,12 +814,13 @@ esp_err_t IRAM_ATTR esp_intr_enable(intr_handle_t handle)
esp_err_t IRAM_ATTR esp_intr_disable(intr_handle_t handle) esp_err_t IRAM_ATTR esp_intr_disable(intr_handle_t handle)
{ {
if (!handle) { if (handle == NULL) {
return ESP_ERR_INVALID_ARG; return ESP_ERR_INVALID_ARG;
} }
portENTER_CRITICAL_SAFE(&spinlock); portENTER_CRITICAL_SAFE(&spinlock);
int source; int source;
bool disabled = 1; bool disabled = true;
if (handle->shared_vector_desc) { if (handle->shared_vector_desc) {
handle->shared_vector_desc->disabled = 1; handle->shared_vector_desc->disabled = 1;
source=handle->shared_vector_desc->source; source=handle->shared_vector_desc->source;
@@ -824,8 +828,8 @@ esp_err_t IRAM_ATTR esp_intr_disable(intr_handle_t handle)
shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info; shared_vector_desc_t *svd = handle->vector_desc->shared_vec_info;
assert(svd != NULL); assert(svd != NULL);
while(svd) { while(svd) {
if (svd->source == source && svd->disabled == 0) { if (svd->source == source && !svd->disabled) {
disabled = 0; disabled = false;
break; break;
} }
svd = svd->next; svd = svd->next;
@@ -924,7 +928,7 @@ esp_err_t esp_intr_dump(FILE *stream)
for (int cpu = 0; cpu < cpu_num; ++cpu) { for (int cpu = 0; cpu < cpu_num; ++cpu) {
fprintf(stream, "CPU %d interrupt status:\n", cpu); fprintf(stream, "CPU %d interrupt status:\n", cpu);
fprintf(stream, " Int Level Type Status\n"); fprintf(stream, " Int Level Type Status\n");
for (int i_num = 0; i_num < 32; ++i_num) { for (int i_num = 0; i_num < CPU_INT_LINES_COUNT; ++i_num) {
fprintf(stream, " %2d ", i_num); fprintf(stream, " %2d ", i_num);
esp_cpu_intr_desc_t intr_desc; esp_cpu_intr_desc_t intr_desc;
esp_cpu_intr_get_desc(cpu, i_num, &intr_desc); esp_cpu_intr_get_desc(cpu, i_num, &intr_desc);

View File

@@ -1,5 +1,5 @@
/* /*
* SPDX-FileCopyrightText: 2010-2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2010-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
@@ -76,12 +76,14 @@ soc_reset_reason_t esp_rom_get_reset_reason(int cpu_no);
* Usually there're 4 steps to use an interrupt: * Usually there're 4 steps to use an interrupt:
* 1. Route peripheral interrupt source to CPU. e.g. esp_rom_route_intr_matrix(0, ETS_WIFI_MAC_INTR_SOURCE, ETS_WMAC_INUM) * 1. Route peripheral interrupt source to CPU. e.g. esp_rom_route_intr_matrix(0, ETS_WIFI_MAC_INTR_SOURCE, ETS_WMAC_INUM)
* 2. Set interrupt handler for CPU * 2. Set interrupt handler for CPU
* 3. Enable CPU interupt * 3. Enable CPU interrupt
* 4. Enable peripheral interrupt * 4. Enable peripheral interrupt
* *
* @param cpu_core The CPU number, which the peripheral interupt will inform to * @param cpu_core The CPU number, which the peripheral interrupt will inform to
* @param periph_intr_id The peripheral interrupt source number * @param periph_intr_id The peripheral interrupt source number
* @param cpu_intr_num The CPU interrupt number * @param cpu_intr_num The CPU (external) interrupt number. On targets that use CLIC as their interrupt controller,
* this number represents the external interrupt number. For example, passing `cpu_intr_num = i`
* to this function would in fact bind the peripheral source to CPU interrupt `CLIC_EXT_INTR_NUM_OFFSET + i`.
*/ */
void esp_rom_route_intr_matrix(int cpu_core, uint32_t periph_intr_id, uint32_t cpu_intr_num); void esp_rom_route_intr_matrix(int cpu_core, uint32_t periph_intr_id, uint32_t cpu_intr_num);
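
For illustration, a minimal sketch of the four steps documented above, using the Wi-Fi MAC source from the example in the comment. The header paths and esp_cpu_intr_enable() are assumptions, and step 4 is peripheral specific; in applications, the higher-level esp_intr_alloc() API (also touched by this commit) normally performs these steps:

#include "esp_rom_sys.h"       /* esp_rom_route_intr_matrix() (assumed header) */
#include "esp_cpu.h"           /* esp_cpu_intr_enable() (assumed available) */
#include "riscv/interrupt.h"   /* intr_handler_set(), shown later in this commit (assumed header) */
#include "soc/soc.h"           /* ETS_* interrupt constants (assumed header) */

static void example_wifi_isr(void *arg)
{
    (void)arg;                 /* peripheral-specific handling would go here */
}

static void example_setup_wifi_interrupt(void)
{
    /* 1. Route the peripheral interrupt source to a CPU (external) interrupt number. */
    esp_rom_route_intr_matrix(0, ETS_WIFI_MAC_INTR_SOURCE, ETS_WMAC_INUM);

    /* 2. Set the interrupt handler for that CPU interrupt line. */
    intr_handler_set(ETS_WMAC_INUM, example_wifi_isr, NULL);

    /* 3. Enable the CPU interrupt. */
    esp_cpu_intr_enable(1 << ETS_WMAC_INUM);

    /* 4. Enable the interrupt on the peripheral (Wi-Fi MAC) side (not shown here). */
}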

View File

@@ -14,6 +14,7 @@ SECTIONS
_iram_start = ABSOLUTE(.); _iram_start = ABSOLUTE(.);
/* Vectors go to start of IRAM */ /* Vectors go to start of IRAM */
ASSERT(ABSOLUTE(.) % 0x100 == 0, "vector address must be 256 byte aligned"); ASSERT(ABSOLUTE(.) % 0x100 == 0, "vector address must be 256 byte aligned");
KEEP(*(.exception_vectors_table.text));
KEEP(*(.exception_vectors.text)); KEEP(*(.exception_vectors.text));
. = ALIGN(4); . = ALIGN(4);

View File

@@ -145,6 +145,7 @@ SECTIONS
_iram_start = ABSOLUTE(.); _iram_start = ABSOLUTE(.);
/* Vectors go to start of IRAM */ /* Vectors go to start of IRAM */
ASSERT(ABSOLUTE(.) % 0x100 == 0, "vector address must be 256 byte aligned"); ASSERT(ABSOLUTE(.) % 0x100 == 0, "vector address must be 256 byte aligned");
KEEP(*(.exception_vectors_table.text));
KEEP(*(.exception_vectors.text)); KEEP(*(.exception_vectors.text));
. = ALIGN(4); . = ALIGN(4);

View File

@@ -147,6 +147,7 @@ SECTIONS
_iram_start = ABSOLUTE(.); _iram_start = ABSOLUTE(.);
/* Vectors go to start of IRAM */ /* Vectors go to start of IRAM */
ASSERT(ABSOLUTE(.) % 0x100 == 0, "vector address must be 256 byte aligned"); ASSERT(ABSOLUTE(.) % 0x100 == 0, "vector address must be 256 byte aligned");
KEEP(*(.exception_vectors_table.text));
KEEP(*(.exception_vectors.text)); KEEP(*(.exception_vectors.text));
. = ALIGN(4); . = ALIGN(4);

View File

@@ -147,6 +147,7 @@ SECTIONS
_iram_start = ABSOLUTE(.); _iram_start = ABSOLUTE(.);
/* Vectors go to start of IRAM */ /* Vectors go to start of IRAM */
ASSERT(ABSOLUTE(.) % 0x100 == 0, "vector address must be 256 byte aligned"); ASSERT(ABSOLUTE(.) % 0x100 == 0, "vector address must be 256 byte aligned");
KEEP(*(.exception_vectors_table.text));
KEEP(*(.exception_vectors.text)); KEEP(*(.exception_vectors.text));
. = ALIGN(4); . = ALIGN(4);

View File

@@ -167,7 +167,8 @@ SECTIONS
{ {
_iram_start = ABSOLUTE(.); _iram_start = ABSOLUTE(.);
/* Vectors go to start of IRAM */ /* Vectors go to start of IRAM */
ASSERT(ABSOLUTE(.) % 0x100 == 0, "vector address must be 256 byte aligned"); ASSERT(ABSOLUTE(.) % 0x40 == 0, "vector address must be 64 byte aligned");
KEEP(*(.exception_vectors_table.text));
KEEP(*(.exception_vectors.text)); KEEP(*(.exception_vectors.text));
. = ALIGN(4); . = ALIGN(4);

View File

@@ -150,14 +150,22 @@ static void core_intr_matrix_clear(void)
for (int i = 0; i < ETS_MAX_INTR_SOURCE; i++) { for (int i = 0; i < ETS_MAX_INTR_SOURCE; i++) {
#if CONFIG_IDF_TARGET_ESP32P4 #if CONFIG_IDF_TARGET_ESP32P4
if (core_id == 0) { if (core_id == 0) {
REG_WRITE(INTERRUPT_CORE0_LP_RTC_INT_MAP_REG + 4 * i, 0); REG_WRITE(INTERRUPT_CORE0_LP_RTC_INT_MAP_REG + 4 * i, ETS_INVALID_INUM);
} else { } else {
REG_WRITE(INTERRUPT_CORE1_LP_RTC_INT_MAP_REG + 4 * i, 0); REG_WRITE(INTERRUPT_CORE1_LP_RTC_INT_MAP_REG + 4 * i, ETS_INVALID_INUM);
} }
#else #else
esp_rom_route_intr_matrix(core_id, i, ETS_INVALID_INUM); esp_rom_route_intr_matrix(core_id, i, ETS_INVALID_INUM);
#endif #endif // CONFIG_IDF_TARGET_ESP32P4
} }
#if SOC_INT_CLIC_SUPPORTED
for (int i = 0; i < 32; i++) {
/* Set all the CPU interrupt lines to vectored by default, as it is on other RISC-V targets */
esprv_intc_int_set_vectored(i, true);
}
#endif // SOC_INT_CLIC_SUPPORTED
} }
#if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE #if !CONFIG_ESP_SYSTEM_SINGLE_CORE_MODE

View File

@@ -6,6 +6,7 @@
#pragma once #pragma once
#ifndef __ASSEMBLER__ #ifndef __ASSEMBLER__
#include "sdkconfig.h"
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h> #include <stdint.h>
@@ -35,24 +36,99 @@ uint32_t esp_hw_stack_guard_get_pc(void);
#define ASSIST_DEBUG_CORE_0_SP_MIN_OFFSET (ASSIST_DEBUG_CORE_0_SP_MIN_REG - ASSIST_DEBUG_CORE_0_INTR_ENA_REG) #define ASSIST_DEBUG_CORE_0_SP_MIN_OFFSET (ASSIST_DEBUG_CORE_0_SP_MIN_REG - ASSIST_DEBUG_CORE_0_INTR_ENA_REG)
#define ASSIST_DEBUG_CORE_0_SP_MAX_OFFSET (ASSIST_DEBUG_CORE_0_SP_MAX_REG - ASSIST_DEBUG_CORE_0_INTR_ENA_REG) #define ASSIST_DEBUG_CORE_0_SP_MAX_OFFSET (ASSIST_DEBUG_CORE_0_SP_MAX_REG - ASSIST_DEBUG_CORE_0_INTR_ENA_REG)
.macro ESP_HW_STACK_GUARD_SET_BOUNDS_CPU0 .macro ESP_HW_STACK_GUARD_SET_BOUNDS_CPU0 reg1
lui t0, ASSIST_DEBUG_CORE_0_INTR_ENA_REG_IMM lui \reg1, ASSIST_DEBUG_CORE_0_INTR_ENA_REG_IMM
sw a0, ASSIST_DEBUG_CORE_0_SP_MIN_OFFSET(t0) sw a0, ASSIST_DEBUG_CORE_0_SP_MIN_OFFSET(\reg1)
sw a1, ASSIST_DEBUG_CORE_0_SP_MAX_OFFSET(t0) sw a1, ASSIST_DEBUG_CORE_0_SP_MAX_OFFSET(\reg1)
.endm .endm
.macro ESP_HW_STACK_GUARD_MONITOR_STOP_CPU0 .macro ESP_HW_STACK_GUARD_MONITOR_STOP_CPU0 reg1 reg2
lui t0, ASSIST_DEBUG_CORE_0_INTR_ENA_REG_IMM lui \reg1, ASSIST_DEBUG_CORE_0_INTR_ENA_REG_IMM
lw t1, 0(t0) lw \reg2, 0(\reg1)
andi t1, t1, ~ASSIST_DEBUG_SP_SPILL_BITS andi \reg2, \reg2, ~ASSIST_DEBUG_SP_SPILL_BITS
sw t1, 0(t0) sw \reg2, 0(\reg1)
.endm .endm
.macro ESP_HW_STACK_GUARD_MONITOR_START_CPU0 .macro ESP_HW_STACK_GUARD_MONITOR_START_CPU0 reg1 reg2
lui t0, ASSIST_DEBUG_CORE_0_INTR_ENA_REG_IMM lui \reg1, ASSIST_DEBUG_CORE_0_INTR_ENA_REG_IMM
lw t1, 0(t0) lw \reg2, 0(\reg1)
ori t1, t1, ASSIST_DEBUG_SP_SPILL_BITS ori \reg2, \reg2, ASSIST_DEBUG_SP_SPILL_BITS
sw t1, 0(t0) sw \reg2, 0(\reg1)
.endm .endm
#if SOC_CPU_CORES_NUM > 1
#define ASSIST_DEBUG_CORE_1_INTR_ENA_REG_IMM (ASSIST_DEBUG_CORE_1_INTR_ENA_REG >> 12)
#define ASSIST_DEBUG_CORE_1_SP_MIN_OFFSET (ASSIST_DEBUG_CORE_1_SP_MIN_REG - ASSIST_DEBUG_CORE_1_INTR_ENA_REG)
#define ASSIST_DEBUG_CORE_1_SP_MAX_OFFSET (ASSIST_DEBUG_CORE_1_SP_MAX_REG - ASSIST_DEBUG_CORE_1_INTR_ENA_REG)
.macro ESP_HW_STACK_GUARD_SET_BOUNDS_CPU1 reg1
lui \reg1, ASSIST_DEBUG_CORE_1_INTR_ENA_REG_IMM
sw a0, ASSIST_DEBUG_CORE_1_SP_MIN_OFFSET(\reg1)
sw a1, ASSIST_DEBUG_CORE_1_SP_MAX_OFFSET(\reg1)
.endm
.macro ESP_HW_STACK_GUARD_MONITOR_STOP_CPU1 reg1 reg2
lui \reg1, ASSIST_DEBUG_CORE_1_INTR_ENA_REG_IMM
lw \reg2, 0(\reg1)
andi \reg2, \reg2, ~ASSIST_DEBUG_SP_SPILL_BITS
sw \reg2, 0(\reg1)
.endm
.macro ESP_HW_STACK_GUARD_MONITOR_START_CPU1 reg1 reg2
lui \reg1, ASSIST_DEBUG_CORE_1_INTR_ENA_REG_IMM
lw \reg2, 0(\reg1)
ori \reg2, \reg2, ASSIST_DEBUG_SP_SPILL_BITS
sw \reg2, 0(\reg1)
.endm
.macro ESP_HW_STACK_GUARD_SET_BOUNDS_CUR_CORE reg1
/* Check the current core ID */
csrr \reg1, mhartid
beqz \reg1, 1f
/* Core 1 */
ESP_HW_STACK_GUARD_SET_BOUNDS_CPU1 \reg1
j 2f
1:
/* Core 0 */
ESP_HW_STACK_GUARD_SET_BOUNDS_CPU0 \reg1
2:
.endm
.macro ESP_HW_STACK_GUARD_MONITOR_START_CUR_CORE reg1 reg2
/* Check the current core ID */
csrr \reg1, mhartid
beqz \reg1, 1f
/* Core 1 */
ESP_HW_STACK_GUARD_MONITOR_START_CPU1 \reg1 \reg2
j 2f
1:
/* Core 0 */
ESP_HW_STACK_GUARD_MONITOR_START_CPU0 \reg1 \reg2
2:
.endm
.macro ESP_HW_STACK_GUARD_MONITOR_STOP_CUR_CORE reg1 reg2
/* Check the current core ID */
csrr \reg1, mhartid
beqz \reg1, 1f
/* Core 1 */
ESP_HW_STACK_GUARD_MONITOR_STOP_CPU1 \reg1 \reg2
j 2f
1:
/* Core 0 */
ESP_HW_STACK_GUARD_MONITOR_STOP_CPU0 \reg1 \reg2
2:
.endm
#else // SOC_CPU_CORES_NUM <= 1
#define ESP_HW_STACK_GUARD_SET_BOUNDS_CUR_CORE ESP_HW_STACK_GUARD_SET_BOUNDS_CPU0
#define ESP_HW_STACK_GUARD_MONITOR_START_CUR_CORE ESP_HW_STACK_GUARD_MONITOR_START_CPU0
#define ESP_HW_STACK_GUARD_MONITOR_STOP_CUR_CORE ESP_HW_STACK_GUARD_MONITOR_STOP_CPU0
#endif // SOC_CPU_CORES_NUM > 1
#endif // __ASSEMBLER__ #endif // __ASSEMBLER__
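
For readability, a C-level sketch of what the new *_CUR_CORE macros above do: select the assist-debug register block of the core we are running on and program the stack watchpoint window. Helper names and header paths are assumptions; the real code stays in assembly because it runs on interrupt entry/exit:

#include <stdint.h>
#include "soc/assist_debug_reg.h"   /* ASSIST_DEBUG_CORE_x_SP_MIN/MAX_REG (assumed header) */
#include "riscv/rv_utils.h"         /* rv_utils_get_core_id() (assumed available) */

#define EXAMPLE_REG_WRITE(addr, val)  (*(volatile uint32_t *)(addr) = (uint32_t)(val))

/* Hypothetical C equivalent of ESP_HW_STACK_GUARD_SET_BOUNDS_CUR_CORE. */
static inline void example_stack_guard_set_bounds(uint32_t sp_min, uint32_t sp_max)
{
    if (rv_utils_get_core_id() == 0) {
        EXAMPLE_REG_WRITE(ASSIST_DEBUG_CORE_0_SP_MIN_REG, sp_min);
        EXAMPLE_REG_WRITE(ASSIST_DEBUG_CORE_0_SP_MAX_REG, sp_max);
    } else {
        EXAMPLE_REG_WRITE(ASSIST_DEBUG_CORE_1_SP_MIN_REG, sp_min);
        EXAMPLE_REG_WRITE(ASSIST_DEBUG_CORE_1_SP_MAX_REG, sp_max);
    }
}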

View File

@@ -49,8 +49,8 @@ rtos_int_enter:
bne t4,zero, rtos_enter_end bne t4,zero, rtos_enter_end
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
/* esp_hw_stack_guard_monitor_stop(); */ /* esp_hw_stack_guard_monitor_stop(); pass the scratch registers */
ESP_HW_STACK_GUARD_MONITOR_STOP_CPU0 ESP_HW_STACK_GUARD_MONITOR_STOP_CUR_CORE a0 a1
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */ #endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
/* Save current TCB and load the ISR stack */ /* Save current TCB and load the ISR stack */
@@ -62,9 +62,9 @@ rtos_int_enter:
/* esp_hw_stack_guard_set_bounds(xIsrStack, xIsrStackTop); */ /* esp_hw_stack_guard_set_bounds(xIsrStack, xIsrStackTop); */
la a0, xIsrStack la a0, xIsrStack
mv a1, sp mv a1, sp
ESP_HW_STACK_GUARD_SET_BOUNDS_CPU0 ESP_HW_STACK_GUARD_SET_BOUNDS_CUR_CORE a2
/* esp_hw_stack_guard_monitor_start(); */ /* esp_hw_stack_guard_monitor_start(); */
ESP_HW_STACK_GUARD_MONITOR_START_CPU0 ESP_HW_STACK_GUARD_MONITOR_START_CUR_CORE a0 a1
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */ #endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
rtos_enter_end: rtos_enter_end:
@@ -118,7 +118,7 @@ no_switch:
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
/* esp_hw_stack_guard_monitor_stop(); */ /* esp_hw_stack_guard_monitor_stop(); */
ESP_HW_STACK_GUARD_MONITOR_STOP_CPU0 ESP_HW_STACK_GUARD_MONITOR_STOP_CUR_CORE a0 a1
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */ #endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
/* Recover the stack of next task */ /* Recover the stack of next task */
@@ -131,9 +131,9 @@ no_switch:
*/ */
lw a0, PORT_OFFSET_PX_STACK(t0) lw a0, PORT_OFFSET_PX_STACK(t0)
lw a1, PORT_OFFSET_PX_END_OF_STACK(t0) lw a1, PORT_OFFSET_PX_END_OF_STACK(t0)
ESP_HW_STACK_GUARD_SET_BOUNDS_CPU0 ESP_HW_STACK_GUARD_SET_BOUNDS_CUR_CORE a2
/* esp_hw_stack_guard_monitor_start(); */ /* esp_hw_stack_guard_monitor_start(); */
ESP_HW_STACK_GUARD_MONITOR_START_CPU0 ESP_HW_STACK_GUARD_MONITOR_START_CUR_CORE a0 a1
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */ #endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
rtos_exit_end: rtos_exit_end:

View File

@@ -92,6 +92,10 @@ volatile UBaseType_t xPortSwitchFlag[portNUM_PROCESSORS] = {0};
__attribute__((aligned(16))) StackType_t xIsrStack[portNUM_PROCESSORS][configISR_STACK_SIZE]; __attribute__((aligned(16))) StackType_t xIsrStack[portNUM_PROCESSORS][configISR_STACK_SIZE];
StackType_t *xIsrStackTop[portNUM_PROCESSORS] = {0}; StackType_t *xIsrStackTop[portNUM_PROCESSORS] = {0};
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
StackType_t *xIsrStackBottom[portNUM_PROCESSORS] = {0};
#endif
/* ------------------------------------------------ FreeRTOS Portable -------------------------------------------------- /* ------------------------------------------------ FreeRTOS Portable --------------------------------------------------
* - Provides implementation for functions required by FreeRTOS * - Provides implementation for functions required by FreeRTOS
* - Declared in portable.h * - Declared in portable.h
@@ -107,9 +111,12 @@ BaseType_t xPortStartScheduler(void)
port_uxCriticalNesting[coreID] = 0; port_uxCriticalNesting[coreID] = 0;
port_xSchedulerRunning[coreID] = 0; port_xSchedulerRunning[coreID] = 0;
/* Initialize ISR Stack top */ /* Initialize ISR Stack(s) */
for (int i = 0; i < portNUM_PROCESSORS; i++) { for (int i = 0; i < portNUM_PROCESSORS; i++) {
xIsrStackTop[i] = &xIsrStack[i][0] + (configISR_STACK_SIZE & (~((portPOINTER_SIZE_TYPE)portBYTE_ALIGNMENT_MASK))); xIsrStackTop[i] = &xIsrStack[i][0] + (configISR_STACK_SIZE & (~((portPOINTER_SIZE_TYPE)portBYTE_ALIGNMENT_MASK)));
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
xIsrStackBottom[i] = &xIsrStack[i][0];
#endif
} }
/* Setup the hardware to generate the tick. */ /* Setup the hardware to generate the tick. */

View File

@@ -19,7 +19,7 @@
.global vTaskSwitchContext .global vTaskSwitchContext
.global xPortSwitchFlag .global xPortSwitchFlag
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
.global xIsrStack .global xIsrStackBottom
.global port_offset_pxStack .global port_offset_pxStack
.global port_offset_pxEndOfStack .global port_offset_pxEndOfStack
.global esp_hw_stack_guard_monitor_stop .global esp_hw_stack_guard_monitor_stop
@@ -34,75 +34,73 @@
* current task stack pointer and places it into the pxCurrentTCB. * current task stack pointer and places it into the pxCurrentTCB.
* It then loads the ISR stack into sp. * It then loads the ISR stack into sp.
* TODO: ISR nesting code improvements ? * TODO: ISR nesting code improvements ?
* In the routines below, let's use a0-a5 registers to let the compiler generate
* 16-bit instructions.
*/ */
.global rtos_int_enter .global rtos_int_enter
.type rtos_int_enter, @function .type rtos_int_enter, @function
rtos_int_enter: rtos_int_enter:
#if CONFIG_IDF_TARGET_ESP32P4
//TODO: IDF-7861
/* preserve the return address */
mv t1, ra
mv t2, a0
#endif
/* If the scheduler is not enabled, jump directly to the ISR handler */
#if ( configNUM_CORES > 1 ) #if ( configNUM_CORES > 1 )
csrr t6, mhartid /* t6 = coreID */ csrr a5, mhartid /* a5 = coreID */
slli t6, t6, 2 /* t6 = coreID * 4 */ slli a5, a5, 2 /* a5 = coreID * 4 */
la t0, port_xSchedulerRunning /* t0 = &port_xSchedulerRunning */ la a0, port_xSchedulerRunning /* a0 = &port_xSchedulerRunning */
add t0, t0, t6 /* t0 = &port_xSchedulerRunning[coreID] */ add a0, a0, a5 /* a0 = &port_xSchedulerRunning[coreID] */
lw t0, (t0) /* t0 = port_xSchedulerRunning[coreID] */ lw a0, (a0) /* a0 = port_xSchedulerRunning[coreID] */
#else #else
lw t0, port_xSchedulerRunning /* t0 = port_xSchedulerRunning */ lw a0, port_xSchedulerRunning /* a0 = port_xSchedulerRunning */
#endif /* ( configNUM_CORES > 1 ) */ #endif /* ( configNUM_CORES > 1 ) */
beq t0, zero, rtos_int_enter_end /* if (port_xSchedulerRunning[coreID] == 0) jump to rtos_int_enter_end */ beqz a0, rtos_int_enter_end /* if (port_xSchedulerRunning[coreID] == 0) jump to rtos_int_enter_end */
/* Increment the ISR nesting count */ /* Increment the ISR nesting count */
la t3, port_uxInterruptNesting /* t3 = &port_usInterruptNesting */ la a0, port_uxInterruptNesting /* a0 = &port_uxInterruptNesting */
#if ( configNUM_CORES > 1 ) #if ( configNUM_CORES > 1 )
add t3, t3, t6 /* t3 = &port_uxInterruptNesting[coreID] // t6 already contains coreID * 4 */ add a0, a0, a5 /* a0 = &port_uxInterruptNesting[coreID] // a5 already contains coreID * 4 */
#endif /* ( configNUM_CORES > 1 ) */ #endif /* ( configNUM_CORES > 1 ) */
lw t4, 0x0(t3) /* t4 = port_uxInterruptNesting[coreID] */ lw a1, 0(a0) /* a1 = port_uxInterruptNesting[coreID] */
addi t5, t4, 1 /* t5 = t4 + 1 */ addi a2, a1, 1 /* a2 = a1 + 1 */
sw t5, 0x0(t3) /* port_uxInterruptNesting[coreID] = t5 */ sw a2, 0(a0) /* port_uxInterruptNesting[coreID] = a2 */
/* If we reached here from another low-prio ISR, i.e, port_uxInterruptNesting[coreID] > 0, then skip stack pushing to TCB */ /* If we reached here from another low-priority ISR, i.e, port_uxInterruptNesting[coreID] > 0, then skip stack pushing to TCB */
bne t4, zero, rtos_int_enter_end /* if (port_uxInterruptNesting[coreID] > 0) jump to rtos_int_enter_end */ bnez a1, rtos_int_enter_end /* if (port_uxInterruptNesting[coreID] > 0) jump to rtos_int_enter_end */
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
/* esp_hw_stack_guard_monitor_stop(); */ /* esp_hw_stack_guard_monitor_stop(); pass the scratch registers */
ESP_HW_STACK_GUARD_MONITOR_STOP_CPU0 ESP_HW_STACK_GUARD_MONITOR_STOP_CUR_CORE a0 a1
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */ #endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
/* Save the current sp in pxCurrentTCB[coreID] and load the ISR stack on to sp */ /* Save the current sp in pxCurrentTCB[coreID] and load the ISR stack on to sp */
#if ( configNUM_CORES > 1 ) #if ( configNUM_CORES > 1 )
la t0, pxCurrentTCB /* t0 = &pxCurrentTCB */ la a0, pxCurrentTCB /* a0 = &pxCurrentTCB */
add t0, t0, t6 /* t0 = &pxCurrentTCB[coreID] // t6 already contains coreID * 4 */ add a0, a0, a5 /* a0 = &pxCurrentTCB[coreID] // a5 already contains coreID * 4 */
lw t0, (t0) /* t0 = pxCurrentTCB[coreID] */ lw a0, (a0) /* a0 = pxCurrentTCB[coreID] */
sw sp, 0x0(t0) /* pxCurrentTCB[coreID] = sp */ sw sp, 0(a0) /* pxCurrentTCB[coreID] = sp */
la t0, xIsrStackTop /* t0 = &xIsrStackTop */ la a0, xIsrStackTop /* a0 = &xIsrStackTop */
add t0, t0, t6 /* t0 = &xIsrStackTop[coreID] // t6 already contains coreID * 4 */ add a0, a0, a5 /* a0 = &xIsrStackTop[coreID] // a5 already contains coreID * 4 */
lw sp, 0x0(t0) /* sp = xIsrStackTop[coreID] */ lw sp, (a0) /* sp = xIsrStackTop[coreID] */
#else #else
lw t0, pxCurrentTCB /* t0 = pxCurrentTCB */ lw a0, pxCurrentTCB /* a0 = pxCurrentTCB */
sw sp, 0x0(t0) /* pxCurrentTCB = sp */ sw sp, 0(a0) /* pxCurrentTCB[0] = sp */
lw sp, xIsrStackTop /* sp = xIsrStackTop */ lw sp, xIsrStackTop /* sp = xIsrStackTop */
#endif /* ( configNUM_CORES > 1 ) */ #endif /* ( configNUM_CORES > 1 ) */
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
/* esp_hw_stack_guard_set_bounds(xIsrStack, xIsrStackTop); */ /* Prepare the parameters for esp_hw_stack_guard_set_bounds(xIsrStackBottom, xIsrStackTop); */
la a0, xIsrStack #if ( configNUM_CORES > 1 )
/* Load the xIsrStack for the current core and set the new bounds */
la a0, xIsrStackBottom
add a0, a0, a5 /* a0 = &xIsrStackBottom[coreID] */
lw a0, (a0) /* a0 = xIsrStackBottom[coreID] */
#else
lw a0, xIsrStackBottom
#endif /* ( configNUM_CORES > 1 ) */
mv a1, sp mv a1, sp
ESP_HW_STACK_GUARD_SET_BOUNDS_CPU0 /* esp_hw_stack_guard_set_bounds(xIsrStackBottom[coreID], xIsrStackTop[coreID]);
ESP_HW_STACK_GUARD_MONITOR_START_CPU0 */
ESP_HW_STACK_GUARD_SET_BOUNDS_CUR_CORE a2
ESP_HW_STACK_GUARD_MONITOR_START_CUR_CORE a0 a1
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */ #endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
rtos_int_enter_end: rtos_int_enter_end:
#if CONFIG_IDF_TARGET_ESP32P4
//TODO: IDF-7861
mv ra, t1
#endif
ret ret
/** /**
@@ -111,98 +109,91 @@ rtos_int_enter_end:
.global rtos_int_exit .global rtos_int_exit
.type rtos_int_exit, @function .type rtos_int_exit, @function
rtos_int_exit: rtos_int_exit:
/* Skip if the scheduler was not started */
#if ( configNUM_CORES > 1 ) #if ( configNUM_CORES > 1 )
csrr t1, mhartid /* t1 = coreID */ csrr a1, mhartid /* a1 = coreID */
slli t1, t1, 2 /* t1 = t1 * 4 */ slli a1, a1, 2 /* a1 = a1 * 4 */
la t0, port_xSchedulerRunning /* t0 = &port_xSchedulerRunning */ la a0, port_xSchedulerRunning /* a0 = &port_xSchedulerRunning */
add t0, t0, t1 /* t0 = &port_xSchedulerRunning[coreID] */ add a0, a0, a1 /* a0 = &port_xSchedulerRunning[coreID] */
lw t0, (t0) /* t0 = port_xSchedulerRunning[coreID] */ lw a0, (a0) /* a0 = port_xSchedulerRunning[coreID] */
#else #else
lw t0, port_xSchedulerRunning /* t0 = port_xSchedulerRunning */ lw a0, port_xSchedulerRunning /* a0 = port_xSchedulerRunning */
#endif /* ( configNUM_CORES > 1 ) */ #endif /* ( configNUM_CORES > 1 ) */
beq t0, zero, rtos_int_exit_end /* if (port_uxSchewdulerRunning == 0) jump to rtos_int_exit_end */ beqz a0, rtos_int_exit_end /* if (port_xSchedulerRunning[coreID] == 0) jump to rtos_int_exit_end */
/* Decrement interrupt nesting counter */ /* Update the interrupt nesting counter */
la t2, port_uxInterruptNesting /* t2 = &port_uxInterruptNesting */ la a0, port_uxInterruptNesting /* a0 = &port_uxInterruptNesting */
#if ( configNUM_CORES > 1 ) #if ( configNUM_CORES > 1 )
add t2, t2, t1 /* t2 = &port_uxInterruptNesting[coreID] // t1 already contains coreID * 4 */ add a0, a0, a1 /* a0 = &port_uxInterruptNesting[coreID] // a1 already contains coreID * 4 */
#endif #endif /* ( configNUM_CORES > 1 ) */
lw t3, 0x0(t2) /* t3 = port_uxInterruptNesting[coreID] */ lw a2, 0(a0) /* a2 = port_uxInterruptNesting[coreID] */
/* If the interrupt nesting counter is already zero, then protect against underflow */ /* Already zero, protect against underflow */
beq t3, zero, isr_skip_decrement /* if (port_uxInterruptNesting[coreID] == 0) jump to isr_skip_decrement */ beqz a2, isr_skip_decrement /* if (port_uxInterruptNesting[coreID] == 0) jump to isr_skip_decrement */
addi t3, t3, -1 /* t3 = t3 - 1 */ addi a2, a2, -1 /* a2 = a2 - 1 */
sw t3, 0x0(t2) /* port_uxInterruptNesting[coreID] = t3 */ sw a2, 0(a0) /* port_uxInterruptNesting[coreID] = a2 */
/* May still have interrupts pending, skip section below and exit */
bnez a2, rtos_int_exit_end
isr_skip_decrement: isr_skip_decrement:
/* If the CPU reached this label, a2 (uxInterruptNesting) is 0 for sure */
/* We may still have interrupts pending. Skip the section below and exit */ /* Schedule the next task if a yield is pending */
bne t3, zero, rtos_int_exit_end /* (if port_uxInterruptNesting[coreID] > 0) jump to rtos_int_exit_end */ la a0, xPortSwitchFlag /* a0 = &xPortSwitchFlag */
/* Schedule the next task if an yield is pending */
la t0, xPortSwitchFlag /* t0 = &xPortSwitchFlag */
#if ( configNUM_CORES > 1 ) #if ( configNUM_CORES > 1 )
add t0, t0, t1 /* t0 = &xPortSwitchFlag[coreID] // t1 already contains coreID * 4 */ add a0, a0, a1 /* a0 = &xPortSwitchFlag[coreID] // a1 already contains coreID * 4 */
#endif /* ( configNUM_CORES > 1 ) */ #endif /* ( configNUM_CORES > 1 ) */
lw t2, 0x0(t0) /* t2 = xPortSwitchFlag[coreID] */ lw a2, 0(a0) /* a2 = xPortSwitchFlag[coreID] */
beq t2, zero, no_switch /* if (xPortSwitchFlag[coreID] == 0) jump to no_switch */ beqz a2, no_switch /* if (xPortSwitchFlag[coreID] == 0) jump to no_switch */
/* Save the return address on the stack and create space on the stack for the c-routine call to schedule /* Preserve return address and schedule next task. To speed up the process, instead of allocating stack
* the next task. Stack pointer for RISC-V should always be 16 byte aligned. After the switch, restore * space, let's use a callee-saved register: s0. Since the caller is not using it, let's use it. */
* the return address and sp. mv s0, ra
*/ call vTaskSwitchContext
addi sp, sp, -16 /* sp = sp - 16 */ mv ra, s0
sw ra, 0(sp) /* sp = ra */
call vTaskSwitchContext /* vTaskSwitchContext() */
lw ra, 0(sp) /* ra = sp */
addi sp, sp, 16 /* sp = sp + 16 */
/* Clear the switch pending flag */ /* Clears the switch pending flag */
la t0, xPortSwitchFlag /* t0 = &xPortSwitchFlag */ la a0, xPortSwitchFlag /* a0 = &xPortSwitchFlag */
#if ( configNUM_CORES > 1 ) #if ( configNUM_CORES > 1 )
/* c routine vTaskSwitchContext may change the temp registers, so we read again */ /* C routine vTaskSwitchContext may change the temp registers, so we read again */
csrr t3, mhartid /* t3 = coreID */ csrr a1, mhartid /* a1 = coreID */
slli t3, t3, 2 /* t3 = t3 * 4 */ slli a1, a1, 2 /* a1 = a1 * 4 */
add t0, t0, t3 /* t0 = &xPortSwitchFlag[coreID] */ add a0, a0, a1 /* a0 = &xPortSwitchFlag[coreID]; */
#endif /* ( configNUM_CORES > 1 ) */ #endif /* ( configNUM_CORES > 1 ) */
mv t2, zero /* t2 = 0 */ sw zero, 0(a0) /* xPortSwitchFlag[coreID] = 0; */
sw t2, 0x0(t0) /* xPortSwitchFlag[coreID] = t2 */
no_switch: no_switch:
#if SOC_INT_CLIC_SUPPORTED
/* Recover the stack of next task and prepare to exit */
la a0, pxCurrentTCB /* a0 = &pxCurrentTCB */
#if ( configNUM_CORES > 1 )
csrr t3, mhartid /* t3 = coreID */
slli t3, t3, 2 /* t3 = t3 * 4 */
add a0, a0, t3 /* a0 = &pxCurrentTCB[coreID] */
#endif /* ( configNUM_CORES > 1 ) */
lw a0, (a0) /* a0 = pxCurrentTCB[coreID] */
lw a0, 0x0(a0) /* a0 = previous sp */
#else
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
/* esp_hw_stack_guard_monitor_stop(); */ /* esp_hw_stack_guard_monitor_stop(); pass the scratch registers */
ESP_HW_STACK_GUARD_MONITOR_STOP_CPU0 ESP_HW_STACK_GUARD_MONITOR_STOP_CUR_CORE a0 a1
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */ #endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
#if ( configNUM_CORES > 1 )
/* Recover the stack of next task and prepare to exit */
csrr a1, mhartid
slli a1, a1, 2
la a0, pxCurrentTCB /* a0 = &pxCurrentTCB */
add a0, a0, a1 /* a0 = &pxCurrentTCB[coreID] */
lw a0, 0(a0) /* a0 = pxCurrentTCB[coreID] */
lw sp, 0(a0) /* sp = previous sp */
#else
/* Recover the stack of next task */ /* Recover the stack of next task */
lw t0, pxCurrentTCB lw a0, pxCurrentTCB
lw sp, 0x0(t0) lw sp, 0(a0)
#endif /* ( configNUM_CORES > 1 ) */
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD #if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
/* esp_hw_stack_guard_set_bounds(pxCurrentTCB[0]->pxStack, /* esp_hw_stack_guard_set_bounds(pxCurrentTCB[0]->pxStack,
* pxCurrentTCB[0]->pxEndOfStack); * pxCurrentTCB[0]->pxEndOfStack);
*/ */
lw a0, PORT_OFFSET_PX_STACK(t0) lw a1, PORT_OFFSET_PX_END_OF_STACK(a0)
lw a1, PORT_OFFSET_PX_END_OF_STACK(t0) lw a0, PORT_OFFSET_PX_STACK(a0)
ESP_HW_STACK_GUARD_SET_BOUNDS_CPU0 ESP_HW_STACK_GUARD_SET_BOUNDS_CUR_CORE a2
/* esp_hw_stack_guard_monitor_start(); */ /* esp_hw_stack_guard_monitor_start(); */
ESP_HW_STACK_GUARD_MONITOR_START_CPU0 ESP_HW_STACK_GUARD_MONITOR_START_CUR_CORE a0 a1
#endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */ #endif /* CONFIG_ESP_SYSTEM_HW_STACK_GUARD */
#endif /* SOC_INT_CLIC_SUPPORTED */
rtos_int_exit_end: rtos_int_exit_end:
ret ret

View File

@@ -14,6 +14,12 @@ else()
"instruction_decode.c" "instruction_decode.c"
"interrupt.c" "interrupt.c"
"vectors.S") "vectors.S")
if(CONFIG_SOC_INT_CLIC_SUPPORTED)
list(APPEND srcs "vectors_clic.S")
else()
list(APPEND srcs "vectors_intc.S")
endif()
endif() endif()
idf_component_register(SRCS "${srcs}" idf_component_register(SRCS "${srcs}"

View File

@@ -0,0 +1,35 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
/**
* The interrupt bit in the `mcause` register is always bit 31, regardless of the interrupt controller used
*/
#define VECTORS_MCAUSE_INTBIT_MASK (0x80000000)
#if SOC_INT_CLIC_SUPPORTED
/* When a target uses the CLIC as its interrupt controller, the `mcause` register contains more information than
 * just the interrupt bit and the cause:
 * MINHV[30]: whether the CPU is fetching the vectored interrupt entry address
 * MPP[29:28]: MSTATUS.MPP[1:0]
 * MPIL[23:16]: interrupt level before entering the ISR
*
* Define the mask that will only keep the cause.
*/
#define VECTORS_MCAUSE_REASON_MASK (0x00000fff)
#else // !SOC_INT_CLIC_SUPPORTED
/**
* For targets that use the former INTC or the CLINT/PLIC, `mcause` shouldn't contain any additional information,
* but let's be safe and keep the 32 possible cause values.
*/
#define VECTORS_MCAUSE_REASON_MASK (0x0000001f)
#endif
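
A short sketch of how these masks might be used from C to split mcause into its interrupt flag and cause; the CSR-read helper is hypothetical (vectors.S does the equivalent in assembly):

#include <stdint.h>
#include <stdbool.h>
#include "soc/soc_caps.h"                 /* SOC_INT_CLIC_SUPPORTED (assumed include) */
#include "esp_private/vectors_const.h"    /* VECTORS_MCAUSE_* masks defined above */

static inline uint32_t example_read_mcause(void)
{
    uint32_t mcause;
    asm volatile ("csrr %0, mcause" : "=r" (mcause));  /* RISC-V only */
    return mcause;
}

static inline bool example_trap_is_interrupt(uint32_t mcause)
{
    return (mcause & VECTORS_MCAUSE_INTBIT_MASK) != 0;
}

static inline uint32_t example_trap_cause(uint32_t mcause)
{
    /* On CLIC targets this also strips MINHV/MPP/MPIL, keeping only the cause. */
    return mcause & VECTORS_MCAUSE_REASON_MASK;
}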

View File

@@ -1,11 +1,13 @@
/* /*
* SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
#pragma once #pragma once
#include <stdbool.h>
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
@@ -124,6 +126,25 @@ void esprv_intc_int_set_threshold(int priority_threshold);
*/ */
uint32_t esprv_intc_get_interrupt_unmask(void); uint32_t esprv_intc_get_interrupt_unmask(void);
/**
* @brief Check if the given interrupt is hardware vectored
*
* @param rv_int_num Interrupt number
*
* @return true if the interrupt is vectored, false if it is not.
*/
bool esprv_intc_int_is_vectored(int rv_int_num);
/**
* @brief Set interrupt vectored
*
* Configure the given interrupt number to hardware vectored or non-vectored.
*
* @param rv_int_num Interrupt number
* @param vectored True to set it to vectored, false to set it to non-vectored
*/
void esprv_intc_int_set_vectored(int rv_int_num, bool vectored);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
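
A short usage sketch for the two new prototypes above; the header path is an assumption:

#include <stdbool.h>
#include "riscv/interrupt.h"   /* esprv_intc_int_is_vectored() / _set_vectored() (assumed header) */

/* Force a given CPU interrupt line to go through the common, non-vectored entry
 * (_vector_table) instead of its per-line MTVT entry. */
static void example_make_interrupt_non_vectored(int cpu_int_num)
{
    if (esprv_intc_int_is_vectored(cpu_int_num)) {
        esprv_intc_int_set_vectored(cpu_int_num, false);
    }
}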

View File

@@ -133,8 +133,9 @@ FORCE_INLINE_ATTR void rv_utils_intr_disable(uint32_t intr_mask)
RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE); RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
} }
//TODO: IDF-7795, clic related
#if (SOC_CPU_CORES_NUM > 1) #if SOC_INT_CLIC_SUPPORTED
FORCE_INLINE_ATTR void __attribute__((always_inline)) rv_utils_restore_intlevel(uint32_t restoreval) FORCE_INLINE_ATTR void __attribute__((always_inline)) rv_utils_restore_intlevel(uint32_t restoreval)
{ {
REG_SET_FIELD(CLIC_INT_THRESH_REG, CLIC_CPU_INT_THRESH, ((restoreval << (8 - NLBITS))) | 0x1f); REG_SET_FIELD(CLIC_INT_THRESH_REG, CLIC_CPU_INT_THRESH, ((restoreval << (8 - NLBITS))) | 0x1f);
@@ -145,8 +146,10 @@ FORCE_INLINE_ATTR uint32_t __attribute__((always_inline)) rv_utils_set_intlevel(
uint32_t old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE); uint32_t old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
uint32_t old_thresh; uint32_t old_thresh;
old_thresh = REG_READ(CLIC_INT_THRESH_REG); old_thresh = REG_GET_FIELD(CLIC_INT_THRESH_REG, CLIC_CPU_INT_THRESH);
old_thresh = old_thresh >> (24 + (8 - NLBITS)); old_thresh = (old_thresh >> (8 - NLBITS));
/* Upper bits should already be 0, but let's be safe and keep NLBITS */
old_thresh &= BIT(NLBITS) - 1;
REG_SET_FIELD(CLIC_INT_THRESH_REG, CLIC_CPU_INT_THRESH, ((intlevel << (8 - NLBITS))) | 0x1f); REG_SET_FIELD(CLIC_INT_THRESH_REG, CLIC_CPU_INT_THRESH, ((intlevel << (8 - NLBITS))) | 0x1f);
/** /**
@@ -166,19 +169,15 @@ FORCE_INLINE_ATTR uint32_t __attribute__((always_inline)) rv_utils_set_intlevel(
FORCE_INLINE_ATTR uint32_t __attribute__((always_inline)) rv_utils_mask_int_level_lower_than(uint32_t intlevel) FORCE_INLINE_ATTR uint32_t __attribute__((always_inline)) rv_utils_mask_int_level_lower_than(uint32_t intlevel)
{ {
#if SOC_INT_CLIC_SUPPORTED
/* CLIC's set interrupt level is inclusive, i.e. it does mask the set level */ /* CLIC's set interrupt level is inclusive, i.e. it does mask the set level */
return rv_utils_set_intlevel(intlevel - 1); return rv_utils_set_intlevel(intlevel - 1);
#else
return rv_utils_set_intlevel(intlevel);
#endif /* SOC_INT_CLIC_SUPPORTED */
} }
#endif //#if (SOC_CPU_CORES_NUM > 1) #endif /* SOC_INT_CLIC_SUPPORTED */
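
As a worked example of the CLIC threshold encoding used above, assuming NLBITS = 3 purely for illustration: setting interrupt level 5 writes (5 << (8 - 3)) | 0x1f = 0xBF into CLIC_CPU_INT_THRESH, and the old level is recovered by shifting right and masking, as sketched below:

#include <stdint.h>

#define EXAMPLE_NLBITS  3   /* assumption for this example; the real value comes from the SoC headers */

/* Encode an interrupt level into the 8-bit threshold field, mirroring rv_utils_set_intlevel():
 * the level occupies the top NLBITS bits, the low bits are filled with 0x1f. */
static inline uint32_t example_encode_thresh(uint32_t intlevel)
{
    return (intlevel << (8 - EXAMPLE_NLBITS)) | 0x1f;   /* level 5 -> 0xBF */
}

/* Decode the level back, mirroring the read-back path above. */
static inline uint32_t example_decode_thresh(uint32_t thresh_field)
{
    return (thresh_field >> (8 - EXAMPLE_NLBITS)) & ((1u << EXAMPLE_NLBITS) - 1);
}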
FORCE_INLINE_ATTR uint32_t rv_utils_intr_get_enabled_mask(void) FORCE_INLINE_ATTR uint32_t rv_utils_intr_get_enabled_mask(void)
{ {
//TODO: IDF-7795
#if SOC_INT_CLIC_SUPPORTED #if SOC_INT_CLIC_SUPPORTED
unsigned intr_ena_mask = 0; unsigned intr_ena_mask = 0;
unsigned intr_num; unsigned intr_num;
@@ -194,7 +193,6 @@ FORCE_INLINE_ATTR uint32_t rv_utils_intr_get_enabled_mask(void)
FORCE_INLINE_ATTR void rv_utils_intr_edge_ack(unsigned int intr_num) FORCE_INLINE_ATTR void rv_utils_intr_edge_ack(unsigned int intr_num)
{ {
//TODO: IDF-7795
#if SOC_INT_CLIC_SUPPORTED #if SOC_INT_CLIC_SUPPORTED
REG_SET_BIT(CLIC_INT_CTRL_REG(intr_num + CLIC_EXT_INTR_NUM_OFFSET) , CLIC_INT_IP); REG_SET_BIT(CLIC_INT_CTRL_REG(intr_num + CLIC_EXT_INTR_NUM_OFFSET) , CLIC_INT_IP);
#else #else

View File

@@ -14,21 +14,33 @@
#include "riscv/rv_utils.h" #include "riscv/rv_utils.h"
//TODO: IDF-7795, P4, see jira to know what changed and what need to be checked
#define RV_INT_COUNT 32
static inline void assert_valid_rv_int_num(int rv_int_num)
{
#if SOC_INT_CLIC_SUPPORTED #if SOC_INT_CLIC_SUPPORTED
assert(rv_int_num < RV_INT_COUNT && "Invalid CPU interrupt number");
#else
assert(rv_int_num != 0 && rv_int_num < RV_INT_COUNT && "Invalid CPU interrupt number");
#endif
}
/*************************** Software interrupt dispatcher ***************************/ /**
* If the target is using the CLIC as the interrupt controller, we have 32 external interrupt lines and 16 internal
* lines. Let's consider the internal ones reserved and not mappable to any handler.
*/
#define RV_EXTERNAL_INT_COUNT 32
#define RV_EXTERNAL_INT_OFFSET (CLIC_EXT_INTR_NUM_OFFSET)
#else // !SOC_INT_CLIC_SUPPORTED
/**
* In the case of INTC, all the interrupt lines are dedicated to external peripherals, so the offset is 0.
* In the case of PLIC, the reserved interrupts are not contiguous; moreover, they are already marked as
* unusable by the interrupt allocator, so the offset can also be 0 here.
*/
#define RV_EXTERNAL_INT_COUNT 32
#define RV_EXTERNAL_INT_OFFSET 0
/* Since DR_REG_INTERRUPT_CORE0_BASE is not defined on some single-core targets, use the former
* DR_REG_INTERRUPT_BASE macro instead. */
#ifndef DR_REG_INTERRUPT_CORE0_BASE
#define DR_REG_INTERRUPT_CORE0_BASE DR_REG_INTERRUPT_BASE
#endif // DR_REG_INTERRUPT_CORE0_BASE
#endif // SOC_INT_CLIC_SUPPORTED
typedef struct { typedef struct {
@@ -36,98 +48,78 @@ typedef struct {
void *arg; void *arg;
} intr_handler_item_t; } intr_handler_item_t;
#if SOC_INT_CLIC_SUPPORTED static intr_handler_item_t s_intr_handlers[SOC_CPU_CORES_NUM][RV_EXTERNAL_INT_COUNT];
static intr_handler_item_t s_intr_handlers_core0[48];
static intr_handler_item_t s_intr_handlers_core1[48];
#else
static intr_handler_item_t s_intr_handlers[32];
#endif
void intr_handler_set(int int_no, intr_handler_t fn, void *arg)
static inline void assert_valid_rv_int_num(int rv_int_num)
{
#if !SOC_INT_CLIC_SUPPORTED
assert(rv_int_num != 0 && "Invalid CPU interrupt number");
#endif
assert(rv_int_num < RV_EXTERNAL_INT_COUNT && "Invalid CPU interrupt number");
}
static intr_handler_item_t* intr_get_item(int int_no)
{ {
assert_valid_rv_int_num(int_no); assert_valid_rv_int_num(int_no);
#if SOC_INT_CLIC_SUPPORTED const uint32_t id = rv_utils_get_core_id();
if (rv_utils_get_core_id() == 0) {
s_intr_handlers_core0[int_no + CLIC_EXT_INTR_NUM_OFFSET] = (intr_handler_item_t) { return &s_intr_handlers[id][int_no];
.handler = fn,
.arg = arg,
};
} else {
s_intr_handlers_core1[int_no + CLIC_EXT_INTR_NUM_OFFSET] = (intr_handler_item_t) {
.handler = fn,
.arg = arg,
};
} }
#else
s_intr_handlers[int_no] = (intr_handler_item_t) { /*************************** Software interrupt dispatcher ***************************/
void intr_handler_set(int int_no, intr_handler_t fn, void *arg)
{
intr_handler_item_t* item = intr_get_item(int_no);
*item = (intr_handler_item_t) {
.handler = fn, .handler = fn,
.arg = arg .arg = arg
}; };
#endif
} }
intr_handler_t intr_handler_get(int rv_int_num) intr_handler_t intr_handler_get(int rv_int_num)
{ {
#if SOC_INT_CLIC_SUPPORTED const intr_handler_item_t* item = intr_get_item(rv_int_num);
if (rv_utils_get_core_id() == 0) return item->handler;
return s_intr_handlers_core0[rv_int_num + CLIC_EXT_INTR_NUM_OFFSET].handler;
else
return s_intr_handlers_core1[rv_int_num + CLIC_EXT_INTR_NUM_OFFSET].handler;
#else
return s_intr_handlers[rv_int_num].handler;
#endif
} }
void *intr_handler_get_arg(int rv_int_num) void *intr_handler_get_arg(int rv_int_num)
{ {
#if SOC_INT_CLIC_SUPPORTED const intr_handler_item_t* item = intr_get_item(rv_int_num);
if (rv_utils_get_core_id() == 0) return item->arg;
return s_intr_handlers_core0[rv_int_num + CLIC_EXT_INTR_NUM_OFFSET].arg;
else
return s_intr_handlers_core1[rv_int_num + CLIC_EXT_INTR_NUM_OFFSET].arg;
#else
return s_intr_handlers[rv_int_num].arg;
#endif
} }
/* called from vectors.S */ /* called from vectors.S */
void _global_interrupt_handler(intptr_t sp, int mcause) void _global_interrupt_handler(intptr_t sp, int mcause)
{ {
#if SOC_INT_CLIC_SUPPORTED /* mcause contains the interrupt number that triggered the current interrupt. This number
if (rv_utils_get_core_id() == 0) { * also accounts for local/internal interrupts; however, this should not happen in practice,
intr_handler_item_t it = s_intr_handlers_core0[mcause]; * since we never map any peripheral to those. */
if (it.handler) { assert(mcause >= RV_EXTERNAL_INT_OFFSET && "Interrupt sources must not be mapped to local interrupts");
(*it.handler)(it.arg); const intr_handler_item_t* item = intr_get_item(mcause - RV_EXTERNAL_INT_OFFSET);
if (item->handler) {
(*item->handler)(item->arg);
} }
} else {
intr_handler_item_t it = s_intr_handlers_core1[mcause];
if (it.handler) {
(*it.handler)(it.arg);
}
}
#else
intr_handler_item_t it = s_intr_handlers[mcause];
if (it.handler) {
(*it.handler)(it.arg);
}
#endif
} }
/*************************** RISC-V interrupt enable/disable ***************************/ /*************************** RISC-V interrupt enable/disable ***************************/
void intr_matrix_route(int intr_src, int intr_num) void intr_matrix_route(int intr_src, int intr_num)
{ {
#if !SOC_INT_CLIC_SUPPORTED assert_valid_rv_int_num(intr_num);
assert(intr_num != 0);
REG_WRITE(DR_REG_INTERRUPT_BASE + 4 * intr_src, intr_num); if (rv_utils_get_core_id() == 0) {
#else REG_WRITE(DR_REG_INTERRUPT_CORE0_BASE + 4 * intr_src, intr_num + RV_EXTERNAL_INT_OFFSET);
if (rv_utils_get_core_id() == 0) }
REG_WRITE(DR_REG_INTERRUPT_CORE0_BASE + 4 * intr_src, intr_num + CLIC_EXT_INTR_NUM_OFFSET); #if SOC_CPU_CORES_NUM > 1
else else {
REG_WRITE(DR_REG_INTERRUPT_CORE1_BASE + 4 * intr_src, intr_num + CLIC_EXT_INTR_NUM_OFFSET); REG_WRITE(DR_REG_INTERRUPT_CORE1_BASE + 4 * intr_src, intr_num + RV_EXTERNAL_INT_OFFSET);
#endif }
#endif // SOC_CPU_CORES_NUM > 1
} }
// CLIC for each interrupt line provides a IE register // CLIC for each interrupt line provides a IE register
@@ -141,29 +133,50 @@ uint32_t esprv_intc_get_interrupt_unmask(void)
/*************************** ESP-RV Interrupt Controller ***************************/ /*************************** ESP-RV Interrupt Controller ***************************/
enum intr_type esprv_intc_int_get_type(int intr_num)
{
#if SOC_INT_CLIC_SUPPORTED #if SOC_INT_CLIC_SUPPORTED
uint32_t intr_type_reg = REG_GET_FIELD(CLIC_INT_CTRL_REG(intr_num + CLIC_EXT_INTR_NUM_OFFSET), CLIC_INT_ATTR_TRIG);
enum intr_type esprv_intc_int_get_type(int rv_int_num)
{
uint32_t intr_type_reg = REG_GET_FIELD(CLIC_INT_CTRL_REG(rv_int_num + RV_EXTERNAL_INT_OFFSET), CLIC_INT_ATTR_TRIG);
return (intr_type_reg & 1) ? INTR_TYPE_EDGE : INTR_TYPE_LEVEL; return (intr_type_reg & 1) ? INTR_TYPE_EDGE : INTR_TYPE_LEVEL;
// May also support rising edge and falling edge.
#else
uint32_t intr_type_reg = REG_READ(INTERRUPT_CORE0_CPU_INT_TYPE_REG);
return (intr_type_reg & (1 << intr_num)) ? INTR_TYPE_EDGE : INTR_TYPE_LEVEL;
#endif
} }
int esprv_intc_int_get_priority(int rv_int_num) int esprv_intc_int_get_priority(int rv_int_num)
{ {
#if SOC_INT_CLIC_SUPPORTED uint32_t intr_priority_reg = REG_GET_FIELD(CLIC_INT_CTRL_REG(rv_int_num + RV_EXTERNAL_INT_OFFSET), CLIC_INT_CTL);
uint32_t intr_priority_reg = REG_GET_FIELD(CLIC_INT_CTRL_REG(rv_int_num + CLIC_EXT_INTR_NUM_OFFSET), CLIC_INT_CTL);
return (intr_priority_reg >> (8 - NLBITS)); return (intr_priority_reg >> (8 - NLBITS));
#else }
bool esprv_intc_int_is_vectored(int rv_int_num)
{
const uint32_t shv = REG_GET_FIELD(CLIC_INT_CTRL_REG(rv_int_num + RV_EXTERNAL_INT_OFFSET), CLIC_INT_ATTR_SHV);
return shv != 0;
}
void esprv_intc_int_set_vectored(int rv_int_num, bool vectored)
{
REG_SET_FIELD(CLIC_INT_CTRL_REG(rv_int_num + RV_EXTERNAL_INT_OFFSET), CLIC_INT_ATTR_SHV, vectored ? 1 : 0);
}
#else // !SOC_INT_CLIC_SUPPORTED
enum intr_type esprv_intc_int_get_type(int rv_int_num)
{
uint32_t intr_type_reg = REG_READ(INTERRUPT_CORE0_CPU_INT_TYPE_REG);
return (intr_type_reg & (1 << rv_int_num)) ? INTR_TYPE_EDGE : INTR_TYPE_LEVEL;
}
int esprv_intc_int_get_priority(int rv_int_num)
{
uint32_t intr_priority_reg = REG_READ(INTC_INT_PRIO_REG(rv_int_num)); uint32_t intr_priority_reg = REG_READ(INTC_INT_PRIO_REG(rv_int_num));
return intr_priority_reg; return intr_priority_reg;
#endif
} }
#endif // SOC_INT_CLIC_SUPPORTED
/*************************** Exception names. Used in .gdbinit file. ***************************/ /*************************** Exception names. Used in .gdbinit file. ***************************/
const char *riscv_excp_names[16] __attribute__((used)) = { const char *riscv_excp_names[16] __attribute__((used)) = {

View File

@@ -9,6 +9,7 @@
#include "riscv/rvruntime-frames.h" #include "riscv/rvruntime-frames.h"
#include "soc/soc_caps.h" #include "soc/soc_caps.h"
#include "sdkconfig.h" #include "sdkconfig.h"
#include "esp_private/vectors_const.h"
.equ SAVE_REGS, 32 .equ SAVE_REGS, 32
@@ -60,7 +61,7 @@
/* Restore the general purpose registers (excluding gp) from the context on /* Restore the general purpose registers (excluding gp) from the context on
* the stack. The context is then deallocated. The default size is CONTEXT_SIZE * the stack. The context is then deallocated. The default size is CONTEXT_SIZE
* but it can be overriden. */ * but it can be overridden. */
.macro restore_general_regs cxt_size=CONTEXT_SIZE .macro restore_general_regs cxt_size=CONTEXT_SIZE
lw ra, RV_STK_RA(sp) lw ra, RV_STK_RA(sp)
lw tp, RV_STK_TP(sp) lw tp, RV_STK_TP(sp)
@@ -107,92 +108,10 @@
#endif #endif
.section .exception_vectors.text .section .exception_vectors.text
/* This is the vector table. MTVEC points here.
*
* Use 4-byte intructions here. 1 instruction = 1 entry of the table.
* The CPU jumps to MTVEC (i.e. the first entry) in case of an exception,
* and (MTVEC & 0xfffffffc) + (mcause & 0x7fffffff) * 4, in case of an interrupt.
*
* Note: for our CPU, we need to place this on a 256-byte boundary, as CPU
* only uses the 24 MSBs of the MTVEC, i.e. (MTVEC & 0xffffff00).
*/
/**
* TODO: IDF-7863, P4, see jira to know what changed and what need to be checked
*/
#if SOC_INT_CLIC_SUPPORTED
.balign 0x40
#else
.balign 0x100
#endif
.global _vector_table
.type _vector_table, @function
_vector_table:
.option push
.option norvc
#if SOC_INT_CLIC_SUPPORTED
j _trap_handler
#else
j _panic_handler /* exception handler, entry 0 */
#if ETS_INT_WDT_INUM != 24
#error "ETS_INT_WDT_INUM expected to be 24"
#endif
.rept (ETS_INT_WDT_INUM - 1)
j _interrupt_handler /* 23 identical entries, all pointing to the interrupt handler */
.endr
j _panic_handler /* 24: ETS_INT_WDT_INUM panic-interrupt (soc-level panic) */
j _panic_handler /* 25: ETS_CACHEERR_INUM panic-interrupt (soc-level panic) */
#if CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
j _panic_handler /* 26: ETS_MEMPROT_ERR_INUM panic-interrupt (soc-level panic) */
#else
j _interrupt_handler /* 26: interrupt-handler */
#endif // CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
j _panic_handler /* 27: ETS_ASSIST_DEBUG_INUM panic-interrupt (soc-level panic) */
#else
j _interrupt_handler /* 27: interrupt-handler */
#endif // CONFIG_ESP_SYSTEM_HW_STACK_GUARD
.rept (ETS_MAX_INUM - ETS_ASSIST_DEBUG_INUM)
j _interrupt_handler /* remain entries are identical, all pointing to the interrupt handler */
.endr
#endif
.option pop
.size _vector_table, .-_vector_table
#if SOC_INT_CLIC_SUPPORTED
.balign 0x40
.global _mtvt_table
.type _mtvt_table, @function
_mtvt_table:
.option push
.option norvc
.rept 48
.word _interrupt_handler
.endr
.option pop
.size _mtvt_table, .-_mtvt_table
#endif
#if SOC_INT_CLIC_SUPPORTED
.type _trap_handler, @function
_trap_handler:
addi sp, sp, -RV_STK_FRMSZ
sw t0, RV_STK_T0(sp)
sw t1, RV_STK_T1(sp)
csrr t0, mcause
li t1, 0x80000000
bltu t0, t1, _panic_handler
lw t0, RV_STK_T0(sp)
lw t1, RV_STK_T1(sp)
addi sp, sp, RV_STK_FRMSZ
//ESP32P4-TODO: ETS_T1_WDT_INUM/ETS_CACHEERR_INUM/ETS_MEMPROT_ERR_INUM
j _interrupt_handler
.size _trap_handler, .-_trap_handler
#endif
/* Exception handler.*/ /* Exception handler.*/
.type _panic_handler, @function .type _panic_handler, @function
.global _panic_handler
_panic_handler: _panic_handler:
/* Allocate space on the stack and store general purpose registers */ /* Allocate space on the stack and store general purpose registers */
save_general_regs RV_STK_FRMSZ save_general_regs RV_STK_FRMSZ
@@ -224,16 +143,11 @@ _panic_handler:
mv a0, sp mv a0, sp
csrr a1, mcause csrr a1, mcause
/* /* Only keep the interrupt bit and the source cause of the trap */
* MINHV[30]: CPU is fetching vector interrupt entry address or not li t1, VECTORS_MCAUSE_INTBIT_MASK | VECTORS_MCAUSE_REASON_MASK
* MPP[29:28]: MSTATUS.MPP[1:0]
* MPIL[23:16]: interrupt level before entrering interrupt isr
*/
#if SOC_INT_CLIC_SUPPORTED
la t1, 0x80000fff
and a1, a1, t1 and a1, a1, t1
#endif
/* Branches instructions don't accept immediates values, so use t1 to /* Branch instructions don't accept immediate values, so use t1 to
* store our comparator */ * store our comparator */
li t0, 0x80000000 li t0, 0x80000000
bgeu a1, t0, _call_panic_handler bgeu a1, t0, _call_panic_handler
@@ -258,6 +172,10 @@ _call_panic_handler:
* structure */ * structure */
not t0, t0 not t0, t0
and a1, a1, t0 and a1, a1, t0
#if CONFIG_SOC_INT_CLIC_SUPPORTED
/* When CLIC is supported, external interrupts are shifted by 16, so subtract this offset from mcause */
add a1, a1, -16
#endif // CONFIG_SOC_INT_CLIC_SUPPORTED
sw a1, RV_STK_MCAUSE(sp) sw a1, RV_STK_MCAUSE(sp)
jal panic_from_isr jal panic_from_isr
@@ -275,6 +193,7 @@ _return_from_exception:
mret mret
.size _panic_handler, .-_panic_handler .size _panic_handler, .-_panic_handler
/* This is the interrupt handler. /* This is the interrupt handler.
* It saves the registers on the stack, * It saves the registers on the stack,
* prepares for interrupt nesting, * prepares for interrupt nesting,
@@ -300,12 +219,8 @@ _interrupt_handler:
/* Save SP */ /* Save SP */
sw t0, RV_STK_SP(sp) sw t0, RV_STK_SP(sp)
#if CONFIG_IDF_TARGET_ESP32P4 /* Notify the RTOS that an interrupt occurred; it will save the current stack pointer
//TODO: IDF-7861 * in the running TCB, no need to pass it as a parameter */
/* Before doing anythig preserve the stack pointer */
/* It will be saved in current TCB, if needed */
mv a0, sp
#endif //#if CONFIG_IDF_TARGET_ESP32P4
call rtos_int_enter call rtos_int_enter
/* If this is a non-nested interrupt, SP now points to the interrupt stack */ /* If this is a non-nested interrupt, SP now points to the interrupt stack */
@@ -313,13 +228,13 @@ _interrupt_handler:
csrr s1, mcause csrr s1, mcause
csrr s2, mstatus csrr s2, mstatus
#if !SOC_INT_CLIC_SUPPORTED #if !SOC_INT_HW_NESTED_SUPPORTED
/* Save the interrupt threshold level */ /* Save the interrupt threshold level */
li t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG li t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
lw s3, 0(t0) lw s3, 0(t0)
/* Increase interrupt threshold level */ /* Increase interrupt threshold level */
li t2, 0x7fffffff li t2, VECTORS_MCAUSE_REASON_MASK
and t1, s1, t2 /* t1 = mcause & mask */ and t1, s1, t2 /* t1 = mcause & mask */
slli t1, t1, 2 /* t1 = mcause * 4 */ slli t1, t1, 2 /* t1 = mcause * 4 */
li t2, INTC_INT_PRIO_REG(0) li t2, INTC_INT_PRIO_REG(0)
@@ -328,7 +243,7 @@ _interrupt_handler:
addi t2, t2, 1 /* t2 = t2 +1 */ addi t2, t2, 1 /* t2 = t2 +1 */
sw t2, 0(t0) /* INTERRUPT_CORE0_CPU_INT_THRESH_REG = t2 */ sw t2, 0(t0) /* INTERRUPT_CORE0_CPU_INT_THRESH_REG = t2 */
fence fence
#endif #endif // !SOC_INT_HW_NESTED_SUPPORTED
li t0, 0x8 li t0, 0x8
csrrs t0, mstatus, t0 csrrs t0, mstatus, t0
@@ -354,11 +269,7 @@ _interrupt_handler:
mv a0, sp /* argument 1, stack pointer */ mv a0, sp /* argument 1, stack pointer */
mv a1, s1 /* argument 2, interrupt number (mcause) */ mv a1, s1 /* argument 2, interrupt number (mcause) */
/* mask off the interrupt flag of mcause */ /* mask off the interrupt flag of mcause */
#if !SOC_INT_CLIC_SUPPORTED li t0, VECTORS_MCAUSE_REASON_MASK
li t0, 0x7fffffff
#else
li t0, 0x00000fff
#endif
and a1, a1, t0 and a1, a1, t0
jal _global_interrupt_handler jal _global_interrupt_handler
@@ -368,26 +279,20 @@ _interrupt_handler:
csrrc t0, mstatus, t0 csrrc t0, mstatus, t0
/* MIE cleared. Nested interrupts are disabled */ /* MIE cleared. Nested interrupts are disabled */
#if !SOC_INT_CLIC_SUPPORTED #if !SOC_INT_HW_NESTED_SUPPORTED
/* restore the interrupt threshold level */ /* restore the interrupt threshold level */
li t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG li t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
sw s3, 0(t0) sw s3, 0(t0)
fence fence
#endif #endif // !SOC_INT_HW_NESTED_SUPPORTED
#if SOC_INT_CLIC_SUPPORTED /* The RTOS will restore the current TCB stack pointer. This routine will preserve s1 and s2 but alter s0. */
/* Yield to the next task is needed: */
mv a0, sp
#endif
call rtos_int_exit call rtos_int_exit
#if CONFIG_IDF_TARGET_ESP32P4 /* Restore the rest of the registers.
//TODO: IDF-7861 * In case the target uses the CLIC, it is mandatory to restore the `mcause` register since it contains
/* The next (or current) stack pointer is returned in a0 */ * the former CPU priority. When executing `mret`, the hardware will restore the former threshold,
mv sp, a0 * from `mcause` to `mintstatus` CSR */
#endif //#if CONFIG_IDF_TARGET_ESP32P4
/* restore the rest of the registers */
csrw mcause, s1 csrw mcause, s1
csrw mstatus, s2 csrw mstatus, s2
restore_mepc restore_mepc
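
(For readers less familiar with the assembly above, here is a rough C sketch of the threshold handling that `_interrupt_handler` performs on targets without hardware interrupt nesting. `REG_READ`/`REG_WRITE`, `VECTORS_MCAUSE_REASON_MASK` and `INTC_INT_PRIO_REG` are taken from the surrounding code; the two helper names are purely illustrative and not part of this patch. On CLIC targets the hardware does this itself, restoring the former threshold from `mcause` when `mret` executes.)

#include <stdint.h>
#include "soc/soc.h"   /* REG_READ / REG_WRITE */

/* Entry: remember the current threshold and only let strictly higher-priority interrupts nest. */
static inline uint32_t intr_thresh_prologue(uint32_t mcause)
{
    uint32_t saved    = REG_READ(INTERRUPT_CORE0_CPU_INT_THRESH_REG);   /* kept in s3 by the assembly */
    uint32_t intr_num = mcause & VECTORS_MCAUSE_REASON_MASK;            /* mask off the interrupt flag */
    uint32_t prio     = REG_READ(INTC_INT_PRIO_REG(0) + 4 * intr_num);  /* priority of the active line */
    REG_WRITE(INTERRUPT_CORE0_CPU_INT_THRESH_REG, prio + 1);
    return saved;
}

/* Exit: put the previous threshold back once the handler has returned. */
static inline void intr_thresh_epilogue(uint32_t saved)
{
    REG_WRITE(INTERRUPT_CORE0_CPU_INT_THRESH_REG, saved);
}
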

View File

@@ -0,0 +1,113 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "sdkconfig.h"
#include "soc/soc.h"
/* If memory protection interrupts are meant to trigger a panic, attach them to the panic handler;
 * otherwise, attach them to the interrupt handler. */
#if CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
#define MEMPROT_ISR _panic_handler
#else
#define MEMPROT_ISR _interrupt_handler
#endif // CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
/* The system interrupts are not used for now, so trigger a panic every time one occurs. */
#define _system_int_handler _panic_handler
/* Handlers defined in the `vector.S` file, common to all RISC-V targets */
.global _interrupt_handler
.global _panic_handler
.section .exception_vectors_table.text
/* Prevent the assembler from generating 2-byte (compressed) instructions in the vector tables */
.option push
.option norvc
/**
* Non-hardware vectored interrupt entry. MTVEC CSR points here.
*
* On targets that use CLIC as their interrupt controller, when an exception occurs, the CPU
* jumps to the address stored in MTVEC[31:6] << 6. The CPU will also jump to this location
* if an interrupt is configured as non-vectored (CLIC_INT_ATTR.shv = 0).
*
 * Because of the left-shift `<< 6`, this entry must be aligned on a 64-byte boundary.
*/
.global _vector_table
.type _vector_table, @function
.balign 0x40
_vector_table:
j _panic_handler
.size _vector_table, .-_vector_table
/**
* Vectored interrupt table. MTVT CSR points here.
*
* If an interrupt occurs and is configured as (hardware) vectored, the CPU will jump to
* MTVT[31:0] + 4 * interrupt_id
*
 * In the case of the ESP32P4, the interrupt matrix between the CPU interrupt lines
 * and the peripherals offers 32 lines. As such, the external interrupt index ranges from
 * 0 to 31, which maps to entries 16 to 47 of this table.
*
* Since the interrupts are initialized as vectored on CPU start, we can manage the special
* interrupts ETS_T1_WDT_INUM, ETS_CACHEERR_INUM and ETS_MEMPROT_ERR_INUM here.
*/
.balign 0x40
.global _mtvt_table
.type _mtvt_table, @function
_mtvt_table:
.word _system_int_handler /* 0: System interrupt number. Exceptions are non-vectored, won't load this. */
.word _system_int_handler /* 1: System interrupt number */
.word _system_int_handler /* 2: System interrupt number */
.word _system_int_handler /* 3: System interrupt number */
.word _system_int_handler /* 4: System interrupt number */
.word _system_int_handler /* 5: System interrupt number */
.word _system_int_handler /* 6: System interrupt number */
.word _system_int_handler /* 7: System interrupt number */
.word _system_int_handler /* 8: System interrupt number */
.word _system_int_handler /* 9: System interrupt number */
.word _system_int_handler /* 10: System interrupt number */
.word _system_int_handler /* 11: System interrupt number */
.word _system_int_handler /* 12: System interrupt number */
.word _system_int_handler /* 13: System interrupt number */
.word _system_int_handler /* 14: System interrupt number */
.word _system_int_handler /* 15: System interrupt number */
.word _interrupt_handler /* 16: Free interrupt number */
.word _interrupt_handler /* 17: Free interrupt number */
.word _interrupt_handler /* 18: Free interrupt number */
.word _interrupt_handler /* 19: Free interrupt number */
.word _interrupt_handler /* 20: Free interrupt number */
.word _interrupt_handler /* 21: Free interrupt number */
.word _interrupt_handler /* 22: Free interrupt number */
.word _interrupt_handler /* 23: Free interrupt number */
.word _interrupt_handler /* 24: Free interrupt number */
.word _interrupt_handler /* 25: Free interrupt number */
.word _interrupt_handler /* 26: Free interrupt number */
.word _interrupt_handler /* 27: Free interrupt number */
.word _interrupt_handler /* 28: Free interrupt number */
.word _interrupt_handler /* 29: Free interrupt number */
.word _interrupt_handler /* 30: Free interrupt number */
.word _interrupt_handler /* 31: Free interrupt number */
.word _interrupt_handler /* 32: Free interrupt number */
.word _interrupt_handler /* 33: Free interrupt number */
.word _interrupt_handler /* 34: Free interrupt number */
.word _interrupt_handler /* 35: Free interrupt number */
.word _interrupt_handler /* 36: Free interrupt number */
.word _interrupt_handler /* 37: Free interrupt number */
.word _interrupt_handler /* 38: Free interrupt number */
.word _interrupt_handler /* 39: Free interrupt number */
.word _panic_handler /* 40: ETS_INT_WDT_INUM (+16) panic-interrupt (soc-level panic) */
.word _panic_handler /* 41: ETS_CACHEERR_INUM (+16) panic-interrupt (soc-level panic) */
.word MEMPROT_ISR /* 42: ETS_MEMPROT_ERR_INUM (+16) handler (soc-level panic) */
.word _interrupt_handler /* 43: Free interrupt number */
.word _interrupt_handler /* 44: Free interrupt number */
.word _interrupt_handler /* 45: Free interrupt number */
.word _interrupt_handler /* 46: Free interrupt number */
.word _interrupt_handler /* 47: Free interrupt number */
.size _mtvt_table, .-_mtvt_table
.option pop
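
(A hedged sketch of how a hardware-vectored CLIC interrupt reaches an entry of `_mtvt_table`: the CPU fetches the handler address at `MTVT + 4 * interrupt_id`, and external peripheral lines start at entry 16. Only `_mtvt_table` and the +16 offset come from the code above; the helper and macro below are illustrative.)

#include <stdint.h>

typedef void (*intr_handler_t)(void);

#define CLIC_EXT_INTR_OFFSET 16u   /* entries 0..15 are the system/software interrupts */

/* Return the handler the CPU would fetch for a given external peripheral line (0..31). */
static inline intr_handler_t clic_handler_for_ext_line(uintptr_t mtvt_base, uint32_t ext_line)
{
    const uint32_t *table = (const uint32_t *)mtvt_base;   /* MTVT points at _mtvt_table */
    return (intr_handler_t)(uintptr_t)table[CLIC_EXT_INTR_OFFSET + ext_line];
}

/* Example: ETS_INT_WDT_INUM (24) lands on entry 24 + 16 = 40, which is routed to _panic_handler. */
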

View File

@@ -0,0 +1,90 @@
/*
* SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#include "sdkconfig.h"
#include "soc/soc.h"
#if ETS_INT_WDT_INUM != 24
#error "ETS_INT_WDT_INUM expected to be 24"
#endif
/* If memory protection interrupts are meant to trigger a panic, attach them to the panic handler;
 * otherwise, attach them to the interrupt handler. */
#if CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
#define MEMPROT_ISR _panic_handler
#else
#define MEMPROT_ISR _interrupt_handler
#endif // CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
/* Same goes for the assist debug interrupt */
#if CONFIG_ESP_SYSTEM_HW_STACK_GUARD
#define ASTDBG_ISR _panic_handler
#else
#define ASTDBG_ISR _interrupt_handler
#endif // CONFIG_ESP_SYSTEM_HW_STACK_GUARD
/* Handlers defined in the `vector.S` file, common to all RISC-V targets */
.global _interrupt_handler
.global _panic_handler
.section .exception_vectors_table.text
/* This is the vector table. MTVEC points here.
*
* Use 4-byte instructions here. 1 instruction = 1 entry of the table.
* The CPU jumps to MTVEC (i.e. the first entry) in case of an exception,
* and (MTVEC & 0xfffffffc) + (mcause & 0x7fffffff) * 4, in case of an interrupt.
*
 * Note: for our CPU, we need to place this on a 256-byte boundary, as the CPU
* only uses the 24 MSBs of the MTVEC, i.e. (MTVEC & 0xffffff00).
*/
.balign 0x100
/* Since each entry must take 4 bytes, let's temporarily disable the compressed
* instruction set that could potentially generate 2-byte instructions. */
.option push
.option norvc
.global _vector_table
.type _vector_table, @function
_vector_table:
j _panic_handler /* 0: Exception entry */
j _interrupt_handler /* 1: Free interrupt number */
j _interrupt_handler /* 2: Free interrupt number */
j _interrupt_handler /* 3: Free interrupt number */
j _interrupt_handler /* 4: Free interrupt number */
j _interrupt_handler /* 5: Free interrupt number */
j _interrupt_handler /* 6: Free interrupt number */
j _interrupt_handler /* 7: Free interrupt number */
j _interrupt_handler /* 8: Free interrupt number */
j _interrupt_handler /* 9: Free interrupt number */
j _interrupt_handler /* 10: Free interrupt number */
j _interrupt_handler /* 11: Free interrupt number */
j _interrupt_handler /* 12: Free interrupt number */
j _interrupt_handler /* 13: Free interrupt number */
j _interrupt_handler /* 14: Free interrupt number */
j _interrupt_handler /* 15: Free interrupt number */
j _interrupt_handler /* 16: Free interrupt number */
j _interrupt_handler /* 17: Free interrupt number */
j _interrupt_handler /* 18: Free interrupt number */
j _interrupt_handler /* 19: Free interrupt number */
j _interrupt_handler /* 20: Free interrupt number */
j _interrupt_handler /* 21: Free interrupt number */
j _interrupt_handler /* 22: Free interrupt number */
j _interrupt_handler /* 23: Free interrupt number */
j _panic_handler /* 24: ETS_INT_WDT_INUM panic-interrupt (soc-level panic) */
j _panic_handler /* 25: ETS_CACHEERR_INUM panic-interrupt (soc-level panic) */
j MEMPROT_ISR /* 26: ETS_MEMPROT_ERR_INUM handler (soc-level panic) */
j ASTDBG_ISR /* 27: ETS_ASSIST_DEBUG_INUM handler (soc-level panic) */
j _interrupt_handler /* 28: Free interrupt number */
j _interrupt_handler /* 29: Free interrupt number */
j _interrupt_handler /* 30: Free interrupt number */
j _interrupt_handler /* 31: Free interrupt number */
.size _vector_table, .-_vector_table
/* Re-enable the compressed instruction set if it was enabled before */
.option pop
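
(A hedged sketch of the dispatch described in the comment above, for targets that use this table: on an interrupt the CPU jumps to the table base, taken from the 24 MSBs of MTVEC, plus 4 bytes per interrupt number. The helper name is illustrative only, not part of the patch.)

#include <stdint.h>

/* Compute the address the CPU jumps to for a vectored interrupt. */
static inline uintptr_t mtvec_vectored_target(uintptr_t mtvec, uint32_t mcause)
{
    uintptr_t base = mtvec & 0xffffff00u;    /* hence the 256-byte alignment of _vector_table */
    uint32_t  intr = mcause & 0x7fffffffu;   /* interrupt number, with the interrupt flag masked off */
    return base + 4u * intr;                 /* one 4-byte jump instruction per entry */
}
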

View File

@@ -207,6 +207,10 @@ config SOC_INT_CLIC_SUPPORTED
bool bool
default y default y
config SOC_INT_HW_NESTED_SUPPORTED
bool
default y
config SOC_BRANCH_PREDICTOR_SUPPORTED config SOC_BRANCH_PREDICTOR_SUPPORTED
bool bool
default y default y

View File

@@ -3,10 +3,30 @@
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
*/ */
#pragma once
#include "soc/clic_reg.h" #include "soc/clic_reg.h"
#include "soc/soc_caps.h" #include "soc/soc_caps.h"
// ESP32P4 uses the CLIC controller as the interrupt controller (SOC_INT_CLIC_SUPPORTED = y) #ifdef __cplusplus
#define INTERRUPT_CORE0_CPU_INT_THRESH_REG CLIC_INT_THRESH_REG extern "C" {
#define INTERRUPT_CORE1_CPU_INT_THRESH_REG CLIC_INT_THRESH_REG #endif
/**
* ESP32P4 uses the CLIC controller as the interrupt controller (SOC_INT_CLIC_SUPPORTED = y)
*
 * The memory map for interrupt registers is on a per-core basis: CLIC_INT_THRESH_REG points to
 * the current core's registers, whereas CLIC_INT_THRESH_REG + DUALCORE_CLIC_CTRL_OFF points
 * to the other core's registers, regardless of which core we are currently running on.
*/
#define INTERRUPT_CURRENT_CORE_INT_THRESH_REG (CLIC_INT_THRESH_REG)
#define INTERRUPT_OTHER_CORE_INT_THRESH_REG (CLIC_INT_THRESH_REG + DUALCORE_CLIC_CTRL_OFF)
#define INTERRUPT_CORE0_CPU_INT_THRESH_REG (rv_utils_get_core_id() == 0 ? INTERRUPT_CURRENT_CORE_INT_THRESH_REG : INTERRUPT_OTHER_CORE_INT_THRESH_REG)
#define INTERRUPT_CORE1_CPU_INT_THRESH_REG (rv_utils_get_core_id() == 1 ? INTERRUPT_CURRENT_CORE_INT_THRESH_REG : INTERRUPT_OTHER_CORE_INT_THRESH_REG)
#ifdef __cplusplus
}
#endif
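
(A hedged usage sketch of the per-core macros above: raising the CLIC threshold on the current core masks every interrupt whose priority sits below the new value, regardless of which core the code runs on. `REG_READ`/`REG_WRITE` come from `soc/soc.h`; the helper name and the assumption that the macros live in `soc/interrupt_reg.h` are mine, not part of the patch.)

#include <stdint.h>
#include "soc/soc.h"              /* REG_READ / REG_WRITE */
#include "soc/interrupt_reg.h"    /* assumed location of the macros above */

/* Raise the current core's interrupt threshold; return the old value so the caller can restore it. */
static inline uint32_t clic_raise_current_core_threshold(uint32_t new_thresh)
{
    uint32_t old = REG_READ(INTERRUPT_CURRENT_CORE_INT_THRESH_REG);
    REG_WRITE(INTERRUPT_CURRENT_CORE_INT_THRESH_REG, new_thresh);
    return old;
}
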

View File

@@ -230,7 +230,7 @@
//On RISC-V CPUs, the interrupt sources are all external interrupts, whose type, source and priority are configured by SW. //On RISC-V CPUs, the interrupt sources are all external interrupts, whose type, source and priority are configured by SW.
//There is no HW NMI conception. SW should controlled the masked levels through INT_THRESH_REG. //There is no HW NMI conception. SW should controlled the masked levels through INT_THRESH_REG.
//CPU0 Interrupt number reserved in riscv/vector.S, not touch this. //CPU0 Interrupt number reserved in riscv/vector_clic.S, do not touch this.
#define ETS_T1_WDT_INUM 24 #define ETS_T1_WDT_INUM 24
#define ETS_CACHEERR_INUM 25 #define ETS_CACHEERR_INUM 25
#define ETS_MEMPROT_ERR_INUM 26 #define ETS_MEMPROT_ERR_INUM 26

View File

@@ -137,6 +137,7 @@
#define SOC_CPU_HAS_FLEXIBLE_INTC 1 #define SOC_CPU_HAS_FLEXIBLE_INTC 1
#define SOC_INT_PLIC_SUPPORTED 0 //riscv platform-level interrupt controller #define SOC_INT_PLIC_SUPPORTED 0 //riscv platform-level interrupt controller
#define SOC_INT_CLIC_SUPPORTED 1 #define SOC_INT_CLIC_SUPPORTED 1
#define SOC_INT_HW_NESTED_SUPPORTED 1 // Support for hardware interrupt nesting
#define SOC_BRANCH_PREDICTOR_SUPPORTED 1 #define SOC_BRANCH_PREDICTOR_SUPPORTED 1
#define SOC_CPU_BREAKPOINTS_NUM 4 #define SOC_CPU_BREAKPOINTS_NUM 4

View File

@@ -7,132 +7,132 @@
#include "soc/interrupts.h" #include "soc/interrupts.h"
const char *const esp_isr_names[] = { const char *const esp_isr_names[] = {
[0] = "LP_RTC", [ETS_LP_RTC_INTR_SOURCE] = "LP_RTC",
[1] = "LP_WDT", [ETS_LP_WDT_INTR_SOURCE] = "LP_WDT",
[2] = "LP_TIMER0", [ETS_LP_TIMER_REG0_INTR_SOURCE] = "LP_TIMER_REG0",
[3] = "LP_TIMER1", [ETS_LP_TIMER_REG1_INTR_SOURCE] = "LP_TIMER_REG1",
[4] = "MB_HP", [ETS_MB_HP_INTR_SOURCE] = "MB_HP",
[5] = "MB_LP", [ETS_MB_LP_INTR_SOURCE] = "MB_LP",
[6] = "PMU0", [ETS_PMU_0_INTR_SOURCE] = "PMU_0",
[7] = "PMU1", [ETS_PMU_1_INTR_SOURCE] = "PMU_1",
[8] = "LP_ANA", [ETS_LP_ANAPERI_INTR_SOURCE] = "LP_ANAPERI",
[9] = "LP_ADC", [ETS_LP_ADC_INTR_SOURCE] = "LP_ADC",
[10] = "LP_GPIO", [ETS_LP_GPIO_INTR_SOURCE] = "LP_GPIO",
[11] = "LP_I2C", [ETS_LP_I2C_INTR_SOURCE] = "LP_I2C",
[12] = "LP_I2S", [ETS_LP_I2S_INTR_SOURCE] = "LP_I2S",
[13] = "LP_SPI", [ETS_LP_SPI_INTR_SOURCE] = "LP_SPI",
[14] = "LP_TOUCH", [ETS_LP_TOUCH_INTR_SOURCE] = "LP_TOUCH",
[15] = "LP_TSENS", [ETS_LP_TSENS_INTR_SOURCE] = "LP_TSENS",
[16] = "LP_UART", [ETS_LP_UART_INTR_SOURCE] = "LP_UART",
[17] = "LP_EFUSE", [ETS_LP_EFUSE_INTR_SOURCE] = "LP_EFUSE",
[18] = "LP_SW", [ETS_LP_SW_INTR_SOURCE] = "LP_SW",
[19] = "LP_SYSREG", [ETS_LP_SYSREG_INTR_SOURCE] = "LP_SYSREG",
[20] = "LP_HUK", [ETS_LP_HUK_INTR_SOURCE] = "LP_HUK",
[21] = "SYS_ICM", [ETS_SYS_ICM_INTR_SOURCE] = "SYS_ICM",
[22] = "USB_DEVICE", [ETS_USB_DEVICE_INTR_SOURCE] = "USB_DEVICE",
[23] = "SDIO_HOST", [ETS_SDIO_HOST_INTR_SOURCE] = "SDIO_HOST",
[24] = "GDMA", [ETS_GDMA_INTR_SOURCE] = "GDMA",
[25] = "GPSPI2", [ETS_SPI2_INTR_SOURCE] = "SPI2",
[26] = "GPSPI3", [ETS_SPI3_INTR_SOURCE] = "SPI3",
[27] = "I2S0", [ETS_I2S0_INTR_SOURCE] = "I2S0",
[28] = "I2S1", [ETS_I2S1_INTR_SOURCE] = "I2S1",
[29] = "I2S2", [ETS_I2S2_INTR_SOURCE] = "I2S2",
[30] = "UHCI0", [ETS_UHCI0_INTR_SOURCE] = "UHCI0",
[31] = "UART0", [ETS_UART0_INTR_SOURCE] = "UART0",
[32] = "UART1", [ETS_UART1_INTR_SOURCE] = "UART1",
[33] = "UART2", [ETS_UART2_INTR_SOURCE] = "UART2",
[34] = "UART3", [ETS_UART3_INTR_SOURCE] = "UART3",
[35] = "UART4", [ETS_UART4_INTR_SOURCE] = "UART4",
[36] = "LCD_CAM", [ETS_LCD_CAM_INTR_SOURCE] = "LCD_CAM",
[37] = "ADC", [ETS_ADC_INTR_SOURCE] = "ADC",
[38] = "PWM0", [ETS_PWM0_INTR_SOURCE] = "PWM0",
[39] = "PWM1", [ETS_PWM1_INTR_SOURCE] = "PWM1",
[40] = "CAN0", [ETS_CAN0_INTR_SOURCE] = "CAN0",
[41] = "CAN1", [ETS_CAN1_INTR_SOURCE] = "CAN1",
[42] = "CAN2", [ETS_CAN2_INTR_SOURCE] = "CAN2",
[43] = "RMT", [ETS_RMT_INTR_SOURCE] = "RMT",
[44] = "I2C0", [ETS_I2C0_INTR_SOURCE] = "I2C0",
[45] = "I2C1", [ETS_I2C1_INTR_SOURCE] = "I2C1",
[46] = "TG0_T0", [ETS_TG0_T0_INTR_SOURCE] = "TG0_T0",
[47] = "TG0_T1", [ETS_TG0_T1_INTR_SOURCE] = "TG0_T1",
[48] = "TG0_WDT", [ETS_TG0_WDT_LEVEL_INTR_SOURCE] = "TG0_WDT_LEVEL",
[49] = "TG1_T0", [ETS_TG1_T0_INTR_SOURCE] = "TG1_T0",
[50] = "TG1_T1", [ETS_TG1_T1_INTR_SOURCE] = "TG1_T1",
[51] = "TG1_WDT", [ETS_TG1_WDT_LEVEL_INTR_SOURCE] = "TG1_WDT_LEVEL",
[52] = "LEDC", [ETS_LEDC_INTR_SOURCE] = "LEDC",
[53] = "SYSTIMER_TARGET0", [ETS_SYSTIMER_TARGET0_INTR_SOURCE] = "SYSTIMER_TARGET0",
[54] = "SYSTIMER_TARGET1", [ETS_SYSTIMER_TARGET1_INTR_SOURCE] = "SYSTIMER_TARGET1",
[55] = "SYSTIMER_TARGET2", [ETS_SYSTIMER_TARGET2_INTR_SOURCE] = "SYSTIMER_TARGET2",
[56] = "AHB_PDMA_IN_CH0", [ETS_AHB_PDMA_IN_CH0_INTR_SOURCE] = "AHB_PDMA_IN_CH0",
[57] = "AHB_PDMA_IN_CH1", [ETS_AHB_PDMA_IN_CH1_INTR_SOURCE] = "AHB_PDMA_IN_CH1",
[58] = "AHB_PDMA_IN_CH2", [ETS_AHB_PDMA_IN_CH2_INTR_SOURCE] = "AHB_PDMA_IN_CH2",
[59] = "AHB_PDMA_OUT_CH0", [ETS_AHB_PDMA_OUT_CH0_INTR_SOURCE] = "AHB_PDMA_OUT_CH0",
[60] = "AHB_PDMA_OUT_CH1", [ETS_AHB_PDMA_OUT_CH1_INTR_SOURCE] = "AHB_PDMA_OUT_CH1",
[61] = "AHB_PDMA_OUT_CH2", [ETS_AHB_PDMA_OUT_CH2_INTR_SOURCE] = "AHB_PDMA_OUT_CH2",
[62] = "AXI_PDMA_IN_CH0", [ETS_AXI_PDMA_IN_CH0_INTR_SOURCE] = "AXI_PDMA_IN_CH0",
[63] = "AXI_PDMA_IN_CH1", [ETS_AXI_PDMA_IN_CH1_INTR_SOURCE] = "AXI_PDMA_IN_CH1",
[64] = "AXI_PDMA_IN_CH2", [ETS_AXI_PDMA_IN_CH2_INTR_SOURCE] = "AXI_PDMA_IN_CH2",
[65] = "AXI_PDMA_OUT_CH0", [ETS_AXI_PDMA_OUT_CH0_INTR_SOURCE] = "AXI_PDMA_OUT_CH0",
[66] = "AXI_PDMA_OUT_CH1", [ETS_AXI_PDMA_OUT_CH1_INTR_SOURCE] = "AXI_PDMA_OUT_CH1",
[67] = "AXI_PDMA_OUT_CH2", [ETS_AXI_PDMA_OUT_CH2_INTR_SOURCE] = "AXI_PDMA_OUT_CH2",
[68] = "RSA", [ETS_RSA_INTR_SOURCE] = "RSA",
[69] = "AES", [ETS_AES_INTR_SOURCE] = "AES",
[70] = "SHA", [ETS_SHA_INTR_SOURCE] = "SHA",
[71] = "ECC", [ETS_ECC_INTR_SOURCE] = "ECC",
[72] = "ECDSA", [ETS_ECDSA_INTR_SOURCE] = "ECDSA",
[73] = "KM", [ETS_KM_INTR_SOURCE] = "KM",
[74] = "GPIO_INT0", [ETS_GPIO_INTR0_SOURCE] = "GPIO_INT0",
[75] = "GPIO_INT1", [ETS_GPIO_INTR1_SOURCE] = "GPIO_INT1",
[76] = "GPIO_INT2", [ETS_GPIO_INTR2_SOURCE] = "GPIO_INT2",
[77] = "GPIO_INT3", [ETS_GPIO_INTR3_SOURCE] = "GPIO_INT3",
[78] = "GPIO_PAD_COMP", [ETS_GPIO_PAD_COMP_INTR_SOURCE] = "GPIO_PAD_COMP",
[79] = "CPU_INT_FROM_CPU_0", [ETS_FROM_CPU_INTR0_SOURCE] = "CPU_INT_FROM_CPU_0",
[80] = "CPU_INT_FROM_CPU_1", [ETS_FROM_CPU_INTR1_SOURCE] = "CPU_INT_FROM_CPU_1",
[81] = "CPU_INT_FROM_CPU_2", [ETS_FROM_CPU_INTR2_SOURCE] = "CPU_INT_FROM_CPU_2",
[82] = "CPU_INT_FROM_CPU_3", [ETS_FROM_CPU_INTR3_SOURCE] = "CPU_INT_FROM_CPU_3",
[83] = "CACHE", [ETS_CACHE_INTR_SOURCE] = "CACHE",
[84] = "FLASH_MSPI", [ETS_MSPI_INTR_SOURCE] = "MSPI",
[85] = "CSI_BRIDGE", [ETS_CSI_BRIDGE_INTR_SOURCE] = "CSI_BRIDGE",
[86] = "DSI_BRIDGE", [ETS_DSI_BRIDGE_INTR_SOURCE] = "DSI_BRIDGE",
[87] = "CSI", [ETS_CSI_INTR_SOURCE] = "CSI",
[88] = "DSI", [ETS_DSI_INTR_SOURCE] = "DSI",
[89] = "GMII_PHY", [ETS_GMII_PHY_INTR_SOURCE] = "GMII_PHY",
[90] = "LPI", [ETS_LPI_INTR_SOURCE] = "LPI",
[91] = "PMT", [ETS_PMT_INTR_SOURCE] = "PMT",
[92] = "SBD", [ETS_SBD_INTR_SOURCE] = "SBD",
[93] = "USB_OTG", [ETS_USB_OTG_INTR_SOURCE] = "USB_OTG",
[94] = "USB_OTG_ENDP_MULTI_PROC", [ETS_USB_OTG_ENDP_MULTI_PROC_INTR_SOURCE] = "USB_OTG_ENDP_MULTI_PROC",
[95] = "JPEG", [ETS_JPEG_INTR_SOURCE] = "JPEG",
[96] = "PPA", [ETS_PPA_INTR_SOURCE] = "PPA",
[97] = "CORE0_TRACE", [ETS_CORE0_TRACE_INTR_SOURCE] = "CORE0_TRACE",
[98] = "CORE1_TRACE", [ETS_CORE1_TRACE_INTR_SOURCE] = "CORE1_TRACE",
[99] = "HP_CORE", [ETS_HP_CORE_CTRL_INTR_SOURCE] = "HP_CORE_CTRL",
[100] = "ISP", [ETS_ISP_INTR_SOURCE] = "ISP",
[101] = "I3C", [ETS_I3C_MST_INTR_SOURCE] = "I3C_MST",
[102] = "I3C_SLV", [ETS_I3C_SLV_INTR_SOURCE] = "I3C_SLV",
[103] = "USB_OTG11", [ETS_USB_OTG11_CH0_INTR_SOURCE] = "USB_OTG11_CH0",
[104] = "DMA2D_IN_CH0", [ETS_DMA2D_IN_CH0_INTR_SOURCE] = "DMA2D_IN_CH0",
[105] = "DMA2D_IN_CH1", [ETS_DMA2D_IN_CH1_INTR_SOURCE] = "DMA2D_IN_CH1",
[106] = "DMA2D_OUT_CH0", [ETS_DMA2D_OUT_CH0_INTR_SOURCE] = "DMA2D_OUT_CH0",
[107] = "DMA2D_OUT_CH1", [ETS_DMA2D_OUT_CH1_INTR_SOURCE] = "DMA2D_OUT_CH1",
[108] = "DMA2D_OUT_CH2", [ETS_DMA2D_OUT_CH2_INTR_SOURCE] = "DMA2D_OUT_CH2",
[109] = "PSRAM_MSPI", [ETS_PSRAM_MSPI_INTR_SOURCE] = "PSRAM_MSPI",
[110] = "HP_SYSREG", [ETS_HP_SYSREG_INTR_SOURCE] = "HP_SYSREG",
[111] = "PCNT", [ETS_PCNT_INTR_SOURCE] = "PCNT",
[112] = "HP_PAU", [ETS_HP_PAU_INTR_SOURCE] = "HP_PAU",
[113] = "HP_PARLIO_RX", [ETS_HP_PARLIO_RX_INTR_SOURCE] = "HP_PARLIO_RX",
[114] = "HP_PARLIO_TX", [ETS_HP_PARLIO_TX_INTR_SOURCE] = "HP_PARLIO_TX",
[115] = "H264_DMA2D_OUT_CH0", [ETS_H264_DMA2D_OUT_CH0_INTR_SOURCE] = "H264_DMA2D_OUT_CH0",
[116] = "H264_DMA2D_OUT_CH1", [ETS_H264_DMA2D_OUT_CH1_INTR_SOURCE] = "H264_DMA2D_OUT_CH1",
[117] = "H264_DMA2D_OUT_CH2", [ETS_H264_DMA2D_OUT_CH2_INTR_SOURCE] = "H264_DMA2D_OUT_CH2",
[118] = "H264_DMA2D_OUT_CH3", [ETS_H264_DMA2D_OUT_CH3_INTR_SOURCE] = "H264_DMA2D_OUT_CH3",
[119] = "H264_DMA2D_OUT_CH4", [ETS_H264_DMA2D_OUT_CH4_INTR_SOURCE] = "H264_DMA2D_OUT_CH4",
[120] = "H264_DMA2D_IN_CH0", [ETS_H264_DMA2D_IN_CH0_INTR_SOURCE] = "H264_DMA2D_IN_CH0",
[121] = "H264_DMA2D_IN_CH1", [ETS_H264_DMA2D_IN_CH1_INTR_SOURCE] = "H264_DMA2D_IN_CH1",
[122] = "H264_DMA2D_IN_CH2", [ETS_H264_DMA2D_IN_CH2_INTR_SOURCE] = "H264_DMA2D_IN_CH2",
[123] = "H264_DMA2D_IN_CH3", [ETS_H264_DMA2D_IN_CH3_INTR_SOURCE] = "H264_DMA2D_IN_CH3",
[124] = "H264_DMA2D_IN_CH4", [ETS_H264_DMA2D_IN_CH4_INTR_SOURCE] = "H264_DMA2D_IN_CH4",
[125] = "H264_DMA2D_IN_CH5", [ETS_H264_DMA2D_IN_CH5_INTR_SOURCE] = "H264_DMA2D_IN_CH5",
[126] = "H264_REG", [ETS_H264_REG_INTR_SOURCE] = "H264_REG",
[127] = "ASSIST_DEBUG", [ETS_ASSIST_DEBUG_INTR_SOURCE] = "ASSIST_DEBUG",
}; };
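
(A hedged usage note: now that the table is indexed with the `ETS_*_INTR_SOURCE` enumerators instead of hard-coded numbers, a lookup stays correct even if the source numbering shifts between chip revisions. The function below is only an illustration, not part of the patch.)

#include <stdio.h>
#include "soc/interrupts.h"   /* esp_isr_names and the ETS_*_INTR_SOURCE enumerators */

void print_gdma_isr_name(void)
{
    printf("source %d -> %s\n", (int)ETS_GDMA_INTR_SOURCE, esp_isr_names[ETS_GDMA_INTR_SOURCE]);
}
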