Merge branch 'bugfix/interrupted_thread_gdb_bt_v4.3' into 'release/v4.3'

riscv: Fixes GDB backtrace of interrupted threads (v4.3)

See merge request espressif/esp-idf!17722
This commit is contained in:
Jiang Jiang Jian
2022-04-23 13:56:51 +08:00
4 changed files with 421 additions and 250 deletions

View File

@@ -161,7 +161,7 @@ void vPortSetupTimer(void)
systimer_hal_enable_alarm_int(SYSTIMER_ALARM_0); systimer_hal_enable_alarm_int(SYSTIMER_ALARM_0);
} }
void prvTaskExitError(void) __attribute__((noreturn)) static void _prvTaskExitError(void)
{ {
/* A function that implements a task must not exit or attempt to return to /* A function that implements a task must not exit or attempt to return to
its caller as there is nothing to return to. If a task wants to exit it its caller as there is nothing to return to. If a task wants to exit it
@@ -174,6 +174,18 @@ void prvTaskExitError(void)
abort(); abort();
} }
/* Trampoline that a task "returns" to if its entry function ever exits.
   Marked naked so the compiler emits no prologue: the 4-byte `nop`
   (forced non-compressed by .option norvc) is guaranteed to be the first
   instruction. pxPortInitialiseStack sets a task's initial RA to
   prvTaskExitError + 4, i.e. just past the nop, so GDB unwinds the
   backtrace into this function rather than into whichever function
   happens to sit just before it in the address space. */
__attribute__((naked)) static void prvTaskExitError(void)
{
asm volatile(".option push\n" \
".option norvc\n" \
"nop\n" \
".option pop");
/* Task entry's RA will point here. Shifting RA into prvTaskExitError is necessary
to make the GDB backtrace end inside this function.
Otherwise the backtrace would end in the function lying just before prvTaskExitError in address space. */
_prvTaskExitError();
}
/* Clear current interrupt mask and set given mask */ /* Clear current interrupt mask and set given mask */
void vPortClearInterruptMask(int mask) void vPortClearInterruptMask(int mask)
{ {
@@ -282,7 +294,9 @@ StackType_t *pxPortInitialiseStack(StackType_t *pxTopOfStack, TaskFunction_t pxC
sp -= RV_STK_FRMSZ; sp -= RV_STK_FRMSZ;
RvExcFrame *frame = (RvExcFrame *)sp; RvExcFrame *frame = (RvExcFrame *)sp;
memset(frame, 0, sizeof(*frame)); memset(frame, 0, sizeof(*frame));
frame->ra = (UBaseType_t)prvTaskExitError; /* Shifting RA into prvTaskExitError is necessary to make GDB backtrace ending inside that function.
Otherwise backtrace will end in the function laying just before prvTaskExitError in address space. */
frame->ra = (UBaseType_t)prvTaskExitError + 4/*size of the nop instruction at the beginning of prvTaskExitError*/;
frame->mepc = (UBaseType_t)pxCode; frame->mepc = (UBaseType_t)pxCode;
frame->a0 = (UBaseType_t)pvParameters; frame->a0 = (UBaseType_t)pvParameters;
frame->gp = (UBaseType_t)&__global_pointer$; frame->gp = (UBaseType_t)&__global_pointer$;

View File

@@ -20,6 +20,7 @@
#include "soc/assist_debug_reg.h" #include "soc/assist_debug_reg.h"
#include "esp_attr.h" #include "esp_attr.h"
#include "riscv/csr.h" #include "riscv/csr.h"
#include "riscv/semihosting.h"
/*performance counter*/ /*performance counter*/
#define CSR_PCER_MACHINE 0x7e0 #define CSR_PCER_MACHINE 0x7e0
@@ -72,8 +73,29 @@ static inline void cpu_ll_init_hwloop(void)
// Nothing needed here for ESP32-C3 // Nothing needed here for ESP32-C3
} }
/* Reports whether an external debugger is attached, by reading the
   DEBUG_MODULE_ACTIVE bit of the assist-debug module.
   NOTE: "C0RE" (with a zero) is the register macro's actual spelling in
   the SoC headers — do not "fix" it here. */
static inline bool cpu_ll_is_debugger_attached(void)
{
return REG_GET_BIT(ASSIST_DEBUG_C0RE_0_DEBUG_MODE_REG, ASSIST_DEBUG_CORE_0_DEBUG_MODULE_ACTIVE);
}
static inline void cpu_ll_set_breakpoint(int id, uint32_t pc) static inline void cpu_ll_set_breakpoint(int id, uint32_t pc)
{ {
if (cpu_ll_is_debugger_attached()) {
/* If we want to set breakpoint which when hit transfers control to debugger
* we need to set `action` in `mcontrol` to 1 (Enter Debug Mode).
* That `action` value is supported only when `dmode` of `tdata1` is set.
* But `dmode` can be modified by debugger only (from Debug Mode).
*
* So when debugger is connected we use special syscall to ask it to set breakpoint for us.
*/
long args[] = {true, id, (long)pc};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
if (ret == 0) {
return;
}
}
/* The code below sets a breakpoint which will trigger a `Breakpoint` exception
* instead of transferring control to the debugger. */
RV_WRITE_CSR(tselect,id); RV_WRITE_CSR(tselect,id);
RV_SET_CSR(CSR_TCONTROL,TCONTROL_MTE); RV_SET_CSR(CSR_TCONTROL,TCONTROL_MTE);
RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE); RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE);
@@ -83,6 +105,14 @@ static inline void cpu_ll_set_breakpoint(int id, uint32_t pc)
static inline void cpu_ll_clear_breakpoint(int id) static inline void cpu_ll_clear_breakpoint(int id)
{ {
if (cpu_ll_is_debugger_attached()) {
/* see description in cpu_ll_set_breakpoint() */
long args[] = {false, id};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
if (ret == 0){
return;
}
}
RV_WRITE_CSR(tselect,id); RV_WRITE_CSR(tselect,id);
RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE); RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE); RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE);
@@ -106,6 +136,17 @@ static inline void cpu_ll_set_watchpoint(int id,
bool on_write) bool on_write)
{ {
uint32_t addr_napot; uint32_t addr_napot;
if (cpu_ll_is_debugger_attached()) {
/* see description in cpu_ll_set_breakpoint() */
long args[] = {true, id, (long)addr, (long)size,
(long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
if (ret == 0) {
return;
}
}
RV_WRITE_CSR(tselect,id); RV_WRITE_CSR(tselect,id);
RV_SET_CSR(CSR_TCONTROL, TCONTROL_MPTE | TCONTROL_MTE); RV_SET_CSR(CSR_TCONTROL, TCONTROL_MPTE | TCONTROL_MTE);
RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE); RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE);
@@ -124,6 +165,14 @@ static inline void cpu_ll_set_watchpoint(int id,
static inline void cpu_ll_clear_watchpoint(int id) static inline void cpu_ll_clear_watchpoint(int id)
{ {
if (cpu_ll_is_debugger_attached()) {
/* see description in cpu_ll_set_breakpoint() */
long args[] = {false, id};
int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
if (ret == 0){
return;
}
}
RV_WRITE_CSR(tselect,id); RV_WRITE_CSR(tselect,id);
RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE); RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE);
RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE); RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE);
@@ -133,11 +182,6 @@ static inline void cpu_ll_clear_watchpoint(int id)
return; return;
} }
FORCE_INLINE_ATTR bool cpu_ll_is_debugger_attached(void)
{
return REG_GET_BIT(ASSIST_DEBUG_C0RE_0_DEBUG_MODE_REG, ASSIST_DEBUG_CORE_0_DEBUG_MODULE_ACTIVE);
}
static inline void cpu_ll_break(void) static inline void cpu_ll_break(void)
{ {
asm volatile("ebreak\n"); asm volatile("ebreak\n");

View File

@@ -0,0 +1,99 @@
/*
* SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
/* ESP custom semihosting calls numbers */
/**
* @brief Set/clear breakpoint
*
* @param set if true set breakpoint, otherwise clear it
* @param id breakpoint ID
* @param addr address to set breakpoint at. Ignored if `set` is false.
* @return return 0 on success or non-zero error code
*/
#define ESP_SEMIHOSTING_SYS_BREAKPOINT_SET 0x66
/**
* @brief Set/clear watchpoint
*
* @param set if true set watchpoint, otherwise clear it
* @param id watchpoint ID
* @param addr address to set watchpoint at. Ignored if `set` is false.
* @param size size of watchpoint. Ignored if `set` is false.
* @param flags watchpoint flags, see description below. Ignored if `set` is false.
* @return return 0 on success or non-zero error code
*/
#define ESP_SEMIHOSTING_SYS_WATCHPOINT_SET 0x67
/* bit values for `flags` argument of ESP_SEMIHOSTING_SYS_WATCHPOINT_SET call. Can be ORed. */
/* watch for 'reads' at `addr` */
#define ESP_SEMIHOSTING_WP_FLG_RD (1UL << 0)
/* watch for 'writes' at `addr` */
#define ESP_SEMIHOSTING_WP_FLG_WR (1UL << 1)
/**
* @brief Perform semihosting call
*
* See https://github.com/riscv/riscv-semihosting-spec/ and the linked
* ARM semihosting spec for details.
*
* @param id semihosting call number
* @param data data block to pass to the host; number of items and their
* meaning depends on the semihosting call. See the spec for
* details.
*
* @return return value from the host
*/
static inline long semihosting_call_noerrno(long id, long *data)
{
/* RISC-V semihosting ABI: call number in a0, pointer to the argument
 * block in a1; the host's return value comes back in a0. */
register long a0 asm ("a0") = id;
register long a1 asm ("a1") = (long) data;
__asm__ __volatile__ (
".option push\n"
".option norvc\n"
/* The slli/ebreak/srai triple is the exact instruction signature the
 * RISC-V semihosting spec defines: the attached host recognizes the
 * instructions surrounding the ebreak and services the call instead of
 * treating it as an ordinary breakpoint. `.option norvc` keeps all
 * three uncompressed, since the sequence must match byte-for-byte.
 * With no debugger attached, the ebreak raises a breakpoint exception. */
"slli zero, zero, 0x1f\n"
"ebreak\n"
"srai zero, zero, 0x7\n"
".option pop\n"
: "+r"(a0) : "r"(a1) : "memory");
return a0;
}
/**
* @brief Perform semihosting call and retrieve errno
*
* @param id semihosting call number
* @param data data block to pass to the host; number of items and their
* meaning depends on the semihosting call. See the spec for
* details.
* @param[out] out_errno output, errno value from the host. Only set if
* the return value is negative.
* @return return value from the host
*/
static inline long semihosting_call(long id, long *data, int *out_errno)
{
    const long ret = semihosting_call_noerrno(id, data);
    if (ret >= 0) {
        return ret;
    }
    /* Host reported failure (negative return): fetch its errno value.
     * Constant also defined in openocd_semihosting.h, which is common
     * for RISC-V and Xtensa; it is not included here to avoid a
     * circular dependency. */
    const int semihosting_sys_errno = 0x13;
    *out_errno = (int) semihosting_call_noerrno(semihosting_sys_errno, NULL);
    return ret;
}
#ifdef __cplusplus
}
#endif

View File

@@ -18,285 +18,299 @@
#include "sdkconfig.h" #include "sdkconfig.h"
.equ SAVE_REGS, 32 .equ SAVE_REGS, 32
.equ CONTEXT_SIZE, (SAVE_REGS * 4) .equ CONTEXT_SIZE, (SAVE_REGS * 4)
.equ panic_from_exception, xt_unhandled_exception .equ panic_from_exception, xt_unhandled_exception
.equ panic_from_isr, panicHandler .equ panic_from_isr, panicHandler
.macro save_regs /* Macro which first allocates space on the stack to save general
addi sp, sp, -CONTEXT_SIZE * purpose registers, and then save them. GP register is excluded.
sw ra, RV_STK_RA(sp) * The default size allocated on the stack is CONTEXT_SIZE, but it
sw tp, RV_STK_TP(sp) * can be overridden. */
sw t0, RV_STK_T0(sp) .macro save_general_regs cxt_size=CONTEXT_SIZE
sw t1, RV_STK_T1(sp) addi sp, sp, -\cxt_size
sw t2, RV_STK_T2(sp) sw ra, RV_STK_RA(sp)
sw s0, RV_STK_S0(sp) sw tp, RV_STK_TP(sp)
sw s1, RV_STK_S1(sp) sw t0, RV_STK_T0(sp)
sw a0, RV_STK_A0(sp) sw t1, RV_STK_T1(sp)
sw a1, RV_STK_A1(sp) sw t2, RV_STK_T2(sp)
sw a2, RV_STK_A2(sp) sw s0, RV_STK_S0(sp)
sw a3, RV_STK_A3(sp) sw s1, RV_STK_S1(sp)
sw a4, RV_STK_A4(sp) sw a0, RV_STK_A0(sp)
sw a5, RV_STK_A5(sp) sw a1, RV_STK_A1(sp)
sw a6, RV_STK_A6(sp) sw a2, RV_STK_A2(sp)
sw a7, RV_STK_A7(sp) sw a3, RV_STK_A3(sp)
sw s2, RV_STK_S2(sp) sw a4, RV_STK_A4(sp)
sw s3, RV_STK_S3(sp) sw a5, RV_STK_A5(sp)
sw s4, RV_STK_S4(sp) sw a6, RV_STK_A6(sp)
sw s5, RV_STK_S5(sp) sw a7, RV_STK_A7(sp)
sw s6, RV_STK_S6(sp) sw s2, RV_STK_S2(sp)
sw s7, RV_STK_S7(sp) sw s3, RV_STK_S3(sp)
sw s8, RV_STK_S8(sp) sw s4, RV_STK_S4(sp)
sw s9, RV_STK_S9(sp) sw s5, RV_STK_S5(sp)
sw s10, RV_STK_S10(sp) sw s6, RV_STK_S6(sp)
sw s11, RV_STK_S11(sp) sw s7, RV_STK_S7(sp)
sw t3, RV_STK_T3(sp) sw s8, RV_STK_S8(sp)
sw t4, RV_STK_T4(sp) sw s9, RV_STK_S9(sp)
sw t5, RV_STK_T5(sp) sw s10, RV_STK_S10(sp)
sw t6, RV_STK_T6(sp) sw s11, RV_STK_S11(sp)
sw t3, RV_STK_T3(sp)
sw t4, RV_STK_T4(sp)
sw t5, RV_STK_T5(sp)
sw t6, RV_STK_T6(sp)
.endm .endm
.macro save_mepc .macro save_mepc
csrr t0, mepc csrr t0, mepc
sw t0, RV_STK_MEPC(sp) sw t0, RV_STK_MEPC(sp)
.endm .endm
.macro restore_regs /* Restore the general purpose registers (excluding gp) from the context on
lw ra, RV_STK_RA(sp) * the stack. The context is then deallocated. The default size is CONTEXT_SIZE
lw tp, RV_STK_TP(sp) * but it can be overriden. */
lw t0, RV_STK_T0(sp) .macro restore_general_regs cxt_size=CONTEXT_SIZE
lw t1, RV_STK_T1(sp) lw ra, RV_STK_RA(sp)
lw t2, RV_STK_T2(sp) lw tp, RV_STK_TP(sp)
lw s0, RV_STK_S0(sp) lw t0, RV_STK_T0(sp)
lw s1, RV_STK_S1(sp) lw t1, RV_STK_T1(sp)
lw a0, RV_STK_A0(sp) lw t2, RV_STK_T2(sp)
lw a1, RV_STK_A1(sp) lw s0, RV_STK_S0(sp)
lw a2, RV_STK_A2(sp) lw s1, RV_STK_S1(sp)
lw a3, RV_STK_A3(sp) lw a0, RV_STK_A0(sp)
lw a4, RV_STK_A4(sp) lw a1, RV_STK_A1(sp)
lw a5, RV_STK_A5(sp) lw a2, RV_STK_A2(sp)
lw a6, RV_STK_A6(sp) lw a3, RV_STK_A3(sp)
lw a7, RV_STK_A7(sp) lw a4, RV_STK_A4(sp)
lw s2, RV_STK_S2(sp) lw a5, RV_STK_A5(sp)
lw s3, RV_STK_S3(sp) lw a6, RV_STK_A6(sp)
lw s4, RV_STK_S4(sp) lw a7, RV_STK_A7(sp)
lw s5, RV_STK_S5(sp) lw s2, RV_STK_S2(sp)
lw s6, RV_STK_S6(sp) lw s3, RV_STK_S3(sp)
lw s7, RV_STK_S7(sp) lw s4, RV_STK_S4(sp)
lw s8, RV_STK_S8(sp) lw s5, RV_STK_S5(sp)
lw s9, RV_STK_S9(sp) lw s6, RV_STK_S6(sp)
lw s10, RV_STK_S10(sp) lw s7, RV_STK_S7(sp)
lw s11, RV_STK_S11(sp) lw s8, RV_STK_S8(sp)
lw t3, RV_STK_T3(sp) lw s9, RV_STK_S9(sp)
lw t4, RV_STK_T4(sp) lw s10, RV_STK_S10(sp)
lw t5, RV_STK_T5(sp) lw s11, RV_STK_S11(sp)
lw t6, RV_STK_T6(sp) lw t3, RV_STK_T3(sp)
addi sp, sp, CONTEXT_SIZE lw t4, RV_STK_T4(sp)
lw t5, RV_STK_T5(sp)
lw t6, RV_STK_T6(sp)
addi sp,sp, \cxt_size
.endm .endm
.macro restore_mepc .macro restore_mepc
lw t0, RV_STK_MEPC(sp) lw t0, RV_STK_MEPC(sp)
csrw mepc, t0 csrw mepc, t0
.endm .endm
.global rtos_int_enter .global rtos_int_enter
.global rtos_int_exit .global rtos_int_exit
.global _global_interrupt_handler .global _global_interrupt_handler
.section .exception_vectors.text .section .exception_vectors.text
/* This is the vector table. MTVEC points here. /* This is the vector table. MTVEC points here.
* *
* Use 4-byte intructions here. 1 instruction = 1 entry of the table. * Use 4-byte intructions here. 1 instruction = 1 entry of the table.
* The CPU jumps to MTVEC (i.e. the first entry) in case of an exception, * The CPU jumps to MTVEC (i.e. the first entry) in case of an exception,
* and (MTVEC & 0xfffffffc) + (mcause & 0x7fffffff) * 4, in case of an interrupt. * and (MTVEC & 0xfffffffc) + (mcause & 0x7fffffff) * 4, in case of an interrupt.
* *
* Note: for our CPU, we need to place this on a 256-byte boundary, as CPU * Note: for our CPU, we need to place this on a 256-byte boundary, as CPU
* only uses the 24 MSBs of the MTVEC, i.e. (MTVEC & 0xffffff00). * only uses the 24 MSBs of the MTVEC, i.e. (MTVEC & 0xffffff00).
*/ */
.balign 0x100 .balign 0x100
.global _vector_table .global _vector_table
.type _vector_table, @function .type _vector_table, @function
_vector_table: _vector_table:
.option push .option push
.option norvc .option norvc
j _panic_handler /* exception handler, entry 0 */ j _panic_handler /* exception handler, entry 0 */
.rept (ETS_T1_WDT_INUM - 1) .rept (ETS_T1_WDT_INUM - 1)
j _interrupt_handler /* 24 identical entries, all pointing to the interrupt handler */ j _interrupt_handler /* 24 identical entries, all pointing to the interrupt handler */
.endr .endr
j _panic_handler /* Call panic handler for ETS_T1_WDT_INUM interrupt (soc-level panic)*/ j _panic_handler /* Call panic handler for ETS_T1_WDT_INUM interrupt (soc-level panic)*/
j _panic_handler /* Call panic handler for ETS_CACHEERR_INUM interrupt (soc-level panic)*/ j _panic_handler /* Call panic handler for ETS_CACHEERR_INUM interrupt (soc-level panic)*/
#ifdef CONFIG_ESP_SYSTEM_MEMPROT_FEATURE #ifdef CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
j _panic_handler /* Call panic handler for ETS_MEMPROT_ERR_INUM interrupt (soc-level panic)*/ j _panic_handler /* Call panic handler for ETS_MEMPROT_ERR_INUM interrupt (soc-level panic)*/
.rept (ETS_MAX_INUM - ETS_MEMPROT_ERR_INUM) .rept (ETS_MAX_INUM - ETS_MEMPROT_ERR_INUM)
#else #else
.rept (ETS_MAX_INUM - ETS_CACHEERR_INUM) .rept (ETS_MAX_INUM - ETS_CACHEERR_INUM)
#endif #endif //CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
j _interrupt_handler /* 6 identical entries, all pointing to the interrupt handler */ j _interrupt_handler /* 6 identical entries, all pointing to the interrupt handler */
.endr .endr
.option pop .option pop
.size _vector_table, .-_vector_table .size _vector_table, .-_vector_table
/* Exception handler.*/ /* Exception handler.*/
.type _panic_handler, @function .type _panic_handler, @function
_panic_handler: _panic_handler:
addi sp, sp, -RV_STK_FRMSZ /* allocate space on stack to store necessary registers */ /* Allocate space on the stack and store general purpose registers */
/* save general registers */ save_general_regs RV_STK_FRMSZ
sw ra, RV_STK_RA(sp)
sw gp, RV_STK_GP(sp) /* As gp register is not saved by the macro, save it here */
sw tp, RV_STK_TP(sp) sw gp, RV_STK_GP(sp)
sw t0, RV_STK_T0(sp)
sw t1, RV_STK_T1(sp) /* Same goes for the SP value before trapping */
sw t2, RV_STK_T2(sp) addi t0, sp, RV_STK_FRMSZ /* restore sp with the value when trap happened */
sw s0, RV_STK_S0(sp)
sw s1, RV_STK_S1(sp) /* Save CSRs */
sw a0, RV_STK_A0(sp) sw t0, RV_STK_SP(sp)
sw a1, RV_STK_A1(sp) csrr t0, mepc
sw a2, RV_STK_A2(sp) sw t0, RV_STK_MEPC(sp)
sw a3, RV_STK_A3(sp) csrr t0, mstatus
sw a4, RV_STK_A4(sp) sw t0, RV_STK_MSTATUS(sp)
sw a5, RV_STK_A5(sp) csrr t0, mtvec
sw a6, RV_STK_A6(sp) sw t0, RV_STK_MTVEC(sp)
sw a7, RV_STK_A7(sp) csrr t0, mtval
sw s2, RV_STK_S2(sp) sw t0, RV_STK_MTVAL(sp)
sw s3, RV_STK_S3(sp) csrr t0, mhartid
sw s4, RV_STK_S4(sp) sw t0, RV_STK_MHARTID(sp)
sw s5, RV_STK_S5(sp)
sw s6, RV_STK_S6(sp) /* Call panic_from_exception(sp) or panic_from_isr(sp)
sw s7, RV_STK_S7(sp) * depending on whether we have a pseudo excause or not.
sw s8, RV_STK_S8(sp) * If mcause's highest bit is 1, then an interrupt called this routine,
sw s9, RV_STK_S9(sp) * so we have a pseudo excause. Else, it is due to a exception, we don't
sw s10, RV_STK_S10(sp) * have an pseudo excause */
sw s11, RV_STK_S11(sp) mv a0, sp
sw t3, RV_STK_T3(sp) csrr a1, mcause
sw t4, RV_STK_T4(sp) /* Branches instructions don't accept immediates values, so use t1 to
sw t5, RV_STK_T5(sp) * store our comparator */
sw t6, RV_STK_T6(sp) li t0, 0x80000000
addi t0, sp, RV_STK_FRMSZ /* restore sp with the value when trap happened */ bgeu a1, t0, _call_panic_handler
sw t0, RV_STK_SP(sp) sw a1, RV_STK_MCAUSE(sp)
csrr t0, mepc jal panic_from_exception
sw t0, RV_STK_MEPC(sp) /* We arrive here if the exception handler has returned. */
csrr t0, mstatus j _return_from_exception
sw t0, RV_STK_MSTATUS(sp)
csrr t0, mtvec
sw t0, RV_STK_MTVEC(sp)
csrr t0, mtval
sw t0, RV_STK_MTVAL(sp)
csrr t0, mhartid
sw t0, RV_STK_MHARTID(sp)
/* Call panic_from_exception(sp) or panic_from_isr(sp)
* depending on whether we have a pseudo excause or not.
* If mcause's highest bit is 1, then an interrupt called this routine,
* so we have a pseudo excause. Else, it is due to a exception, we don't
* have an pseudo excause */
mv a0, sp
csrr a1, mcause
/* Branches instructions don't accept immediates values, so use t1 to
* store our comparator */
li t0, 0x80000000
bgeu a1, t0, _call_panic_handler
sw a1, RV_STK_MCAUSE(sp)
/* exception_from_panic never returns */
j panic_from_exception
_call_panic_handler: _call_panic_handler:
/* Remove highest bit from mcause (a1) register and save it in the /* Remove highest bit from mcause (a1) register and save it in the
* structure */ * structure */
not t0, t0 not t0, t0
and a1, a1, t0 and a1, a1, t0
sw a1, RV_STK_MCAUSE(sp) sw a1, RV_STK_MCAUSE(sp)
/* exception_from_isr never returns */ jal panic_from_isr
j panic_from_isr
.size panic_from_isr, .-panic_from_isr
/* This is the interrupt handler. /* We arrive here if the exception handler has returned. This means that
* It saves the registers on the stack, * the exception was handled, and the execution flow should resume.
* prepares for interrupt nesting, * Restore the registers and return from the exception.
* re-enables the interrupts, */
* then jumps to the C dispatcher in interrupt.c. _return_from_exception:
*/ restore_mepc
.global _interrupt_handler /* MTVEC and SP are assumed to be unmodified.
.type _interrupt_handler, @function * MSTATUS, MHARTID, MTVAL are read-only and not restored.
*/
lw gp, RV_STK_GP(sp)
restore_general_regs RV_STK_FRMSZ
mret
.size _panic_handler, .-_panic_handler
/* This is the interrupt handler.
* It saves the registers on the stack,
* prepares for interrupt nesting,
* re-enables the interrupts,
* then jumps to the C dispatcher in interrupt.c.
*/
.global _interrupt_handler
.type _interrupt_handler, @function
_interrupt_handler: _interrupt_handler:
/* entry */ /* Start by saving the general purpose registers and the PC value before
save_regs * the interrupt happened. */
save_mepc save_general_regs
save_mepc
/* Before doing anythig preserve the stack pointer */ /* Though it is not necessary we save GP and SP here.
/* It will be saved in current TCB, if needed */ * SP is necessary to help GDB to properly unwind
mv a0, sp * the backtrace of threads preempted by interrupts (OS tick etc.).
call rtos_int_enter * GP is saved just to have its proper value in GDB. */
/* As gp register is not saved by the macro, save it here */
sw gp, RV_STK_GP(sp)
/* Same goes for the SP value before trapping */
addi t0, sp, CONTEXT_SIZE /* restore sp with the value when interrupt happened */
/* Save SP */
sw t0, RV_STK_SP(sp)
/* Before dispatch c handler, restore interrupt to enable nested intr */ /* Before doing anythig preserve the stack pointer */
csrr s1, mcause /* It will be saved in current TCB, if needed */
csrr s2, mstatus mv a0, sp
call rtos_int_enter
/* If this is a non-nested interrupt, SP now points to the interrupt stack */
/* Save the interrupt threshold level */ /* Before dispatch c handler, restore interrupt to enable nested intr */
la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG csrr s1, mcause
lw s3, 0(t0) csrr s2, mstatus
/* Increase interrupt threshold level */ /* Save the interrupt threshold level */
li t2, 0x7fffffff la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
and t1, s1, t2 /* t1 = mcause & mask */ lw s3, 0(t0)
slli t1, t1, 2 /* t1 = mcause * 4 */
la t2, INTC_INT_PRIO_REG(0)
add t1, t2, t1 /* t1 = INTC_INT_PRIO_REG + 4 * mcause */
lw t2, 0(t1) /* t2 = INTC_INT_PRIO_REG[mcause] */
addi t2, t2, 1 /* t2 = t2 +1 */
sw t2, 0(t0) /* INTERRUPT_CORE0_CPU_INT_THRESH_REG = t2 */
fence
li t0, 0x8 /* Increase interrupt threshold level */
csrrs t0, mstatus, t0 li t2, 0x7fffffff
and t1, s1, t2 /* t1 = mcause & mask */
slli t1, t1, 2 /* t1 = mcause * 4 */
la t2, INTC_INT_PRIO_REG(0)
add t1, t2, t1 /* t1 = INTC_INT_PRIO_REG + 4 * mcause */
lw t2, 0(t1) /* t2 = INTC_INT_PRIO_REG[mcause] */
addi t2, t2, 1 /* t2 = t2 +1 */
sw t2, 0(t0) /* INTERRUPT_CORE0_CPU_INT_THRESH_REG = t2 */
fence
#ifdef CONFIG_PM_TRACE li t0, 0x8
li a0, 0 /* = ESP_PM_TRACE_IDLE */ csrrs t0, mstatus, t0
#if SOC_CPU_CORES_NUM == 1 /* MIE set. Nested interrupts can now occur */
li a1, 0 /* No need to check core ID on single core hardware */
#else
csrr a1, mhartid
#endif
la t0, esp_pm_trace_exit
jalr t0 /* absolute jump, avoid the 1 MiB range constraint */
#endif
#ifdef CONFIG_PM_ENABLE #ifdef CONFIG_PM_TRACE
la t0, esp_pm_impl_isr_hook li a0, 0 /* = ESP_PM_TRACE_IDLE */
jalr t0 /* absolute jump, avoid the 1 MiB range constraint */ #if SOC_CPU_CORES_NUM == 1
#endif li a1, 0 /* No need to check core ID on single core hardware */
#else
csrr a1, mhartid
#endif
la t0, esp_pm_trace_exit
jalr t0 /* absolute jump, avoid the 1 MiB range constraint */
#endif
/* call the C dispatcher */ #ifdef CONFIG_PM_ENABLE
mv a0, sp /* argument 1, stack pointer */ la t0, esp_pm_impl_isr_hook
mv a1, s1 /* argument 2, interrupt number (mcause) */ jalr t0 /* absolute jump, avoid the 1 MiB range constraint */
/* mask off the interrupt flag of mcause */ #endif
li t0, 0x7fffffff
and a1, a1, t0
jal _global_interrupt_handler
/* After dispatch c handler, disable interrupt to make freertos make context switch */ /* call the C dispatcher */
mv a0, sp /* argument 1, stack pointer */
mv a1, s1 /* argument 2, interrupt number (mcause) */
/* mask off the interrupt flag of mcause */
li t0, 0x7fffffff
and a1, a1, t0
jal _global_interrupt_handler
li t0, 0x8 /* After dispatch c handler, disable interrupt to make freertos make context switch */
csrrc t0, mstatus, t0
/* restore the interrupt threshold level */ li t0, 0x8
la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG csrrc t0, mstatus, t0
sw s3, 0(t0) /* MIE cleared. Nested interrupts are disabled */
fence
/* Yield to the next task is needed: */ /* restore the interrupt threshold level */
mv a0, sp la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
call rtos_int_exit sw s3, 0(t0)
fence
/* The next (or current) stack pointer is returned in a0 */ /* Yield to the next task is needed: */
mv sp, a0 mv a0, sp
call rtos_int_exit
/* If this is a non-nested interrupt, context switch called, SP now points to back to task stack. */
/* restore the rest of the registers */ /* The next (or current) stack pointer is returned in a0 */
csrw mcause, s1 mv sp, a0
csrw mstatus, s2
restore_mepc
restore_regs
/* exit, this will also re-enable the interrupts */ /* restore the rest of the registers */
mret csrw mcause, s1
.size _interrupt_handler, .-_interrupt_handler csrw mstatus, s2
restore_mepc
restore_general_regs
/* exit, this will also re-enable the interrupts */
mret
.size _interrupt_handler, .-_interrupt_handler