Mirror of https://github.com/espressif/esp-idf.git, synced 2025-08-02 12:14:32 +02:00
Merge branch 'bugfix/interrupted_thread_gdb_bt_v4.4' into 'release/v4.4'

riscv: Fixes GDB backtrace of interrupted threads (v4.4)

See merge request espressif/esp-idf!17716
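For context, the mechanism this merge leans on is RISC-V semihosting: the target executes an `ebreak` bracketed by two magic shift instructions that an attached OpenOCD recognizes, with the call number in `a0` and a pointer to an argument block in `a1`. The sketch below only illustrates that calling convention; the wrapper name is hypothetical and this is not the actual `semihosting_call_noerrno()` implementation used by the changes.

```c
/* Illustration only: the slli/ebreak/srai triple is the RISC-V semihosting
 * trap sequence a debugger looks for. a0 carries the call number, a1 the
 * pointer to the argument block; the debugger writes the result back to a0. */
static inline long example_semihosting_call(long num, long *args)
{
    register long a0 asm("a0") = num;
    register long a1 asm("a1") = (long)args;
    __asm__ __volatile__(
        ".option push\n"
        ".option norvc\n"            /* keep all three instructions 4 bytes long */
        "slli zero, zero, 0x1f\n"
        "ebreak\n"
        "srai zero, zero, 0x7\n"
        ".option pop\n"
        : "+r"(a0)
        : "r"(a1)
        : "memory");
    return a0;
}
```

If no debugger is attached, the `ebreak` simply raises a breakpoint exception on the target, which is why the code in this merge checks for an attached debugger before issuing such calls.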
@@ -27,8 +27,6 @@ typedef struct {
     esp_apptrace_mem_block_t * mem_blocks;
 } esp_apptrace_riscv_ctrl_block_t;
 
-#define RISCV_APPTRACE_SYSNR 0x64
-
 #define ESP_APPTRACE_RISCV_BLOCK_LEN_MSK 0x7FFFUL
 #define ESP_APPTRACE_RISCV_BLOCK_LEN(_l_) ((_l_) & ESP_APPTRACE_RISCV_BLOCK_LEN_MSK)
 #define ESP_APPTRACE_RISCV_BLOCK_LEN_GET(_v_) ((_v_) & ESP_APPTRACE_RISCV_BLOCK_LEN_MSK)
@@ -104,7 +102,7 @@ __attribute__((weak)) int esp_apptrace_advertise_ctrl_block(void *ctrl_block_add
     if (!esp_cpu_in_ocd_debug_mode()) {
         return 0;
     }
-    return (int) semihosting_call_noerrno(RISCV_APPTRACE_SYSNR, (long*)ctrl_block_addr);
+    return (int) semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_APPTRACE_INIT, (long*)ctrl_block_addr);
 }
 
 /* Returns up buffers config.
@@ -14,7 +14,6 @@
 
 const static char *TAG = "esp_dbg_stubs";
 
-#define RISCV_DBG_STUBS_SYSNR 0x65
 
 /* Advertises apptrace control block address to host */
 static int esp_dbg_stubs_advertise_table(void *stub_table_addr)
@@ -22,7 +21,7 @@ static int esp_dbg_stubs_advertise_table(void *stub_table_addr)
     if (!esp_cpu_in_ocd_debug_mode()) {
         return 0;
     }
-    return (int) semihosting_call_noerrno(RISCV_DBG_STUBS_SYSNR, (long*)stub_table_addr);
+    return (int) semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_DBG_STUBS_INIT, (long*)stub_table_addr);
 }
 
 void esp_dbg_stubs_ll_init(void *stub_table_addr)
@@ -153,7 +153,7 @@ void vPortEndScheduler(void)
 
 // ------------------------ Stack --------------------------
 
-static void prvTaskExitError(void)
+__attribute__((noreturn)) static void _prvTaskExitError(void)
 {
     /* A function that implements a task must not exit or attempt to return to
     its caller as there is nothing to return to. If a task wants to exit it
@@ -166,6 +166,18 @@ static void prvTaskExitError(void)
     abort();
 }
 
+__attribute__((naked)) static void prvTaskExitError(void)
+{
+    asm volatile(".option push\n" \
+                 ".option norvc\n" \
+                 "nop\n" \
+                 ".option pop");
+    /* Task entry's RA will point here. Shifting RA into prvTaskExitError is necessary
+       to make the GDB backtrace end inside that function.
+       Otherwise the backtrace will end in the function lying just before prvTaskExitError in the address space. */
+    _prvTaskExitError();
+}
+
 StackType_t *pxPortInitialiseStack(StackType_t *pxTopOfStack, TaskFunction_t pxCode, void *pvParameters)
 {
     extern uint32_t __global_pointer$;
@@ -228,7 +240,9 @@ StackType_t *pxPortInitialiseStack(StackType_t *pxTopOfStack, TaskFunction_t pxC
     sp -= RV_STK_FRMSZ;
     RvExcFrame *frame = (RvExcFrame *)sp;
     memset(frame, 0, sizeof(*frame));
-    frame->ra = (UBaseType_t)prvTaskExitError;
+    /* Shifting RA into prvTaskExitError is necessary to make the GDB backtrace end inside that function.
+       Otherwise the backtrace will end in the function lying just before prvTaskExitError in the address space. */
+    frame->ra = (UBaseType_t)prvTaskExitError + 4 /* size of the nop instruction at the beginning of prvTaskExitError */;
     frame->mepc = (UBaseType_t)pxCode;
     frame->a0 = (UBaseType_t)pvParameters;
     frame->gp = (UBaseType_t)&__global_pointer$;
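Why the extra 4 bytes matter: an unwinder attributes a return address to the function containing the call that produced it, so it effectively looks up `ra - 1`. An RA equal to the very first byte of prvTaskExitError therefore resolves to whatever symbol the linker placed just before it. The sketch below only illustrates that lookup with a made-up symbol table; it is not ESP-IDF or GDB code.

```c
#include <stddef.h>
#include <stdint.h>

/* Hypothetical symbol-table entry, used only to illustrate how a debugger
 * maps a return address back to a function name. */
typedef struct {
    uintptr_t start;
    uintptr_t end;      /* one past the last byte of the function */
    const char *name;
} func_range_t;

/* Illustration of GDB-style attribution of a return address. Because the
 * lookup is biased back by one byte, an RA equal to the first byte of
 * prvTaskExitError would land in the preceding function, which is exactly
 * what seeding RA at prvTaskExitError + 4 (after the nop) avoids. */
static const char *example_func_for_return_addr(const func_range_t *table, size_t count, uintptr_t ra)
{
    uintptr_t lookup = ra - 1;
    for (size_t i = 0; i < count; i++) {
        if (lookup >= table[i].start && lookup < table[i].end) {
            return table[i].name;
        }
    }
    return "??";
}
```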
@@ -14,6 +14,7 @@
 #include "soc/assist_debug_reg.h"
 #include "esp_attr.h"
 #include "riscv/csr.h"
+#include "riscv/semihosting.h"
 
 /*performance counter*/
 #define CSR_PCER_MACHINE 0x7e0
@@ -71,8 +72,29 @@ static inline void cpu_ll_init_hwloop(void)
     // Nothing needed here for ESP32-C3
 }
 
+FORCE_INLINE_ATTR bool cpu_ll_is_debugger_attached(void)
+{
+    return REG_GET_BIT(ASSIST_DEBUG_CORE_0_DEBUG_MODE_REG, ASSIST_DEBUG_CORE_0_DEBUG_MODULE_ACTIVE);
+}
+
 static inline void cpu_ll_set_breakpoint(int id, uint32_t pc)
 {
+    if (cpu_ll_is_debugger_attached()) {
+        /* If we want to set a breakpoint which, when hit, transfers control to the debugger,
+         * we need to set `action` in `mcontrol` to 1 (Enter Debug Mode).
+         * That `action` value is supported only when `dmode` of `tdata1` is set.
+         * But `dmode` can be modified by the debugger only (from Debug Mode).
+         *
+         * So when the debugger is connected we use a special syscall to ask it to set the breakpoint for us.
+         */
+        long args[] = {true, id, (long)pc};
+        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
+        if (ret == 0) {
+            return;
+        }
+    }
+    /* The code below sets a breakpoint which will trigger a `Breakpoint` exception
+     * instead of transferring control to the debugger. */
     RV_WRITE_CSR(tselect,id);
     RV_SET_CSR(CSR_TCONTROL,TCONTROL_MTE);
     RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE);
@@ -82,6 +104,14 @@ static inline void cpu_ll_set_breakpoint(int id, uint32_t pc)
 
 static inline void cpu_ll_clear_breakpoint(int id)
 {
+    if (cpu_ll_is_debugger_attached()) {
+        /* see description in cpu_ll_set_breakpoint() */
+        long args[] = {false, id};
+        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
+        if (ret == 0) {
+            return;
+        }
+    }
     RV_WRITE_CSR(tselect,id);
     RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE);
     RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE);
@@ -105,6 +135,17 @@ static inline void cpu_ll_set_watchpoint(int id,
                                          bool on_write)
 {
     uint32_t addr_napot;
 
+    if (cpu_ll_is_debugger_attached()) {
+        /* see description in cpu_ll_set_breakpoint() */
+        long args[] = {true, id, (long)addr, (long)size,
+                       (long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))};
+        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
+        if (ret == 0) {
+            return;
+        }
+    }
+
     RV_WRITE_CSR(tselect,id);
     RV_SET_CSR(CSR_TCONTROL, TCONTROL_MPTE | TCONTROL_MTE);
     RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE);
@@ -123,6 +164,14 @@ static inline void cpu_ll_set_watchpoint(int id,
 
 static inline void cpu_ll_clear_watchpoint(int id)
 {
+    if (cpu_ll_is_debugger_attached()) {
+        /* see description in cpu_ll_set_breakpoint() */
+        long args[] = {false, id};
+        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
+        if (ret == 0) {
+            return;
+        }
+    }
     RV_WRITE_CSR(tselect,id);
     RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE);
     RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE);
@@ -132,11 +181,6 @@ static inline void cpu_ll_clear_watchpoint(int id)
     return;
 }
 
-FORCE_INLINE_ATTR bool cpu_ll_is_debugger_attached(void)
-{
-    return REG_GET_BIT(ASSIST_DEBUG_CORE_0_DEBUG_MODE_REG, ASSIST_DEBUG_CORE_0_DEBUG_MODULE_ACTIVE);
-}
-
 static inline void cpu_ll_break(void)
 {
     asm volatile("ebreak\n");
@@ -12,6 +12,7 @@
 #include "soc/assist_debug_reg.h"
 #include "esp_attr.h"
 #include "riscv/csr.h"
+#include "riscv/semihosting.h"
 
 /*performance counter*/
 #define CSR_PCER_MACHINE 0x7e0
@@ -69,8 +70,29 @@ static inline void cpu_ll_init_hwloop(void)
     // Nothing needed here for ESP32-H2
 }
 
+FORCE_INLINE_ATTR bool cpu_ll_is_debugger_attached(void)
+{
+    return REG_GET_BIT(ASSIST_DEBUG_CORE_0_DEBUG_MODE_REG, ASSIST_DEBUG_CORE_0_DEBUG_MODULE_ACTIVE);
+}
+
 static inline void cpu_ll_set_breakpoint(int id, uint32_t pc)
 {
+    if (cpu_ll_is_debugger_attached()) {
+        /* If we want to set a breakpoint which, when hit, transfers control to the debugger,
+         * we need to set `action` in `mcontrol` to 1 (Enter Debug Mode).
+         * That `action` value is supported only when `dmode` of `tdata1` is set.
+         * But `dmode` can be modified by the debugger only (from Debug Mode).
+         *
+         * So when the debugger is connected we use a special syscall to ask it to set the breakpoint for us.
+         */
+        long args[] = {true, id, (long)pc};
+        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
+        if (ret == 0) {
+            return;
+        }
+    }
+    /* The code below sets a breakpoint which will trigger a `Breakpoint` exception
+     * instead of transferring control to the debugger. */
     RV_WRITE_CSR(tselect,id);
     RV_SET_CSR(CSR_TCONTROL,TCONTROL_MTE);
     RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE);
@@ -80,6 +102,14 @@ static inline void cpu_ll_set_breakpoint(int id, uint32_t pc)
 
 static inline void cpu_ll_clear_breakpoint(int id)
 {
+    if (cpu_ll_is_debugger_attached()) {
+        /* see description in cpu_ll_set_breakpoint() */
+        long args[] = {false, id};
+        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_BREAKPOINT_SET, args);
+        if (ret == 0) {
+            return;
+        }
+    }
     RV_WRITE_CSR(tselect,id);
     RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE);
     RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE|TDATA1_EXECUTE);
@@ -103,6 +133,17 @@ static inline void cpu_ll_set_watchpoint(int id,
                                          bool on_write)
 {
     uint32_t addr_napot;
 
+    if (cpu_ll_is_debugger_attached()) {
+        /* see description in cpu_ll_set_breakpoint() */
+        long args[] = {true, id, (long)addr, (long)size,
+                       (long)((on_read ? ESP_SEMIHOSTING_WP_FLG_RD : 0) | (on_write ? ESP_SEMIHOSTING_WP_FLG_WR : 0))};
+        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
+        if (ret == 0) {
+            return;
+        }
+    }
+
     RV_WRITE_CSR(tselect,id);
     RV_SET_CSR(CSR_TCONTROL, TCONTROL_MPTE | TCONTROL_MTE);
     RV_SET_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE);
@@ -121,6 +162,14 @@ static inline void cpu_ll_set_watchpoint(int id,
 
 static inline void cpu_ll_clear_watchpoint(int id)
 {
+    if (cpu_ll_is_debugger_attached()) {
+        /* see description in cpu_ll_set_breakpoint() */
+        long args[] = {false, id};
+        int ret = semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
+        if (ret == 0) {
+            return;
+        }
+    }
     RV_WRITE_CSR(tselect,id);
     RV_CLEAR_CSR(CSR_TCONTROL,TCONTROL_MTE);
     RV_CLEAR_CSR(CSR_TDATA1, TDATA1_USER|TDATA1_MACHINE);
@@ -130,11 +179,6 @@ static inline void cpu_ll_clear_watchpoint(int id)
     return;
 }
 
-FORCE_INLINE_ATTR bool cpu_ll_is_debugger_attached(void)
-{
-    return REG_GET_BIT(ASSIST_DEBUG_CORE_0_DEBUG_MODE_REG, ASSIST_DEBUG_CORE_0_DEBUG_MODULE_ACTIVE);
-}
-
 static inline void cpu_ll_break(void)
 {
     asm volatile("ebreak\n");
@@ -10,6 +10,53 @@
 extern "C" {
 #endif
 
+/* ESP custom semihosting call numbers */
+
+/**
+ * @brief Initialize apptrace data at host side
+ *
+ * @param addr address of apptrace control data block
+ * @return return 0 on success or non-zero error code
+ */
+#define ESP_SEMIHOSTING_SYS_APPTRACE_INIT   0x64
+
+/**
+ * @brief Initialize debug stubs table at host side
+ *
+ * @param addr address of debug stubs table
+ * @return return 0 on success or non-zero error code
+ */
+#define ESP_SEMIHOSTING_SYS_DBG_STUBS_INIT  0x65
+
+/**
+ * @brief Set/clear breakpoint
+ *
+ * @param set if true set breakpoint, otherwise clear it
+ * @param id breakpoint ID
+ * @param addr address to set breakpoint at. Ignored if `set` is false.
+ * @return return 0 on success or non-zero error code
+ */
+#define ESP_SEMIHOSTING_SYS_BREAKPOINT_SET  0x66
+
+/**
+ * @brief Set/clear watchpoint
+ *
+ * @param set if true set watchpoint, otherwise clear it
+ * @param id watchpoint ID
+ * @param addr address to set watchpoint at. Ignored if `set` is false.
+ * @param size size of watchpoint. Ignored if `set` is false.
+ * @param flags watchpoint flags, see description below. Ignored if `set` is false.
+ * @return return 0 on success or non-zero error code
+ */
+#define ESP_SEMIHOSTING_SYS_WATCHPOINT_SET  0x67
+
+/* bit values for the `flags` argument of the ESP_SEMIHOSTING_SYS_WATCHPOINT_SET call. Can be ORed. */
+/* watch for 'reads' at `addr` */
+#define ESP_SEMIHOSTING_WP_FLG_RD           (1UL << 0)
+/* watch for 'writes' at `addr` */
+#define ESP_SEMIHOSTING_WP_FLG_WR           (1UL << 1)
+
 /**
  * @brief Perform semihosting call
  *
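A minimal usage sketch of the new call numbers, mirroring the argument layout the cpu_ll changes above pass ({set, id, addr, size, flags}); the helper name is made up for illustration and it assumes a debugger is attached:

```c
#include <stdbool.h>
#include "riscv/semihosting.h"

/* Illustration only: ask the attached debugger to set a 4-byte write
 * watchpoint on `addr`, using the argument layout documented above. */
static int example_set_write_watchpoint(int id, void *addr)
{
    long args[] = {true, id, (long)addr, 4, (long)ESP_SEMIHOSTING_WP_FLG_WR};
    return (int)semihosting_call_noerrno(ESP_SEMIHOSTING_SYS_WATCHPOINT_SET, args);
}
```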
@@ -18,285 +18,299 @@
 #include "sdkconfig.h"
 
 
     .equ SAVE_REGS, 32
     .equ CONTEXT_SIZE, (SAVE_REGS * 4)
     .equ panic_from_exception, xt_unhandled_exception
     .equ panic_from_isr, panicHandler
 
-    .macro save_regs
-    addi sp, sp, -CONTEXT_SIZE
-    sw ra, RV_STK_RA(sp)
-    sw tp, RV_STK_TP(sp)
-    sw t0, RV_STK_T0(sp)
-    sw t1, RV_STK_T1(sp)
-    sw t2, RV_STK_T2(sp)
-    sw s0, RV_STK_S0(sp)
-    sw s1, RV_STK_S1(sp)
-    sw a0, RV_STK_A0(sp)
-    sw a1, RV_STK_A1(sp)
-    sw a2, RV_STK_A2(sp)
-    sw a3, RV_STK_A3(sp)
-    sw a4, RV_STK_A4(sp)
-    sw a5, RV_STK_A5(sp)
-    sw a6, RV_STK_A6(sp)
-    sw a7, RV_STK_A7(sp)
-    sw s2, RV_STK_S2(sp)
-    sw s3, RV_STK_S3(sp)
-    sw s4, RV_STK_S4(sp)
-    sw s5, RV_STK_S5(sp)
-    sw s6, RV_STK_S6(sp)
-    sw s7, RV_STK_S7(sp)
-    sw s8, RV_STK_S8(sp)
-    sw s9, RV_STK_S9(sp)
-    sw s10, RV_STK_S10(sp)
-    sw s11, RV_STK_S11(sp)
-    sw t3, RV_STK_T3(sp)
-    sw t4, RV_STK_T4(sp)
-    sw t5, RV_STK_T5(sp)
-    sw t6, RV_STK_T6(sp)
+/* Macro which first allocates space on the stack to save general
+ * purpose registers, and then saves them. GP register is excluded.
+ * The default size allocated on the stack is CONTEXT_SIZE, but it
+ * can be overridden. */
+    .macro save_general_regs cxt_size=CONTEXT_SIZE
+    addi sp, sp, -\cxt_size
+    sw ra, RV_STK_RA(sp)
+    sw tp, RV_STK_TP(sp)
+    sw t0, RV_STK_T0(sp)
+    sw t1, RV_STK_T1(sp)
+    sw t2, RV_STK_T2(sp)
+    sw s0, RV_STK_S0(sp)
+    sw s1, RV_STK_S1(sp)
+    sw a0, RV_STK_A0(sp)
+    sw a1, RV_STK_A1(sp)
+    sw a2, RV_STK_A2(sp)
+    sw a3, RV_STK_A3(sp)
+    sw a4, RV_STK_A4(sp)
+    sw a5, RV_STK_A5(sp)
+    sw a6, RV_STK_A6(sp)
+    sw a7, RV_STK_A7(sp)
+    sw s2, RV_STK_S2(sp)
+    sw s3, RV_STK_S3(sp)
+    sw s4, RV_STK_S4(sp)
+    sw s5, RV_STK_S5(sp)
+    sw s6, RV_STK_S6(sp)
+    sw s7, RV_STK_S7(sp)
+    sw s8, RV_STK_S8(sp)
+    sw s9, RV_STK_S9(sp)
+    sw s10, RV_STK_S10(sp)
+    sw s11, RV_STK_S11(sp)
+    sw t3, RV_STK_T3(sp)
+    sw t4, RV_STK_T4(sp)
+    sw t5, RV_STK_T5(sp)
+    sw t6, RV_STK_T6(sp)
     .endm
 
     .macro save_mepc
     csrr t0, mepc
     sw t0, RV_STK_MEPC(sp)
     .endm
 
-    .macro restore_regs
-    lw ra, RV_STK_RA(sp)
-    lw tp, RV_STK_TP(sp)
-    lw t0, RV_STK_T0(sp)
-    lw t1, RV_STK_T1(sp)
-    lw t2, RV_STK_T2(sp)
-    lw s0, RV_STK_S0(sp)
-    lw s1, RV_STK_S1(sp)
-    lw a0, RV_STK_A0(sp)
-    lw a1, RV_STK_A1(sp)
-    lw a2, RV_STK_A2(sp)
-    lw a3, RV_STK_A3(sp)
-    lw a4, RV_STK_A4(sp)
-    lw a5, RV_STK_A5(sp)
-    lw a6, RV_STK_A6(sp)
-    lw a7, RV_STK_A7(sp)
-    lw s2, RV_STK_S2(sp)
-    lw s3, RV_STK_S3(sp)
-    lw s4, RV_STK_S4(sp)
-    lw s5, RV_STK_S5(sp)
-    lw s6, RV_STK_S6(sp)
-    lw s7, RV_STK_S7(sp)
-    lw s8, RV_STK_S8(sp)
-    lw s9, RV_STK_S9(sp)
-    lw s10, RV_STK_S10(sp)
-    lw s11, RV_STK_S11(sp)
-    lw t3, RV_STK_T3(sp)
-    lw t4, RV_STK_T4(sp)
-    lw t5, RV_STK_T5(sp)
-    lw t6, RV_STK_T6(sp)
-    addi sp, sp, CONTEXT_SIZE
+/* Restore the general purpose registers (excluding gp) from the context on
+ * the stack. The context is then deallocated. The default size is CONTEXT_SIZE
+ * but it can be overridden. */
+    .macro restore_general_regs cxt_size=CONTEXT_SIZE
+    lw ra, RV_STK_RA(sp)
+    lw tp, RV_STK_TP(sp)
+    lw t0, RV_STK_T0(sp)
+    lw t1, RV_STK_T1(sp)
+    lw t2, RV_STK_T2(sp)
+    lw s0, RV_STK_S0(sp)
+    lw s1, RV_STK_S1(sp)
+    lw a0, RV_STK_A0(sp)
+    lw a1, RV_STK_A1(sp)
+    lw a2, RV_STK_A2(sp)
+    lw a3, RV_STK_A3(sp)
+    lw a4, RV_STK_A4(sp)
+    lw a5, RV_STK_A5(sp)
+    lw a6, RV_STK_A6(sp)
+    lw a7, RV_STK_A7(sp)
+    lw s2, RV_STK_S2(sp)
+    lw s3, RV_STK_S3(sp)
+    lw s4, RV_STK_S4(sp)
+    lw s5, RV_STK_S5(sp)
+    lw s6, RV_STK_S6(sp)
+    lw s7, RV_STK_S7(sp)
+    lw s8, RV_STK_S8(sp)
+    lw s9, RV_STK_S9(sp)
+    lw s10, RV_STK_S10(sp)
+    lw s11, RV_STK_S11(sp)
+    lw t3, RV_STK_T3(sp)
+    lw t4, RV_STK_T4(sp)
+    lw t5, RV_STK_T5(sp)
+    lw t6, RV_STK_T6(sp)
+    addi sp, sp, \cxt_size
     .endm
 
     .macro restore_mepc
     lw t0, RV_STK_MEPC(sp)
     csrw mepc, t0
     .endm
 
     .global rtos_int_enter
     .global rtos_int_exit
     .global _global_interrupt_handler
 
     .section .exception_vectors.text
 /* This is the vector table. MTVEC points here.
  *
  * Use 4-byte intructions here. 1 instruction = 1 entry of the table.
  * The CPU jumps to MTVEC (i.e. the first entry) in case of an exception,
  * and (MTVEC & 0xfffffffc) + (mcause & 0x7fffffff) * 4, in case of an interrupt.
  *
  * Note: for our CPU, we need to place this on a 256-byte boundary, as CPU
  * only uses the 24 MSBs of the MTVEC, i.e. (MTVEC & 0xffffff00).
  */
 
     .balign 0x100
     .global _vector_table
     .type _vector_table, @function
 _vector_table:
     .option push
     .option norvc
     j _panic_handler        /* exception handler, entry 0 */
     .rept (ETS_T1_WDT_INUM - 1)
     j _interrupt_handler    /* 24 identical entries, all pointing to the interrupt handler */
     .endr
     j _panic_handler        /* Call panic handler for ETS_T1_WDT_INUM interrupt (soc-level panic)*/
     j _panic_handler        /* Call panic handler for ETS_CACHEERR_INUM interrupt (soc-level panic)*/
 #ifdef CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
     j _panic_handler        /* Call panic handler for ETS_MEMPROT_ERR_INUM interrupt (soc-level panic)*/
     .rept (ETS_MAX_INUM - ETS_MEMPROT_ERR_INUM)
 #else
     .rept (ETS_MAX_INUM - ETS_CACHEERR_INUM)
-#endif
+#endif //CONFIG_ESP_SYSTEM_MEMPROT_FEATURE
     j _interrupt_handler    /* 6 identical entries, all pointing to the interrupt handler */
     .endr
 
     .option pop
     .size _vector_table, .-_vector_table
 
 /* Exception handler.*/
     .type _panic_handler, @function
 _panic_handler:
-    addi sp, sp, -RV_STK_FRMSZ /* allocate space on stack to store necessary registers */
-    /* save general registers */
-    sw ra, RV_STK_RA(sp)
-    sw gp, RV_STK_GP(sp)
-    sw tp, RV_STK_TP(sp)
-    sw t0, RV_STK_T0(sp)
-    sw t1, RV_STK_T1(sp)
-    sw t2, RV_STK_T2(sp)
-    sw s0, RV_STK_S0(sp)
-    sw s1, RV_STK_S1(sp)
-    sw a0, RV_STK_A0(sp)
-    sw a1, RV_STK_A1(sp)
-    sw a2, RV_STK_A2(sp)
-    sw a3, RV_STK_A3(sp)
-    sw a4, RV_STK_A4(sp)
-    sw a5, RV_STK_A5(sp)
-    sw a6, RV_STK_A6(sp)
-    sw a7, RV_STK_A7(sp)
-    sw s2, RV_STK_S2(sp)
-    sw s3, RV_STK_S3(sp)
-    sw s4, RV_STK_S4(sp)
-    sw s5, RV_STK_S5(sp)
-    sw s6, RV_STK_S6(sp)
-    sw s7, RV_STK_S7(sp)
-    sw s8, RV_STK_S8(sp)
-    sw s9, RV_STK_S9(sp)
-    sw s10, RV_STK_S10(sp)
-    sw s11, RV_STK_S11(sp)
-    sw t3, RV_STK_T3(sp)
-    sw t4, RV_STK_T4(sp)
-    sw t5, RV_STK_T5(sp)
-    sw t6, RV_STK_T6(sp)
-    addi t0, sp, RV_STK_FRMSZ /* restore sp with the value when trap happened */
+    /* Allocate space on the stack and store general purpose registers */
+    save_general_regs RV_STK_FRMSZ
+
+    /* As gp register is not saved by the macro, save it here */
+    sw gp, RV_STK_GP(sp)
+
+    /* Same goes for the SP value before trapping */
+    addi t0, sp, RV_STK_FRMSZ /* restore sp with the value when trap happened */
+
+    /* Save CSRs */
     sw t0, RV_STK_SP(sp)
     csrr t0, mepc
     sw t0, RV_STK_MEPC(sp)
     csrr t0, mstatus
     sw t0, RV_STK_MSTATUS(sp)
     csrr t0, mtvec
     sw t0, RV_STK_MTVEC(sp)
     csrr t0, mtval
     sw t0, RV_STK_MTVAL(sp)
     csrr t0, mhartid
     sw t0, RV_STK_MHARTID(sp)
 
     /* Call panic_from_exception(sp) or panic_from_isr(sp)
      * depending on whether we have a pseudo excause or not.
      * If mcause's highest bit is 1, then an interrupt called this routine,
      * so we have a pseudo excause. Else, it is due to an exception and we
      * don't have a pseudo excause */
     mv a0, sp
     csrr a1, mcause
     /* Branch instructions don't accept immediate values, so use t0 to
      * store our comparator */
     li t0, 0x80000000
     bgeu a1, t0, _call_panic_handler
     sw a1, RV_STK_MCAUSE(sp)
-    /* exception_from_panic never returns */
-    j panic_from_exception
+    jal panic_from_exception
+    /* We arrive here if the exception handler has returned. */
+    j _return_from_exception
 _call_panic_handler:
     /* Remove highest bit from mcause (a1) register and save it in the
      * structure */
     not t0, t0
     and a1, a1, t0
     sw a1, RV_STK_MCAUSE(sp)
-    /* exception_from_isr never returns */
-    j panic_from_isr
-    .size panic_from_isr, .-panic_from_isr
+    jal panic_from_isr
+
+/* We arrive here if the exception handler has returned. This means that
+ * the exception was handled, and the execution flow should resume.
+ * Restore the registers and return from the exception.
+ */
+_return_from_exception:
+    restore_mepc
+    /* MTVEC and SP are assumed to be unmodified.
+     * MSTATUS, MHARTID, MTVAL are read-only and not restored.
+     */
+    lw gp, RV_STK_GP(sp)
+    restore_general_regs RV_STK_FRMSZ
+    mret
+    .size _panic_handler, .-_panic_handler
 
 /* This is the interrupt handler.
  * It saves the registers on the stack,
  * prepares for interrupt nesting,
  * re-enables the interrupts,
  * then jumps to the C dispatcher in interrupt.c.
  */
     .global _interrupt_handler
     .type _interrupt_handler, @function
 _interrupt_handler:
-    /* entry */
-    save_regs
+    /* Start by saving the general purpose registers and the PC value before
+     * the interrupt happened. */
+    save_general_regs
     save_mepc
 
+    /* Though it is not necessary we save GP and SP here.
+     * SP is necessary to help GDB to properly unwind
+     * the backtrace of threads preempted by interrupts (OS tick etc.).
+     * GP is saved just to have its proper value in GDB. */
+    /* As gp register is not saved by the macro, save it here */
+    sw gp, RV_STK_GP(sp)
+    /* Same goes for the SP value before trapping */
+    addi t0, sp, CONTEXT_SIZE /* restore sp with the value when interrupt happened */
+    /* Save SP */
+    sw t0, RV_STK_SP(sp)
+
     /* Before doing anything preserve the stack pointer */
     /* It will be saved in current TCB, if needed */
     mv a0, sp
     call rtos_int_enter
+    /* If this is a non-nested interrupt, SP now points to the interrupt stack */
 
     /* Before dispatch c handler, restore interrupt to enable nested intr */
     csrr s1, mcause
     csrr s2, mstatus
 
     /* Save the interrupt threshold level */
     la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
     lw s3, 0(t0)
 
     /* Increase interrupt threshold level */
     li t2, 0x7fffffff
     and t1, s1, t2  /* t1 = mcause & mask */
     slli t1, t1, 2  /* t1 = mcause * 4 */
     la t2, INTC_INT_PRIO_REG(0)
     add t1, t2, t1  /* t1 = INTC_INT_PRIO_REG + 4 * mcause */
     lw t2, 0(t1)    /* t2 = INTC_INT_PRIO_REG[mcause] */
     addi t2, t2, 1  /* t2 = t2 + 1 */
     sw t2, 0(t0)    /* INTERRUPT_CORE0_CPU_INT_THRESH_REG = t2 */
     fence
 
     li t0, 0x8
     csrrs t0, mstatus, t0
+    /* MIE set. Nested interrupts can now occur */
 
 #ifdef CONFIG_PM_TRACE
     li a0, 0 /* = ESP_PM_TRACE_IDLE */
 #if SOC_CPU_CORES_NUM == 1
     li a1, 0 /* No need to check core ID on single core hardware */
 #else
     csrr a1, mhartid
 #endif
     la t0, esp_pm_trace_exit
     jalr t0 /* absolute jump, avoid the 1 MiB range constraint */
 #endif
 
 #ifdef CONFIG_PM_ENABLE
     la t0, esp_pm_impl_isr_hook
     jalr t0 /* absolute jump, avoid the 1 MiB range constraint */
 #endif
 
     /* call the C dispatcher */
     mv a0, sp   /* argument 1, stack pointer */
     mv a1, s1   /* argument 2, interrupt number (mcause) */
     /* mask off the interrupt flag of mcause */
     li t0, 0x7fffffff
     and a1, a1, t0
     jal _global_interrupt_handler
 
     /* After dispatch c handler, disable interrupt to make freertos make context switch */
 
     li t0, 0x8
     csrrc t0, mstatus, t0
+    /* MIE cleared. Nested interrupts are disabled */
 
     /* restore the interrupt threshold level */
     la t0, INTERRUPT_CORE0_CPU_INT_THRESH_REG
     sw s3, 0(t0)
     fence
 
     /* Yield to the next task if needed: */
     mv a0, sp
     call rtos_int_exit
+    /* If this is a non-nested interrupt, a context switch was called and SP now points back to the task stack. */
 
     /* The next (or current) stack pointer is returned in a0 */
     mv sp, a0
 
     /* restore the rest of the registers */
     csrw mcause, s1
     csrw mstatus, s2
     restore_mepc
-    restore_regs
+    restore_general_regs
 
     /* exit, this will also re-enable the interrupts */
     mret
     .size _interrupt_handler, .-_interrupt_handler
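For intuition on why the handler now records the pre-trap SP (and GP): a debugger stopped in an interrupted thread can restart its stack walk from the saved SP and MEPC instead of from the interrupt stack. The struct and helper below are purely illustrative; they are not the actual RvExcFrame layout nor any ESP-IDF or GDB API.

```c
#include <stdint.h>

/* Hypothetical, simplified view of the trap frame built by the handler above;
 * only the fields relevant to unwinding are shown. */
typedef struct {
    uint32_t mepc;  /* PC at the moment the interrupt hit */
    uint32_t ra;    /* return address of the interrupted function */
    uint32_t sp;    /* stack pointer value before the trap frame was pushed */
    uint32_t gp;    /* global pointer, saved only so GDB shows a sane value */
} example_trap_frame_t;

/* Illustration: the starting point a debugger would use to unwind a thread
 * preempted by an interrupt: resume at the interrupted PC, and continue the
 * stack walk from the thread's own (pre-trap) stack pointer. */
static void example_unwind_start(const example_trap_frame_t *f,
                                 uint32_t *pc_out, uint32_t *sp_out)
{
    *pc_out = f->mepc;
    *sp_out = f->sp;
}
```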