forked from espressif/esp-idf
fix(freertos): optimize HWLP context switch by disabling it when unused
@@ -6,7 +6,7 @@
  *
  * SPDX-License-Identifier: MIT
  *
- * SPDX-FileContributor: 2023-2024 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileContributor: 2023-2025 Espressif Systems (Shanghai) CO LTD
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy of
  * this software and associated documentation files (the "Software"), to deal in
@@ -297,15 +297,19 @@ static void vPortCleanUpCoprocArea(void *pvTCB)
     const UBaseType_t bottomstack = (UBaseType_t) task->pxDummy8;
     RvCoprocSaveArea* sa = pxRetrieveCoprocSaveAreaFromStackPointer(bottomstack);
 
-    /* If the Task used any coprocessor, check if it is the actual owner of any.
-     * If yes, reset the owner. */
-    if (sa->sa_enable != 0) {
+    /* If the Task ever saved the original stack pointer, restore it before returning */
+    if (sa->sa_allocator != 0) {
         /* Restore the original lowest address of the stack in the TCB */
         task->pxDummy6 = sa->sa_tcbstack;
 
         /* Get the core the task is pinned on */
 #if ( configNUM_CORES > 1 )
         const BaseType_t coreID = task->xDummyCoreID;
+        /* If the task is not pinned on any core, it didn't use any coprocessor that needs to be freed (FPU or PIE).
+         * If it used the HWLP coprocessor, it has nothing to clear since there is no "owner" for it. */
+        if (coreID == tskNO_AFFINITY) {
+            return;
+        }
 #else /* configNUM_CORES > 1 */
         const BaseType_t coreID = 0;
 #endif /* configNUM_CORES > 1 */
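Read as a whole, the cleanup path in vPortCleanUpCoprocArea now behaves roughly as in the sketch below. Only the statements that appear in the hunk above are taken from the source; `task` and `sa` are the hunk's own context variables, and the tail of the function (releasing whichever coprocessors the task still owns on its core) lies outside the hunk and is only summarized in a comment.

    /* Sketch of the patched cleanup path; everything not visible in the hunk above is an assumption */
    if (sa->sa_allocator != 0) {                       /* the area ever saved the original stack pointer */
        task->pxDummy6 = sa->sa_tcbstack;              /* restore the TCB's lowest stack address */
#if ( configNUM_CORES > 1 )
        const BaseType_t coreID = task->xDummyCoreID;
        /* Unpinned tasks cannot own the FPU or PIE, and HWLP has no per-core owner */
        if (coreID == tskNO_AFFINITY) {
            return;
        }
#else /* configNUM_CORES > 1 */
        const BaseType_t coreID = 0;
#endif /* configNUM_CORES > 1 */
        /* ... rest of the function: release any coprocessor still owned on coreID
         * (not part of this hunk) ... */
    }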
@@ -205,27 +205,28 @@ hwlp_restore_if_used:
     li a2, HWLP_COPROC_IDX
     call pxPortGetCoprocArea
     /* Get the enable flags from the coprocessor save area */
-    lw a1, RV_COPROC_ENABLE(a0)
-    /* To avoid having branches below, set the coprocessor enable flag now */
-    andi a2, a1, 1 << HWLP_COPROC_IDX
-    beqz a2, _hwlp_restore_end
+    lw a2, RV_COPROC_ENABLE(a0)
+    andi a1, a2, 1 << HWLP_COPROC_IDX
+    beqz a1, _hwlp_restore_end
     /* Enable bit was set, restore the coprocessor context */
-    lw a0, RV_COPROC_SA+HWLP_COPROC_IDX*4(a0) /* a0 = RvCoprocSaveArea->sa_coprocs[HWLP_COPROC_IDX] */
-    /* This will set the dirty flag for sure */
-    hwlp_restore_regs a0
+    lw a3, RV_COPROC_SA+HWLP_COPROC_IDX*4(a0) /* a3 = RvCoprocSaveArea->sa_coprocs[HWLP_COPROC_IDX] */
+    /* This will set the dirty flag for sure, a2 is preserved */
+    hwlp_restore_regs a3
 #if SOC_CPU_HAS_HWLOOP_STATE_BUG && ESP32P4_REV_MIN_FULL <= 1
     /* The hardware doesn't update the HWLP state properly after executing the last instruction,
      * as such, we must manually put the state of the HWLP to dirty now if any counter is not 0 */
-    csrr a0, CSR_LOOP0_COUNT
-    bnez a0, _hwlp_restore_end
-    csrr a0, CSR_LOOP1_COUNT
-    bnez a0, _hwlp_restore_end
-    /* The counters are 0, cleaning the state */
+    csrr a3, CSR_LOOP0_COUNT
+    bnez a3, _hwlp_restore_end
+    csrr a3, CSR_LOOP1_COUNT
+    bnez a3, _hwlp_restore_end
+    /* The counters are 0, mark the HWLP coprocessor as disabled in the enable flag and clean the state */
+    xori a2, a2, 1 << HWLP_COPROC_IDX
+    sw a2, RV_COPROC_ENABLE(a0)
 #endif /* SOC_CPU_HAS_HWLOOP_STATE_BUG && ESP32P4_REV_MIN_FULL <= 1 */
     csrwi CSR_HWLP_STATE_REG, HWLP_CLEAN_STATE
 _hwlp_restore_end:
     lw ra, (sp)
     addi sp, sp, 16
     ret
 
 #endif /* SOC_CPU_HAS_HWLOOP */
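For readers who find the assembly dense, here is a C-style rendering of the patched restore path above. It is a sketch only: `read_csr`, `write_csr` and the function wrapper are stand-ins introduced for illustration, `hwlp_restore_regs` is really an assembly macro, and the `sa_enable`/`sa_coprocs` fields stand for the RV_COPROC_ENABLE and RV_COPROC_SA offsets the assembly dereferences.

    /* Sketch: restore the HWLP context of the incoming task, then disable HWLP for the
     * task if its loop counters show it is no longer using the hardware loops. */
    static void hwlp_restore_if_used_sketch(RvCoprocSaveArea *sa)    /* sa plays the role of a0 */
    {
        uint32_t enable = sa->sa_enable;                      /* lw a2, RV_COPROC_ENABLE(a0) */
        if ((enable & (1 << HWLP_COPROC_IDX)) == 0) {
            return;                                           /* task never used HWLP: nothing to restore */
        }
        hwlp_restore_regs(sa->sa_coprocs[HWLP_COPROC_IDX]);   /* restores the loop CSRs; marks the HWLP state dirty */
    #if SOC_CPU_HAS_HWLOOP_STATE_BUG && ESP32P4_REV_MIN_FULL <= 1
        if (read_csr(CSR_LOOP0_COUNT) != 0 || read_csr(CSR_LOOP1_COUNT) != 0) {
            return;                                           /* a loop is still in flight: keep the dirty state */
        }
        /* Both counters are 0: the task is done with the hardware loops, so clear its HWLP
         * enable bit. This is the optimization from the commit title: subsequent context
         * switches see the bit cleared and skip the HWLP save/restore for this task. */
        sa->sa_enable = enable ^ (1 << HWLP_COPROC_IDX);      /* xori + sw */
    #endif
        write_csr(CSR_HWLP_STATE_REG, HWLP_CLEAN_STATE);      /* the restore itself should not count as dirty */
    }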
@@ -236,23 +236,26 @@ _panic_handler:
     la ra, _return_from_exception
     /* EXT_ILL CSR should contain the reason for the Illegal Instruction */
     csrrw a0, EXT_ILL_CSR, zero
-#if SOC_CPU_HAS_HWLOOP
-    /* Check if the HWLP bit is set. */
-    andi a1, a0, EXT_ILL_RSN_HWLP
-    beqz a1, hwlp_not_used
-    /* HWLP used in an ISR, abort */
-    mv a0, sp
-    j vPortCoprocUsedInISR
-hwlp_not_used:
-#endif /* SOC_CPU_HAS_HWLOOP */
 
-    /* Hardware loop cannot be treated lazily, so we should never end here if a HWLP instruction is used */
 #if SOC_CPU_HAS_PIE
     /* Check if the PIE bit is set. */
     andi a1, a0, EXT_ILL_RSN_PIE
     bnez a1, rtos_save_pie_coproc
 #endif /* SOC_CPU_HAS_PIE */
 
+    /* We cannot check the HWLP bit in a0 since a hardware bug may set this bit even though no HWLP
+     * instruction was executed in the program at all, so check mtval (`t0`) */
+#if SOC_CPU_HAS_HWLOOP
+    /* HWLP instructions all have an opcode of 0b0101011 */
+    andi a1, t0, 0b1111111
+    addi a1, a1, -0b0101011
+    bnez a1, hwlp_not_used
+    /* HWLP used in an ISR, abort */
+    mv a0, sp
+    j vPortCoprocUsedInISR
+hwlp_not_used:
+#endif /* SOC_CPU_HAS_HWLOOP */
+
 #if SOC_CPU_HAS_FPU
     /* Check if the FPU bit is set. When targets have the FPU reason bug (SOC_CPU_HAS_FPU_EXT_ILL_BUG),
      * it is possible that another bit is set even if the reason is an FPU instruction.
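The last hunk stops trusting the EXT_ILL_RSN_HWLP reason bit, which a hardware bug can set spuriously, and instead decodes the trapping instruction that the handler keeps in t0 (mtval). In C terms the added check boils down to the sketch below, where `insn` stands for that mtval value and `frame` for the sp passed to vPortCoprocUsedInISR; both names are mine, not the port's.

    #if SOC_CPU_HAS_HWLOOP
        /* Every HWLP instruction uses the 7-bit major opcode 0b0101011 (0x2b) */
        if ((insn & 0x7f) == 0x2b) {
            /* A hardware-loop instruction trapped inside an ISR: HWLP cannot be
             * handled lazily, so abort */
            vPortCoprocUsedInISR(frame);   /* does not return */
        }
    #endif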