Merge branch 'feature/p4_spinlocks' into 'master'

esp32p4: support spinlocks

Closes IDF-7771

See merge request espressif/esp-idf!25036
@@ -467,30 +467,7 @@ exit:
     }
     return ret;
 
-    //TODO: IDF-7771
 #else // __riscv
-#if SOC_CPU_CORES_NUM > 1
-    /* We use lr.w and sc.w pair for riscv TAS. lr.w will read the memory and register a cpu lock signal
-     * The state of the lock signal is internal to core, and it is not possible for another core to
-     * interface. sc.w will assert the address is registered. Then write memory and release the lock
-     * signal. During the lr.w and sc.w time, if other core acquires the same address, will wait
-     */
-    volatile uint32_t old_value = 0xB33FFFFF;
-    volatile int error = 1;
-
-    __asm__ __volatile__(
-        "0: lr.w %0, 0(%2)     \n"
-        "   bne  %0, %3, 1f    \n"
-        "   sc.w %1, %4, 0(%2) \n"
-        "   bnez %1, 0b        \n"
-        "1:                    \n"
-        : "+r" (old_value), "+r" (error)
-        : "r" (addr), "r" (compare_value), "r" (new_value)
-        );
-    return (old_value == compare_value);
-#else
-    // Single core targets don't have atomic CAS instruction. So access method is the same for internal and external RAM
     return rv_utils_compare_and_set(addr, compare_value, new_value);
 #endif
-#endif
 }
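
Note: the hunk above drops the open-coded lr.w/sc.w loop from esp_cpu_compare_and_set() and lets every RISC-V target delegate to rv_utils_compare_and_set(). For readers unfamiliar with the primitive, here is a minimal portable sketch of the same compare-and-set semantics in C11 atomics — an illustration only, not the IDF implementation, and cas_sketch() is a hypothetical name:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    // Returns true and stores new_value iff *addr still holds compare_value.
    static bool cas_sketch(_Atomic uint32_t *addr, uint32_t compare_value, uint32_t new_value)
    {
        // On failure the builtin writes the observed value back into
        // compare_value, much like old_value in the lr.w/sc.w loop above.
        return atomic_compare_exchange_strong(addr, &compare_value, new_value);
    }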
@@ -18,8 +18,6 @@
 #endif
 
-
-//TODO: IDF-7771, P4, see jira to know what changed and what need to be checked
 
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -34,7 +32,12 @@ extern "C" {
 #define SPINLOCK_WAIT_FOREVER (-1)
 #define SPINLOCK_NO_WAIT 0
 #define SPINLOCK_INITIALIZER {.owner = SPINLOCK_FREE,.count = 0}
+
+#define SPINLOCK_OWNER_ID_0 0xCDCD /* Use these values to avoid 0 being a valid lock owner, same as CORE_ID_REGVAL_PRO on Xtensa */
+#define SPINLOCK_OWNER_ID_1 0xABAB /* Same as CORE_ID_REGVAL_APP on Xtensa */
+
 #define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)
+#define SPINLOCK_OWNER_ID_XOR_SWAP CORE_ID_REGVAL_XOR_SWAP
 
 typedef struct {
     NEED_VOLATILE_MUX uint32_t owner;
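
Note: the owner IDs are deliberately nonzero so that SPINLOCK_FREE (0) can never match a valid owner, and the XOR-swap constant lets either core derive the other core's ID without a branch. A self-contained host-C check of that property (illustration, not IDF code):

    #include <assert.h>

    #define SPINLOCK_OWNER_ID_0     0xCDCD
    #define SPINLOCK_OWNER_ID_1     0xABAB
    #define CORE_ID_REGVAL_XOR_SWAP (0xCDCD ^ 0xABAB)

    int main(void)
    {
        // XORing either owner ID with the constant yields the other ID.
        assert((SPINLOCK_OWNER_ID_0 ^ CORE_ID_REGVAL_XOR_SWAP) == SPINLOCK_OWNER_ID_1);
        assert((SPINLOCK_OWNER_ID_1 ^ CORE_ID_REGVAL_XOR_SWAP) == SPINLOCK_OWNER_ID_0);
        return 0;
    }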
@@ -72,7 +75,7 @@ static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *l
 {
 #if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
     uint32_t irq_status;
-    uint32_t core_id, other_core_id;
+    uint32_t core_owner_id, other_core_owner_id;
     bool lock_set;
     esp_cpu_cycle_count_t start_count;
 
@@ -81,24 +84,23 @@ static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *l
     irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
 
     // Note: The core IDs are the full 32 bit (CORE_ID_REGVAL_PRO/CORE_ID_REGVAL_APP) values
-    core_id = xt_utils_get_raw_core_id();
-    other_core_id = CORE_ID_REGVAL_XOR_SWAP ^ core_id;
+    core_owner_id = xt_utils_get_raw_core_id();
 #else //__riscv
-    irq_status = rv_utils_set_intlevel(RVHAL_EXCM_LEVEL);
 
-    core_id = rv_utils_get_core_id();
-    other_core_id = 1 - core_id;
+    irq_status = rv_utils_mask_int_level_lower_than(RVHAL_EXCM_LEVEL);
+    core_owner_id = rv_utils_get_core_id() == 0 ? SPINLOCK_OWNER_ID_0 : SPINLOCK_OWNER_ID_1;
 #endif
+    other_core_owner_id = CORE_ID_REGVAL_XOR_SWAP ^ core_owner_id;
 
     /* lock->owner should be one of SPINLOCK_FREE, CORE_ID_REGVAL_PRO,
      * CORE_ID_REGVAL_APP:
-     * - If SPINLOCK_FREE, we want to atomically set to 'core_id'.
-     * - If "our" core_id, we can drop through immediately.
-     * - If "other_core_id", we spin here.
+     * - If SPINLOCK_FREE, we want to atomically set to 'core_owner_id'.
+     * - If "our" core_owner_id, we can drop through immediately.
+     * - If "other_core_owner_id", we spin here.
      */
 
     // The caller is already the owner of the lock. Simply increment the nesting count
-    if (lock->owner == core_id) {
+    if (lock->owner == core_owner_id) {
         assert(lock->count > 0 && lock->count < 0xFF); // Bad count value implies memory corruption
         lock->count++;
 #if __XTENSA__
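
Note: after this hunk, Xtensa and RISC-V both express ownership in the same ID space, so the logic below the #endif is architecture-independent. A condensed sketch of the acquire-path bookkeeping (names follow the diff; interrupt masking and the retry/timeout loop are elided, and the cas callback stands in for esp_cpu_compare_and_set()):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { volatile uint32_t owner; volatile uint32_t count; } lock_sketch_t;

    static bool try_acquire_sketch(lock_sketch_t *lock, uint32_t core_owner_id,
                                   bool (*cas)(volatile uint32_t *, uint32_t, uint32_t))
    {
        if (lock->owner == core_owner_id) {   // recursive acquire by the same core
            lock->count++;
            return true;
        }
        if (cas(&lock->owner, 0 /* SPINLOCK_FREE */, core_owner_id)) {
            lock->count = 1;                  // first acquisition
            return true;
        }
        return false;                         // held by the other core; caller spins
    }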
@@ -116,7 +118,7 @@ static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *l
      * is the case for the majority of spinlock_acquire() calls (as spinlocks are free most of the time since they
      * aren't meant to be held for long).
      */
-    lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_id);
+    lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_owner_id);
     if (lock_set || timeout == SPINLOCK_NO_WAIT) {
         // We've successfully taken the lock, or we are not retrying
         goto exit;
@@ -125,7 +127,7 @@ static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *l
     // First attempt to take the lock has failed. Retry until the lock is taken, or until we timeout.
     start_count = esp_cpu_get_cycle_count();
     do {
-        lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_id);
+        lock_set = esp_cpu_compare_and_set(&lock->owner, SPINLOCK_FREE, core_owner_id);
         if (lock_set) {
             break;
         }
@@ -134,11 +136,11 @@ static inline bool __attribute__((always_inline)) spinlock_acquire(spinlock_t *l
 
 exit:
     if (lock_set) {
-        assert(lock->owner == core_id);
+        assert(lock->owner == core_owner_id);
         assert(lock->count == 0); // This is the first time the lock is set, so count should still be 0
         lock->count++; // Finally, we increment the lock count
     } else { // We timed out waiting for lock
-        assert(lock->owner == SPINLOCK_FREE || lock->owner == other_core_id);
+        assert(lock->owner == SPINLOCK_FREE || lock->owner == other_core_owner_id);
         assert(lock->count < 0xFF); // Bad count value implies memory corruption
     }
 
@@ -171,19 +173,18 @@ static inline void __attribute__((always_inline)) spinlock_release(spinlock_t *l
 {
 #if !CONFIG_FREERTOS_UNICORE && !BOOTLOADER_BUILD
     uint32_t irq_status;
-    uint32_t core_id;
+    uint32_t core_owner_id;
 
     assert(lock);
 #if __XTENSA__
     irq_status = XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL);
 
-    core_id = xt_utils_get_raw_core_id();
+    core_owner_id = xt_utils_get_raw_core_id();
 #else
-    irq_status = rv_utils_set_intlevel(RVHAL_EXCM_LEVEL);
-
-    core_id = rv_utils_get_core_id();
+    irq_status = rv_utils_mask_int_level_lower_than(RVHAL_EXCM_LEVEL);
+    core_owner_id = rv_utils_get_core_id() == 0 ? SPINLOCK_OWNER_ID_0 : SPINLOCK_OWNER_ID_1;
 #endif
-    assert(core_id == lock->owner); // This is a lock that we didn't acquire, or the lock is corrupt
+    assert(core_owner_id == lock->owner); // This is a lock that we didn't acquire, or the lock is corrupt
     lock->count--;
 
     if (!lock->count) { // If this is the last recursive release of the lock, mark the lock as free
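
Note: for context, the typical call pattern for the pair shown above — a hedged sketch, since IDF code normally reaches these primitives through the portMUX critical-section macros rather than calling them directly:

    #include "spinlock.h"

    static spinlock_t s_lock = SPINLOCK_INITIALIZER;

    void increment_shared(volatile uint32_t *counter)
    {
        // Acquire spins until the lock is free; release must happen on the
        // same core, which the assert in spinlock_release() enforces.
        spinlock_acquire(&s_lock, SPINLOCK_WAIT_FOREVER);
        (*counter)++;
        spinlock_release(&s_lock);
    }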
@@ -23,9 +23,6 @@
 extern "C" {
 #endif
 
-// interrupt module will mask interrupt with priority less than threshold
-#define RVHAL_EXCM_LEVEL 4
-
 typedef spinlock_t portMUX_TYPE;
 
 /**< Spinlock initializer */
@@ -104,9 +104,6 @@ typedef uint32_t TickType_t;
 #define portTASK_FUNCTION_PROTO(vFunction, pvParameters) void vFunction(void *pvParameters)
 #define portTASK_FUNCTION(vFunction, pvParameters) void vFunction(void *pvParameters)
 
-// interrupt module will mask interrupt with priority less than threshold
-#define RVHAL_EXCM_LEVEL 4
-
 
 /* ----------------------------------------------- Port Configurations -------------------------------------------------
  * - Configurations values supplied by each port
@@ -365,7 +365,7 @@ UBaseType_t xPortSetInterruptMaskFromISR(void)
     RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
 #else
     /* When CLIC is supported, all interrupt priority levels less than or equal to the threshold level are masked. */
-    prev_int_level = rv_utils_set_intlevel(RVHAL_EXCM_LEVEL - 1);
+    prev_int_level = rv_utils_mask_int_level_lower_than(RVHAL_EXCM_LEVEL);
 #endif /* !SOC_INIT_CLIC_SUPPORTED */
     /**
      * In theory, this function should not return immediately as there is a
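
Note: this is the CLIC off-by-one the new helper encapsulates — a CLIC threshold masks levels up to and including the written value, while the old call expressed "mask everything strictly below RVHAL_EXCM_LEVEL". A toy host-C model of the two conventions (simplified integer thresholds, not the real CSR layout):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    // Exclusive threshold: masks levels strictly below it.
    static bool excl_masked(uint32_t level, uint32_t threshold) { return level < threshold; }
    // CLIC-style inclusive threshold: masks levels up to and including it.
    static bool clic_masked(uint32_t level, uint32_t threshold) { return level <= threshold; }

    int main(void)
    {
        const uint32_t excm = 4; // RVHAL_EXCM_LEVEL
        // Writing (excm - 1) to an inclusive threshold masks exactly the
        // same set of levels as writing excm to an exclusive one.
        for (uint32_t level = 0; level < 8; level++) {
            assert(clic_masked(level, excm - 1) == excl_masked(level, excm));
        }
        return 0;
    }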
@@ -24,7 +24,8 @@ extern "C" {
 #define CSR_PCMR_MACHINE 0x7e1
 #define CSR_PCCR_MACHINE 0x7e2
 
-//TODO: IDF-7771
+/* SW defined level at which the interrupt module will mask interrupts with priority lower than the threshold
+   during critical sections and spinlocks */
 #define RVHAL_EXCM_LEVEL 4
 
 /* --------------------------------------------------- CPU Control -----------------------------------------------------
@@ -162,6 +163,17 @@ FORCE_INLINE_ATTR uint32_t __attribute__((always_inline)) rv_utils_set_intlevel(
 
     return old_thresh;
 }
+
+FORCE_INLINE_ATTR uint32_t __attribute__((always_inline)) rv_utils_mask_int_level_lower_than(uint32_t intlevel)
+{
+#if SOC_INT_CLIC_SUPPORTED
+    /* CLIC's set interrupt level is inclusive, i.e. it does mask the set level */
+    return rv_utils_set_intlevel(intlevel - 1);
+#else
+    return rv_utils_set_intlevel(intlevel);
+#endif /* SOC_INT_CLIC_SUPPORTED */
+}
+
 #endif //#if (SOC_CPU_CORES_NUM > 1)
 
 FORCE_INLINE_ATTR uint32_t rv_utils_intr_get_enabled_mask(void)
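
Note: a usage sketch for the new wrapper — callers now state the first level that must stay enabled, and the CLIC adjustment lives in one place. The restore call is an assumption about the surrounding rv_utils API, shown only to complete the pattern:

    // Mask all interrupt levels below RVHAL_EXCM_LEVEL, CLIC or not.
    uint32_t prev_level = rv_utils_mask_int_level_lower_than(RVHAL_EXCM_LEVEL);
    /* ... critical work ... */
    rv_utils_restore_intlevel(prev_level);  // assumed matching restore helper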
@@ -295,9 +307,22 @@ FORCE_INLINE_ATTR void rv_utils_dbgr_break(void)
 
 FORCE_INLINE_ATTR bool rv_utils_compare_and_set(volatile uint32_t *addr, uint32_t compare_value, uint32_t new_value)
 {
-    // ESP32C6 starts to support atomic CAS instructions, but it is still a single core target, no need to implement
-    // through lr and sc instructions for now
-    // For an RV target has no atomic CAS instruction, we can achieve atomicity by disabling interrupts
+#if __riscv_atomic
+    uint32_t old_value = 0;
+    int error = 0;
+
+    /* Based on the sample code for CAS from the RISC-V specs v2.2, atomic instructions */
+    __asm__ __volatile__(
+        "cas:  lr.w %0, 0(%2)     \n"   // load 4 bytes from addr (%2) into old_value (%0)
+        "      bne  %0, %3, fail  \n"   // fail if old_value is not equal to compare_value (%3)
+        "      sc.w %1, %4, 0(%2) \n"   // store new_value (%4) into addr
+        "      bnez %1, cas       \n"   // if we failed to store the new value then retry the operation
+        "fail:                    \n"
+        : "+r" (old_value), "+r" (error)                    // output parameters
+        : "r" (addr), "r" (compare_value), "r" (new_value)  // input parameters
+        );
+#else
+    // For a single core RV target that has no atomic CAS instruction, we can achieve atomicity by disabling interrupts
     unsigned old_mstatus;
     old_mstatus = RV_CLEAR_CSR(mstatus, MSTATUS_MIE);
     // Compare and set
@@ -309,6 +334,7 @@ FORCE_INLINE_ATTR bool rv_utils_compare_and_set(volatile uint32_t *addr, uint32_
     // Restore interrupts
     RV_SET_CSR(mstatus, old_mstatus & MSTATUS_MIE);
 
+#endif //__riscv_atomic
     return (old_value == compare_value);
 }
 
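
Note: when __riscv_atomic is defined the compiler can also emit this loop itself; a hedged alternative to the hand-written assembly above, using the standard GCC/Clang builtin rather than what the commit actually does:

    #include <stdbool.h>
    #include <stdint.h>

    // The builtin expands to an lr.w/sc.w retry loop (or amo form) on RV32IA.
    static inline bool cas_builtin(volatile uint32_t *addr, uint32_t compare_value,
                                   uint32_t new_value)
    {
        return __atomic_compare_exchange_n(addr, &compare_value, new_value,
                                           false,                // strong CAS
                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }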