diff --git a/components/freertos/FreeRTOS-Kernel/idf_changes.md b/components/freertos/FreeRTOS-Kernel/idf_changes.md index cbaf5d6e00..0047a70e14 100644 --- a/components/freertos/FreeRTOS-Kernel/idf_changes.md +++ b/components/freertos/FreeRTOS-Kernel/idf_changes.md @@ -117,6 +117,10 @@ The following functions were modified to accommodate SMP behavior: - Updated logic to determine whether sleep is possible in SMP by checking the status of both cores. - `prvCheckTasksWaitingTermination()` - Updated logic so that we don't delete tasks on `xTasksWaitingTermination` which are still currently running on the other core. +- `xTaskGetCurrentTaskHandle()` + - In SMP, the function now disables interrupts to ensure that the calling task does not switch cores while fetching the current core's TCB. +- `xTaskGetSchedulerState()` + - In SMP, the function now disables interrupts to ensure that the calling task does not switch cores while checking its own copy of `uxSchedulerSuspended`. - `prvAddCurrentTaskToDelayedList()` - Added extra check to see if current blocking task has already been deleted by the other core. diff --git a/components/freertos/FreeRTOS-Kernel/tasks.c b/components/freertos/FreeRTOS-Kernel/tasks.c index 987821e178..d9b770ae3d 100644 --- a/components/freertos/FreeRTOS-Kernel/tasks.c +++ b/components/freertos/FreeRTOS-Kernel/tasks.c @@ -68,16 +68,19 @@ /* Some code sections require extra critical sections when building for SMP * ( configNUMBER_OF_CORES > 1 ). 
*/ #if ( configNUMBER_OF_CORES > 1 ) - /* Macros that Enter/exit a critical section only when building for SMP */ - #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) - #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) - #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) taskENTER_CRITICAL_ISR( pxLock ) - #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) taskEXIT_CRITICAL_ISR( pxLock ) - #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock ) - #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock ) - /* Macros that Enter/exit a critical section only when building for single-core */ - #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) - #define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + /* Macros that enter/exit a critical section only when building for SMP */ + #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) taskENTER_CRITICAL_ISR( pxLock ) + #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) taskEXIT_CRITICAL_ISR( pxLock ) + #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskEnterCriticalSafeSMPOnly( pxLock ) + #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) prvTaskExitCriticalSafeSMPOnly( pxLock ) + /* Macros that enter/exit a critical section only when building for single-core */ + #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + /* Macros that enable/disable interrupts only when building for SMP */ + #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY() portSET_INTERRUPT_MASK_FROM_ISR() + #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) portCLEAR_INTERRUPT_MASK_FROM_ISR( uxStatus ) static inline __attribute__( ( always_inline ) ) void 
prvTaskEnterCriticalSafeSMPOnly( portMUX_TYPE * pxLock ) @@ -114,16 +117,20 @@ } #else /* configNUMBER_OF_CORES > 1 */ - /* Macros that Enter/exit a critical section only when building for SMP */ + /* Macros that enter/exit a critical section only when building for SMP */ #define taskENTER_CRITICAL_SMP_ONLY( pxLock ) #define taskEXIT_CRITICAL_SMP_ONLY( pxLock ) #define taskENTER_CRITICAL_ISR_SMP_ONLY( pxLock ) #define taskEXIT_CRITICAL_ISR_SMP_ONLY( pxLock ) #define taskENTER_CRITICAL_SAFE_SMP_ONLY( pxLock ) #define taskEXIT_CRITICAL_SAFE_SMP_ONLY( pxLock ) - /* Macros that Enter/exit a critical section only when building for single-core */ - #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) - #define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + /* Macros that enter/exit a critical section only when building for single-core */ + #define taskENTER_CRITICAL_SC_ONLY( pxLock ) taskENTER_CRITICAL( pxLock ) + #define taskEXIT_CRITICAL_SC_ONLY( pxLock ) taskEXIT_CRITICAL( pxLock ) + /* Macros that enable/disable interrupts only when building for SMP */ + #define taskDISABLE_INTERRUPTS_ISR_SMP_ONLY() ( ( UBaseType_t ) 0 ) + #define taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxStatus ) ( ( void ) uxStatus ) + #endif /* configNUMBER_OF_CORES > 1 */ #if ( configUSE_PREEMPTION == 0 ) @@ -4832,7 +4839,24 @@ static void prvResetNextTaskUnblockTime( void ) TaskHandle_t xTaskGetCurrentTaskHandle( void ) { - return xTaskGetCurrentTaskHandleForCore( portGET_CORE_ID() ); + TaskHandle_t xReturn; + UBaseType_t uxSavedInterruptStatus; + + /* For SMP, we need to disable interrupts to ensure the caller does not + * switch cores in between portGET_CORE_ID() and fetching the current + * core's TCB. We use the ISR versions of interrupt macros as this + * function could be called inside critical sections. 
+ * + * For single-core a critical section is not required as this is not + * called from an interrupt and the current TCB will always be the same + * for any individual execution thread. */ + uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY(); + { + xReturn = pxCurrentTCBs[ portGET_CORE_ID() ]; + } + taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus ); + + return xReturn; } #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ @@ -4843,10 +4867,18 @@ static void prvResetNextTaskUnblockTime( void ) BaseType_t xTaskGetSchedulerState( void ) { BaseType_t xReturn; + UBaseType_t uxSavedInterruptStatus; - /* For SMP, we need to take the kernel lock here as we are about to - * access kernel data structures. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); + /* For SMP, we need to disable interrupts here to ensure we don't switch + * cores midway. We forego taking the kernel lock here as a minor + * optimization as it is not required. + * + * - xSchedulerRunning is only ever set by core 0 atomically + * - Each core will only ever update its own copy of uxSchedulerSuspended. + * + * We use the ISR versions of interrupt macros as this function could be + * called inside critical sections. */ + uxSavedInterruptStatus = taskDISABLE_INTERRUPTS_ISR_SMP_ONLY(); { if( xSchedulerRunning == pdFALSE ) { @@ -4864,8 +4896,7 @@ static void prvResetNextTaskUnblockTime( void ) } } } - /* Release the previously taken kernel lock. 
*/ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + taskEnable_INTERRUPTS_ISR_SMP_ONLY( uxSavedInterruptStatus ); return xReturn; } diff --git a/components/freertos/esp_additions/freertos_tasks_c_additions.h b/components/freertos/esp_additions/freertos_tasks_c_additions.h index 304bf116d2..371a967649 100644 --- a/components/freertos/esp_additions/freertos_tasks_c_additions.h +++ b/components/freertos/esp_additions/freertos_tasks_c_additions.h @@ -482,16 +482,10 @@ BaseType_t xTaskGetCoreID( TaskHandle_t xTask ) configASSERT( xCoreID < configNUMBER_OF_CORES ); configASSERT( xCoreID != tskNO_AFFINITY ); - /* For SMP, we need to take the kernel lock here as we are about to - * access kernel data structures. For single core, a critical section is - * not required as this is not called from an interrupt and the current - * TCB will always be the same for any individual execution thread. */ - taskENTER_CRITICAL_SMP_ONLY( &xKernelLock ); - { - xReturn = pxCurrentTCBs[ xCoreID ]; - } - /* Release the previously taken kernel lock. */ - taskEXIT_CRITICAL_SMP_ONLY( &xKernelLock ); + /* A critical section is not required as this function does not + * guarantee that the TCB will still be valid when this function + * returns. */ + xReturn = pxCurrentTCBs[ xCoreID ]; } #endif /* CONFIG_FREERTOS_SMP */ @@ -861,7 +855,7 @@ uint8_t * pxTaskGetStackStart( TaskHandle_t xTask ) struct _reent * __getreent( void ) { /* No lock needed because if this changes, we won't be running anymore. */ - TCB_t * pxCurTask = xTaskGetCurrentTaskHandle(); + TCB_t * pxCurTask = ( TCB_t * ) xTaskGetCurrentTaskHandle(); struct _reent * ret; if( pxCurTask == NULL )