mirror of
https://github.com/espressif/esp-idf.git
synced 2025-08-04 13:14:32 +02:00
freertos(IDF): Restore vanilla call behavior on xTaskIncrementTick() and vTaskSwitchContext()
Due to SMP, critical sections have been added to xTaskIncrementTick() and vTaskSwitchContext() (to take the xKernelLock). However, this is technically not necessary when building for single-core as FreeRTOS expects these functions to be called with interrupts already disabled. This commit makes the critical sections in those functions depend on "configNUM_CORES > 1", and ensures that interrupts are disabled when calling those functions. This effectively restores the vanilla behavior for these functions when building for single-core.
This commit is contained in:
@@ -180,14 +180,24 @@ BaseType_t xPortSysTickHandler(void)
|
|||||||
|
|
||||||
// Call FreeRTOS Increment tick function
|
// Call FreeRTOS Increment tick function
|
||||||
BaseType_t xSwitchRequired;
|
BaseType_t xSwitchRequired;
|
||||||
#if CONFIG_FREERTOS_UNICORE
|
#if ( configNUM_CORES > 1 )
|
||||||
xSwitchRequired = xTaskIncrementTick();
|
/*
|
||||||
#else
|
For SMP, xTaskIncrementTick() will internally enter a critical section. But only core 0 calls xTaskIncrementTick()
|
||||||
|
while core 1 should call xTaskIncrementTickOtherCores().
|
||||||
|
*/
|
||||||
if (xPortGetCoreID() == 0) {
|
if (xPortGetCoreID() == 0) {
|
||||||
xSwitchRequired = xTaskIncrementTick();
|
xSwitchRequired = xTaskIncrementTick();
|
||||||
} else {
|
} else {
|
||||||
xSwitchRequired = xTaskIncrementTickOtherCores();
|
xSwitchRequired = xTaskIncrementTickOtherCores();
|
||||||
}
|
}
|
||||||
|
#else // configNUM_CORES > 1
|
||||||
|
/*
|
||||||
|
Vanilla (single core) FreeRTOS expects that xTaskIncrementTick() cannot be interrupted (i.e., no nested interrupts).
|
||||||
|
Thus we have to disable interrupts before calling it.
|
||||||
|
*/
|
||||||
|
UBaseType_t uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
|
||||||
|
xSwitchRequired = xTaskIncrementTick();
|
||||||
|
portCLEAR_INTERRUPT_MASK_FROM_ISR(uxSavedInterruptStatus);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
// Check if yield is required
|
// Check if yield is required
|
||||||
|
@@ -3108,14 +3108,11 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
|
|||||||
|
|
||||||
BaseType_t xTaskIncrementTick( void )
|
BaseType_t xTaskIncrementTick( void )
|
||||||
{
|
{
|
||||||
#ifdef ESP_PLATFORM
|
|
||||||
#if ( configNUM_CORES > 1 )
|
#if ( configNUM_CORES > 1 )
|
||||||
{
|
|
||||||
/* Only Core 0 should ever call this function. */
|
/* Only Core 0 should ever call this function. */
|
||||||
configASSERT( xPortGetCoreID() == 0 );
|
configASSERT( xPortGetCoreID() == 0 );
|
||||||
}
|
|
||||||
#endif /* ( configNUM_CORES > 1 ) */
|
#endif /* ( configNUM_CORES > 1 ) */
|
||||||
#endif // ESP_PLATFORM
|
|
||||||
TCB_t * pxTCB;
|
TCB_t * pxTCB;
|
||||||
TickType_t xItemValue;
|
TickType_t xItemValue;
|
||||||
BaseType_t xSwitchRequired = pdFALSE;
|
BaseType_t xSwitchRequired = pdFALSE;
|
||||||
@@ -3125,15 +3122,13 @@ BaseType_t xTaskIncrementTick( void )
|
|||||||
* tasks to be unblocked. */
|
* tasks to be unblocked. */
|
||||||
traceTASK_INCREMENT_TICK( xTickCount );
|
traceTASK_INCREMENT_TICK( xTickCount );
|
||||||
|
|
||||||
#ifdef ESP_PLATFORM
|
#if ( configNUM_CORES > 1 )
|
||||||
|
|
||||||
/* We need a critical section here as we are about to access kernel data
|
/* For SMP, we need to take the kernel lock here as we are about to
|
||||||
* structures:
|
* access kernel data structures (unlike single core which calls this
|
||||||
* - Other cores could be accessing them simultaneously
|
* function with interrupts disabled). */
|
||||||
* - Unlike other ports, we call xTaskIncrementTick() without disabling nested
|
|
||||||
* interrupts, which in turn is disabled by the critical section. */
|
|
||||||
taskENTER_CRITICAL_ISR( &xKernelLock );
|
taskENTER_CRITICAL_ISR( &xKernelLock );
|
||||||
#endif // ESP_PLATFORM
|
#endif /* ( configNUM_CORES > 1 ) */
|
||||||
|
|
||||||
if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
|
if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
|
||||||
{
|
{
|
||||||
@@ -3221,16 +3216,12 @@ BaseType_t xTaskIncrementTick( void )
|
|||||||
/* Preemption is on, but a context switch should
|
/* Preemption is on, but a context switch should
|
||||||
* only be performed if the unblocked task has a
|
* only be performed if the unblocked task has a
|
||||||
* priority that is equal to or higher than the
|
* priority that is equal to or higher than the
|
||||||
* currently executing task. */
|
* currently executing task.
|
||||||
#if defined( ESP_PLATFORM ) && ( configNUM_CORES > 1 )
|
*
|
||||||
|
* For SMP, since this function is only run on core
|
||||||
/* Since this function is only run on core 0, we
|
* 0, only need to switch contexts if the unblocked
|
||||||
* only need to switch contexts if the unblocked task
|
* task can run on core 0. */
|
||||||
* can run on core 0. */
|
if( ( taskCAN_RUN_ON_CORE( 0, pxTCB->xCoreID ) == pdTRUE ) && ( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority ) )
|
||||||
if( ( ( pxTCB->xCoreID == 0 ) || ( pxTCB->xCoreID == tskNO_AFFINITY ) ) && ( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority ) )
|
|
||||||
#else
|
|
||||||
if( pxTCB->uxPriority >= pxCurrentTCB[ 0 ]->uxPriority )
|
|
||||||
#endif
|
|
||||||
{
|
{
|
||||||
xSwitchRequired = pdTRUE;
|
xSwitchRequired = pdTRUE;
|
||||||
}
|
}
|
||||||
@@ -3260,23 +3251,22 @@ BaseType_t xTaskIncrementTick( void )
|
|||||||
}
|
}
|
||||||
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
|
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
|
||||||
|
|
||||||
#ifdef ESP_PLATFORM
|
|
||||||
#if ( configUSE_TICK_HOOK == 1 )
|
#if ( configUSE_TICK_HOOK == 1 )
|
||||||
TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */
|
TickType_t xPendedTicksTemp = xPendedTicks; /* Non-volatile copy. */
|
||||||
#endif /* configUSE_TICK_HOOK */
|
#endif /* configUSE_TICK_HOOK */
|
||||||
/* Exit the critical section as we have finished accessing the kernel data structures. */
|
|
||||||
|
#if ( configNUM_CORES > 1 )
|
||||||
|
|
||||||
|
/* Release the previously taken kernel lock as we have finished
|
||||||
|
* accessing the kernel data structures. */
|
||||||
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
||||||
#endif // ESP_PLATFORM
|
#endif /* ( configNUM_CORES > 1 ) */
|
||||||
|
|
||||||
#if ( configUSE_TICK_HOOK == 1 )
|
#if ( configUSE_TICK_HOOK == 1 )
|
||||||
{
|
{
|
||||||
/* Guard against the tick hook being called when the pended tick
|
/* Guard against the tick hook being called when the pended tick
|
||||||
* count is being unwound (when the scheduler is being unlocked). */
|
* count is being unwound (when the scheduler is being unlocked). */
|
||||||
#ifdef ESP_PLATFORM
|
if( xPendedTicksTemp == ( TickType_t ) 0 )
|
||||||
if( xPendedCounts == ( TickType_t ) 0 )
|
|
||||||
#else
|
|
||||||
if( xPendedTicks == ( TickType_t ) 0 )
|
|
||||||
#endif
|
|
||||||
{
|
{
|
||||||
vApplicationTickHook();
|
vApplicationTickHook();
|
||||||
}
|
}
|
||||||
@@ -3303,10 +3293,12 @@ BaseType_t xTaskIncrementTick( void )
|
|||||||
else
|
else
|
||||||
{
|
{
|
||||||
++xPendedTicks;
|
++xPendedTicks;
|
||||||
#ifdef ESP_PLATFORM
|
#if ( configNUM_CORES > 1 )
|
||||||
/* Exit the critical section as we have finished accessing the kernel data structures. */
|
|
||||||
|
/* Release the previously taken kernel lock as we have finished
|
||||||
|
* accessing the kernel data structures. */
|
||||||
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
||||||
#endif // ESP_PLATFORM
|
#endif /* ( configNUM_CORES > 1 ) */
|
||||||
|
|
||||||
/* The tick hook gets called at regular intervals, even if the
|
/* The tick hook gets called at regular intervals, even if the
|
||||||
* scheduler is locked. */
|
* scheduler is locked. */
|
||||||
@@ -3338,12 +3330,8 @@ BaseType_t xTaskIncrementTick( void )
|
|||||||
|
|
||||||
if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) pdFALSE )
|
if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) pdFALSE )
|
||||||
{
|
{
|
||||||
/* We need a critical section here as we are about to access kernel data
|
/* We need take the kernel lock here as we are about to access
|
||||||
* structures:
|
* kernel data structures. */
|
||||||
* - Other cores could be accessing them simultaneously
|
|
||||||
* - Unlike other ports, we call xTaskIncrementTick() without disabling
|
|
||||||
* nested interrupts, which in turn is disabled by the critical
|
|
||||||
* section. */
|
|
||||||
taskENTER_CRITICAL_ISR( &xKernelLock );
|
taskENTER_CRITICAL_ISR( &xKernelLock );
|
||||||
|
|
||||||
/* A task being unblocked cannot cause an immediate context switch
|
/* A task being unblocked cannot cause an immediate context switch
|
||||||
@@ -3379,7 +3367,8 @@ BaseType_t xTaskIncrementTick( void )
|
|||||||
}
|
}
|
||||||
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
|
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
|
||||||
|
|
||||||
/* Exit the critical section as we have finished accessing the kernel data structures. */
|
/* Release the previously taken kernel lock as we have finished
|
||||||
|
* accessing the kernel data structures. */
|
||||||
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
||||||
|
|
||||||
#if ( configUSE_PREEMPTION == 1 )
|
#if ( configUSE_PREEMPTION == 1 )
|
||||||
@@ -3612,14 +3601,14 @@ get_next_task:
|
|||||||
|
|
||||||
void vTaskSwitchContext( void )
|
void vTaskSwitchContext( void )
|
||||||
{
|
{
|
||||||
#ifdef ESP_PLATFORM
|
#if ( configNUM_CORES > 1 )
|
||||||
|
|
||||||
/* vTaskSwitchContext is called either from:
|
/* For SMP, we need to take the kernel lock here as we are about to
|
||||||
* - ISR dispatcher when return from an ISR (interrupts will already be disabled)
|
* access kernel data structures (unlike single core which calls this
|
||||||
* - vTaskSuspend() which is not in a critical section
|
* function with either interrupts disabled or when the scheduler hasn't
|
||||||
* Therefore, we enter a critical section ISR version to ensure safety */
|
* started yet). */
|
||||||
taskENTER_CRITICAL_ISR( &xKernelLock );
|
taskENTER_CRITICAL_ISR( &xKernelLock );
|
||||||
#endif // ESP_PLATFORM
|
#endif /* ( configNUM_CORES > 1 ) */
|
||||||
|
|
||||||
if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
|
if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
|
||||||
{
|
{
|
||||||
@@ -3708,10 +3697,12 @@ void vTaskSwitchContext( void )
|
|||||||
#endif // ESP_PLATFORM
|
#endif // ESP_PLATFORM
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef ESP_PLATFORM
|
#if ( configNUM_CORES > 1 )
|
||||||
/* Exit the critical section previously entered */
|
|
||||||
|
/* Release the previously taken kernel lock as we have finished
|
||||||
|
* accessing the kernel data structures. */
|
||||||
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
taskEXIT_CRITICAL_ISR( &xKernelLock );
|
||||||
#endif // ESP_PLATFORM
|
#endif /* ( configNUM_CORES > 1 ) */
|
||||||
}
|
}
|
||||||
/*-----------------------------------------------------------*/
|
/*-----------------------------------------------------------*/
|
||||||
|
|
||||||
|
Reference in New Issue
Block a user