forked from espressif/esp-idf
freertos: Fix event group task list race condition
FreeRTOS synchronization primitives (e.g., queues, eventgroups) use various event lists (i.e., task lists) to track what tasks are blocked on a current primitive. Usually these event lists are accessed via one of the event lists functions (such as vTask[PlaceOn|RemoveFrom]UnorderedEventList()), which in turn ensure that the global task list spinlock (xTaskQueueMutex) is taken when accessing these lists. However, some functions in event_groups.c manually traverse their event lists. Thus if a tick interrupt occurs on another core during traversal and that tick interrupt unblocks a task on the event list being traversed, the event list will be corrupted. This commit modifies the following event_groups.c functions so that they take the global task list lock before traversing their event list. - xEventGroupSetBits() - vEventGroupDelete()
This commit is contained in:
@ -535,6 +535,10 @@ BaseType_t xMatchFound = pdFALSE;
|
|||||||
pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
|
pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
|
||||||
|
|
||||||
taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
|
taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
|
||||||
|
/* The critical section above only takes the event groups spinlock. However, we are about to traverse a task list.
|
||||||
|
Thus we need to call the function below to take the task list spinlock located in tasks.c. Not doing so will risk
|
||||||
|
the task list being changed while we are traversing it. */
|
||||||
|
vTaskTakeEventListLock();
|
||||||
{
|
{
|
||||||
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
|
traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );
|
||||||
|
|
||||||
@ -606,6 +610,8 @@ BaseType_t xMatchFound = pdFALSE;
|
|||||||
bit was set in the control word. */
|
bit was set in the control word. */
|
||||||
pxEventBits->uxEventBits &= ~uxBitsToClear;
|
pxEventBits->uxEventBits &= ~uxBitsToClear;
|
||||||
}
|
}
|
||||||
|
/* Release the previously held task list spinlock, then release the event group spinlock. */
|
||||||
|
vTaskReleaseEventListLock();
|
||||||
taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
|
taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
|
||||||
|
|
||||||
return pxEventBits->uxEventBits;
|
return pxEventBits->uxEventBits;
|
||||||
@ -620,6 +626,10 @@ const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
|
|||||||
traceEVENT_GROUP_DELETE( xEventGroup );
|
traceEVENT_GROUP_DELETE( xEventGroup );
|
||||||
|
|
||||||
taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
|
taskENTER_CRITICAL( &pxEventBits->eventGroupMux );
|
||||||
|
/* The critical section above only takes the event groups spinlock. However, we are about to traverse a task list.
|
||||||
|
Thus we need to call the function below to take the task list spinlock located in tasks.c. Not doing so will risk
|
||||||
|
the task list being changed while we are traversing it. */
|
||||||
|
vTaskTakeEventListLock();
|
||||||
{
|
{
|
||||||
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
|
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
|
||||||
{
|
{
|
||||||
@ -629,6 +639,8 @@ const List_t *pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );
|
|||||||
xTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
|
xTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
/* Release the previously held task list spinlock. */
|
||||||
|
vTaskReleaseEventListLock();
|
||||||
taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
|
taskEXIT_CRITICAL( &pxEventBits->eventGroupMux );
|
||||||
|
|
||||||
#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
|
#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
|
||||||
|
@ -2369,6 +2369,23 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xIte
|
|||||||
*/
|
*/
|
||||||
void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION;
|
void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
|
||||||
|
* INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
|
||||||
|
*
|
||||||
|
* This function is a wrapper to take the "xTaskQueueMutex" spinlock of tasks.c.
|
||||||
|
* This lock is taken whenever any of the task lists or event lists are
|
||||||
|
* accessed/modified, such as when adding/removing tasks to/from the delayed
|
||||||
|
* task list or various event lists.
|
||||||
|
*
|
||||||
|
* This function is meant to be called by xEventGroupSetBits() and
|
||||||
|
* vEventGroupDelete() as both those functions will access event lists (instead
|
||||||
|
* of delegating the entire responsibility to one of vTask...EventList()
|
||||||
|
* functions).
|
||||||
|
*/
|
||||||
|
void vTaskTakeEventListLock( void );
|
||||||
|
void vTaskReleaseEventListLock( void );
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
|
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
|
||||||
* INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
|
* INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
|
||||||
|
@ -3563,6 +3563,18 @@ UBaseType_t i, uxTargetCPU;
|
|||||||
}
|
}
|
||||||
/*-----------------------------------------------------------*/
|
/*-----------------------------------------------------------*/
|
||||||
|
|
||||||
|
void vTaskTakeEventListLock( void )
|
||||||
|
{
|
||||||
|
/* We call the tasks.c critical section macro to take xTaskQueueMutex */
|
||||||
|
taskENTER_CRITICAL(&xTaskQueueMutex);
|
||||||
|
}
|
||||||
|
|
||||||
|
void vTaskReleaseEventListLock( void )
|
||||||
|
{
|
||||||
|
/* We call the tasks.c critical section macro to release xTaskQueueMutex */
|
||||||
|
taskEXIT_CRITICAL(&xTaskQueueMutex);
|
||||||
|
}
|
||||||
|
|
||||||
BaseType_t xTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
|
BaseType_t xTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
|
||||||
{
|
{
|
||||||
TCB_t *pxUnblockedTCB;
|
TCB_t *pxUnblockedTCB;
|
||||||
|
Reference in New Issue
Block a user