Merge branch 'feature/freertos_10.4.3_revert_critical_section_macro' into 'master'

freertos: (Sync v10.4.3) Revert common critical section macro (2)

See merge request espressif/esp-idf!19527
This commit is contained in:
Darian
2022-08-12 23:41:44 +08:00
6 changed files with 276 additions and 333 deletions

View File

@@ -45,18 +45,6 @@
#include "timers.h"
#include "event_groups.h"
#ifdef ESP_PLATFORM
#define taskCRITICAL_MUX &pxEventBits->eventGroupMux
#undef taskENTER_CRITICAL
#undef taskEXIT_CRITICAL
#undef taskENTER_CRITICAL_ISR
#undef taskEXIT_CRITICAL_ISR
#define taskENTER_CRITICAL( ) portENTER_CRITICAL( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL( ) portEXIT_CRITICAL( taskCRITICAL_MUX )
#define taskENTER_CRITICAL_ISR( ) portENTER_CRITICAL_ISR( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
#endif
/* Lint e961, e750 and e9021 are suppressed as a MISRA exception justified
* because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
* for the header files above, but not in this file, in order to generate the
@@ -92,7 +80,7 @@ typedef struct EventGroupDef_t
#endif
#ifdef ESP_PLATFORM
portMUX_TYPE eventGroupMux; //Mutex required due to SMP
portMUX_TYPE xEventGroupLock; /* Spinlock required for SMP critical sections */
#endif // ESP_PLATFORM
} EventGroup_t;
@@ -150,7 +138,7 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
traceEVENT_GROUP_CREATE( pxEventBits );
#ifdef ESP_PLATFORM
portMUX_INITIALIZE( &pxEventBits->eventGroupMux );
portMUX_INITIALIZE( &pxEventBits->xEventGroupLock );
#endif // ESP_PLATFORM
}
else
@@ -203,7 +191,7 @@ static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
#endif /* configSUPPORT_STATIC_ALLOCATION */
#ifdef ESP_PLATFORM
portMUX_INITIALIZE( &pxEventBits->eventGroupMux );
portMUX_INITIALIZE( &pxEventBits->xEventGroupLock );
#endif // ESP_PLATFORM
traceEVENT_GROUP_CREATE( pxEventBits );
@@ -240,7 +228,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
#endif
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -287,7 +275,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
}
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#else
xAlreadyYielded = xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -316,7 +304,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
/* The task timed out, just return the current event bit value. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
{
uxReturn = pxEventBits->uxEventBits;
@@ -333,7 +321,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
xTimeoutOccurred = pdTRUE;
}
@@ -383,7 +371,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
#endif
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -455,7 +443,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
}
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#else
xAlreadyYielded = xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -483,7 +471,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
{
/* The task timed out, just return the current event bit value. */
uxReturn = pxEventBits->uxEventBits;
@@ -508,7 +496,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
xTimeoutOccurred = pdTRUE;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
}
else
{
@@ -539,7 +527,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
configASSERT( xEventGroup );
configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
{
traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );
@@ -550,7 +538,7 @@ EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
/* Clear the bits. */
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
return uxReturn;
}
@@ -606,11 +594,10 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
pxList = &( pxEventBits->xTasksWaitingForBits );
pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
/* The critical section above only takes the event groups spinlock. However, we are about to traverse a task list.
* Thus we need to call the function below to take the task list spinlock located in tasks.c. Not doing so will risk
* the task list being changed while we are traversing it. */
vTaskTakeEventListLock();
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
/* We are about to traverse a task list which is a kernel data structure.
* Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
vTaskTakeKernelLock();
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -686,9 +673,9 @@ EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
pxEventBits->uxEventBits &= ~uxBitsToClear;
}
#ifdef ESP_PLATFORM // IDF-3755
/* Release the previously held task list spinlock, then release the event group spinlock. */
vTaskReleaseEventListLock();
taskEXIT_CRITICAL();
/* Release the previously taken kernel lock, then release the event group spinlock. */
vTaskReleaseKernelLock();
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -705,12 +692,11 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
traceEVENT_GROUP_DELETE( xEventGroup );
// IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#ifdef ESP_PLATFORM
/* The critical section above only takes the event groups spinlock. However, we are about to traverse a task list.
* Thus we need to call the function below to take the task list spinlock located in tasks.c. Not doing so will risk
* the task list being changed while we are traversing it. */
vTaskTakeEventListLock();
/* We are about to traverse a task list which is a kernel data structure.
* Thus we need to call vTaskTakeKernelLock() to take the kernel lock. */
vTaskTakeKernelLock();
#endif
{
while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
@@ -722,10 +708,10 @@ void vEventGroupDelete( EventGroupHandle_t xEventGroup )
}
}
#ifdef ESP_PLATFORM
/* Release the previously held task list spinlock. */
vTaskReleaseEventListLock();
/* Release the previously taken kernel lock. */
vTaskReleaseKernelLock();
#endif
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
{

View File

@@ -3391,8 +3391,8 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
* THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN
* INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER.
*
* This function is a wrapper to take the "xTaskQueueMutex" spinlock of tasks.c.
* This lock is taken whenever any of the task lists or event lists are
* This function is a wrapper to take the "xKernelLock" spinlock of tasks.c.
* This lock is taken whenever any of the kernel's data structures are
* accessed/modified, such as when adding/removing tasks to/from the delayed
* task list or various event lists.
*
@@ -3401,8 +3401,8 @@ void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
* of delegating the entire responsibility to one of vTask...EventList()
* functions).
*/
void vTaskTakeEventListLock( void );
void vTaskReleaseEventListLock( void );
void vTaskTakeKernelLock( void );
void vTaskReleaseKernelLock( void );
#endif // ESP_PLATFORM
/*

View File

@@ -47,18 +47,6 @@
#include "croutine.h"
#endif
#ifdef ESP_PLATFORM
#define taskCRITICAL_MUX &((Queue_t *)pxQueue)->mux
#undef taskENTER_CRITICAL
#undef taskEXIT_CRITICAL
#undef taskENTER_CRITICAL_ISR
#undef taskEXIT_CRITICAL_ISR
#define taskENTER_CRITICAL( ) portENTER_CRITICAL( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL( ) portEXIT_CRITICAL( taskCRITICAL_MUX )
#define taskENTER_CRITICAL_ISR( ) portENTER_CRITICAL_ISR( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
#endif
/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
* because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
* for the header files above, but not in this file, in order to generate the
@@ -148,7 +136,7 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
uint8_t ucQueueType;
#endif
#ifdef ESP_PLATFORM
portMUX_TYPE mux; //Mutex required due to SMP
portMUX_TYPE xQueueLock; /* Spinlock required for SMP critical sections */
#endif // ESP_PLATFORM
} xQUEUE;
@@ -183,8 +171,8 @@ typedef xQUEUE Queue_t;
* array position being vacant. */
PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];
#ifdef ESP_PLATFORM
//Need to add queue registry mutex to protect against simultaneous access
static portMUX_TYPE queue_registry_spinlock = portMUX_INITIALIZER_UNLOCKED;
/* Spinlock required in SMP when accessing the queue registry */
static portMUX_TYPE xQueueRegistryLock = portMUX_INITIALIZER_UNLOCKED;
#endif // ESP_PLATFORM
#endif /* configQUEUE_REGISTRY_SIZE */
@@ -272,7 +260,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
* accessing the queue event lists.
*/
#define prvLockQueue( pxQueue ) \
taskENTER_CRITICAL(); \
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); \
{ \
if( ( pxQueue )->cRxLock == queueUNLOCKED ) \
{ \
@@ -283,7 +271,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
} \
} \
taskEXIT_CRITICAL()
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) )
/*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
@@ -296,11 +284,11 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
#ifdef ESP_PLATFORM
if( xNewQueue == pdTRUE )
{
portMUX_INITIALIZE(&pxQueue->mux);
portMUX_INITIALIZE( &( pxQueue->xQueueLock ) );
}
#endif // ESP_PLATFORM
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
@@ -339,7 +327,7 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
/* A value is returned for calling semantic consistency with previous
* versions. */
@@ -545,7 +533,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
/* In case this is a recursive mutex. */
pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;
#ifdef ESP_PLATFORM
portMUX_INITIALIZE(&pxNewQueue->mux);
portMUX_INITIALIZE( &( pxNewQueue->xQueueLock ) );
#endif // ESP_PLATFORM
traceCREATE_MUTEX( pxNewQueue );
@@ -613,7 +601,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
#ifdef ESP_PLATFORM
Queue_t * const pxQueue = (Queue_t *)pxSemaphore;
#endif
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
{
@@ -624,7 +612,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
pxReturn = NULL;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
return pxReturn;
} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */
@@ -844,7 +832,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
* interest of execution time efficiency. */
for( ; ; )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
/* Is there room on the queue now? The running task must be the
* highest priority task wanting to access the queue. If the head item
@@ -950,7 +938,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
}
#endif /* configUSE_QUEUE_SETS */
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
return pdPASS;
}
else
@@ -959,7 +947,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
{
/* The queue was full and no block time is specified (or
* the block time has expired) so leave now. */
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
/* Return to the original privilege level before exiting
* the function. */
@@ -980,13 +968,13 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
}
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
/* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -1013,7 +1001,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
* case the yield will not cause a context switch unless there
* is also a higher priority task in the pending ready list. */
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
@@ -1027,7 +1015,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
/* Try again. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -1038,7 +1026,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
/* The timeout has expired. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -1086,7 +1074,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
* post). */
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
taskENTER_CRITICAL_ISR();
taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{
@@ -1200,7 +1188,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
xReturn = errQUEUE_FULL;
}
taskEXIT_CRITICAL_ISR();
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
@@ -1250,7 +1238,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
taskENTER_CRITICAL_ISR();
taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -1369,7 +1357,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue );
xReturn = errQUEUE_FULL;
}
taskEXIT_CRITICAL_ISR();
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
@@ -1404,7 +1392,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
* interest of execution time efficiency. */
for( ; ; )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -1436,7 +1424,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
return pdPASS;
}
else
@@ -1445,7 +1433,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
{
/* The queue was empty and no block time is specified (or
* the block time has expired) so leave now. */
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
@@ -1463,13 +1451,13 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
}
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
/* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -1486,7 +1474,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
@@ -1506,7 +1494,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
* data. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -1518,7 +1506,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
* back and attempt to read the data. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -1567,7 +1555,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
* of execution time efficiency. */
for( ; ; )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
/* Semaphores are queues with an item size of 0, and where the
* number of messages in the queue is the semaphore's count value. */
@@ -1616,7 +1604,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
return pdPASS;
}
else
@@ -1634,7 +1622,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
/* The semaphore count was 0 and no block time is specified
* (or the block time has expired) so exit now. */
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
@@ -1652,13 +1640,13 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
}
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
/* Interrupts and other tasks can give to and take from the semaphore
* now the critical section has been exited. */
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -1679,11 +1667,11 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
{
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
}
else
{
@@ -1695,7 +1683,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
@@ -1715,7 +1703,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
* attempt to take the semaphore again. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -1726,7 +1714,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
/* Timed out. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -1744,7 +1732,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
* test the mutex type again to check it is actually a mutex. */
if( xInheritanceOccurred != pdFALSE )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
UBaseType_t uxHighestWaitingPriority;
@@ -1756,7 +1744,7 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
}
}
#endif /* configUSE_MUTEXES */
@@ -1801,7 +1789,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
* interest of execution time efficiency. */
for( ; ; )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -1839,7 +1827,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER();
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
return pdPASS;
}
else
@@ -1848,7 +1836,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
{
/* The queue was empty and no block time is specified (or
* the block time has expired) so leave now. */
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
traceQUEUE_PEEK_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
@@ -1867,13 +1855,13 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
}
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
/* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -1890,7 +1878,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
@@ -1910,7 +1898,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
* state, instead return to try and obtain the data. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -1922,7 +1910,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
* exit, otherwise go back and try to read the data again. */
prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -1970,7 +1958,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
{
taskENTER_CRITICAL_ISR();
taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
@@ -2029,7 +2017,7 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
xReturn = pdFAIL;
traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
}
taskEXIT_CRITICAL_ISR();
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
}
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
@@ -2066,7 +2054,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
taskENTER_CRITICAL_ISR();
taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
{
/* Cannot block in an ISR, so check there is data available. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
@@ -2087,7 +2075,7 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
}
}
taskEXIT_CRITICAL_ISR();
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
return xReturn;
@@ -2101,11 +2089,11 @@ UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
configASSERT( xQueue );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
@@ -2118,11 +2106,11 @@ UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
configASSERT( pxQueue );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
return uxReturn;
} /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
@@ -2352,7 +2340,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
* removed from the queue while the queue was locked. When a queue is
* locked items can be added or removed, but the event lists cannot be
* updated. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
int8_t cTxLock = pxQueue->cTxLock;
@@ -2430,10 +2418,10 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
pxQueue->cTxLock = queueUNLOCKED;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
/* Do the same for the Rx lock. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
int8_t cRxLock = pxQueue->cRxLock;
@@ -2460,14 +2448,14 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
pxQueue->cRxLock = queueUNLOCKED;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
}
/*-----------------------------------------------------------*/
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
{
BaseType_t xReturn;
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );
{
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
{
@@ -2478,7 +2466,7 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
xReturn = pdFALSE;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );
return xReturn;
}
@@ -2837,7 +2825,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
UBaseType_t ux;
portENTER_CRITICAL(&queue_registry_spinlock);
taskENTER_CRITICAL( &xQueueRegistryLock );
/* See if there is an empty space in the registry. A NULL name denotes
* a free slot. */
for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
@@ -2856,7 +2844,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
mtCOVERAGE_TEST_MARKER();
}
}
portEXIT_CRITICAL(&queue_registry_spinlock);
taskEXIT_CRITICAL( &xQueueRegistryLock );
}
#endif /* configQUEUE_REGISTRY_SIZE */
@@ -2869,7 +2857,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
UBaseType_t ux;
const char * pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
portENTER_CRITICAL(&queue_registry_spinlock);
taskENTER_CRITICAL( &xQueueRegistryLock );
/* Note there is nothing here to protect against another task adding or
* removing entries from the registry while it is being searched. */
@@ -2886,7 +2874,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
mtCOVERAGE_TEST_MARKER();
}
}
portEXIT_CRITICAL(&queue_registry_spinlock);
taskEXIT_CRITICAL( &xQueueRegistryLock );
return pcReturn;
} /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
@@ -2900,7 +2888,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
{
UBaseType_t ux;
portENTER_CRITICAL(&queue_registry_spinlock);
taskENTER_CRITICAL( &xQueueRegistryLock );
/* See if the handle of the queue being unregistered is actually in the
* registry. */
@@ -2922,7 +2910,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
mtCOVERAGE_TEST_MARKER();
}
}
portEXIT_CRITICAL(&queue_registry_spinlock);
taskEXIT_CRITICAL( &xQueueRegistryLock );
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
@@ -2993,7 +2981,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
Queue_t * pxQueue = (Queue_t * )xQueueOrSemaphore;
#endif
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
{
@@ -3012,7 +3000,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
xReturn = pdPASS;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
return xReturn;
}
@@ -3045,12 +3033,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
#ifdef ESP_PLATFORM
Queue_t* pxQueue = (Queue_t*)pxQueueOrSemaphore;
#endif
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
/* The queue is no longer contained in the set. */
pxQueueOrSemaphore->pxQueueSetContainer = NULL;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
xReturn = pdPASS;
}
@@ -3103,7 +3091,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */
//Acquire the Queue set's spinlock
portENTER_CRITICAL(&(pxQueueSetContainer->mux));
taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
@@ -3146,7 +3134,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
}
//Release the Queue set's spinlock
portEXIT_CRITICAL(&(pxQueueSetContainer->mux));
taskEXIT_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
return xReturn;
}

View File

@@ -45,18 +45,6 @@
#include "task.h"
#include "stream_buffer.h"
#ifdef ESP_PLATFORM
#define taskCRITICAL_MUX &pxStreamBuffer->xStreamBufferMux
#undef taskENTER_CRITICAL
#undef taskEXIT_CRITICAL
#undef taskENTER_CRITICAL_ISR
#undef taskEXIT_CRITICAL_ISR
#define taskENTER_CRITICAL( ) portENTER_CRITICAL( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL( ) portEXIT_CRITICAL( taskCRITICAL_MUX )
#define taskENTER_CRITICAL_ISR( ) portENTER_CRITICAL_ISR( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
#endif
#if ( configUSE_TASK_NOTIFICATIONS != 1 )
#error configUSE_TASK_NOTIFICATIONS must be set to 1 to build stream_buffer.c
#endif
@@ -75,7 +63,7 @@
#ifndef sbRECEIVE_COMPLETED
#ifdef ESP_PLATFORM // IDF-3775
#define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
taskENTER_CRITICAL(); \
taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToSend != NULL ) \
{ \
@@ -85,7 +73,7 @@
( pxStreamBuffer )->xTaskWaitingToSend = NULL; \
} \
} \
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
#else
#define sbRECEIVE_COMPLETED( pxStreamBuffer ) \
vTaskSuspendAll(); \
@@ -129,7 +117,7 @@
#ifndef sbSEND_COMPLETED
#ifdef ESP_PLATFORM // IDF-3755
#define sbSEND_COMPLETED( pxStreamBuffer ) \
taskENTER_CRITICAL(); \
taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) ); \
{ \
if( ( pxStreamBuffer )->xTaskWaitingToReceive != NULL ) \
{ \
@@ -139,7 +127,7 @@
( pxStreamBuffer )->xTaskWaitingToReceive = NULL; \
} \
} \
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
#else
#define sbSEND_COMPLETED( pxStreamBuffer ) \
vTaskSuspendAll(); \
@@ -202,8 +190,7 @@ typedef struct StreamBufferDef_t /*lint !e9058 Style convention
UBaseType_t uxStreamBufferNumber; /* Used for tracing purposes. */
#endif
#ifdef ESP_PLATFORM
/* Mutex required due to SMP. This field shall be the last one of the structure. */
portMUX_TYPE xStreamBufferMux;
portMUX_TYPE xStreamBufferLock; /* Spinlock required for SMP critical sections */
#endif // ESP_PLATFORM
} StreamBuffer_t;
@@ -485,7 +472,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
#endif
/* Can only reset a message buffer if there are no tasks blocked on it. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
{
if( pxStreamBuffer->xTaskWaitingToReceive == NULL )
{
@@ -520,7 +507,7 @@ BaseType_t xStreamBufferReset( StreamBufferHandle_t xStreamBuffer )
}
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
return xReturn;
}
@@ -657,7 +644,7 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
{
/* Wait until the required number of bytes are free in the message
* buffer. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
{
xSpace = xStreamBufferSpacesAvailable( pxStreamBuffer );
@@ -672,11 +659,11 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
}
else
{
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
break;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer );
( void ) xTaskNotifyWait( ( uint32_t ) 0, ( uint32_t ) 0, NULL, xTicksToWait );
@@ -855,7 +842,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
{
/* Checking if there is data and clearing the notification state must be
* performed atomically. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
{
xBytesAvailable = prvBytesInBuffer( pxStreamBuffer );
@@ -878,7 +865,7 @@ size_t xStreamBufferReceive( StreamBufferHandle_t xStreamBuffer,
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &( pxStreamBuffer->xStreamBufferLock ) );
if( xBytesAvailable <= xBytesToStoreMessageLength )
{
@@ -1358,7 +1345,7 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
pxStreamBuffer->xTriggerLevelBytes = xTriggerLevelBytes;
pxStreamBuffer->ucFlags = ucFlags;
#ifdef ESP_PLATFORM
portMUX_INITIALIZE( &pxStreamBuffer->xStreamBufferMux );
portMUX_INITIALIZE( &( pxStreamBuffer->xStreamBufferLock ) );
#endif // ESP_PLATFORM
}
@@ -1386,8 +1373,8 @@ static void prvInitialiseNewStreamBuffer( StreamBuffer_t * const pxStreamBuffer,
/* Do not include the spinlock in the part to reset!
* Thus, make sure the spinlock is the last field of the structure. */
_Static_assert( offsetof(StreamBuffer_t, xStreamBufferMux) == sizeof( StreamBuffer_t ) - sizeof(portMUX_TYPE),
"xStreamBufferMux must be the last field of structure StreamBuffer_t" );
_Static_assert( offsetof(StreamBuffer_t, xStreamBufferLock) == sizeof( StreamBuffer_t ) - sizeof(portMUX_TYPE),
"xStreamBufferLock must be the last field of structure StreamBuffer_t" );
const size_t erasable = sizeof( StreamBuffer_t ) - sizeof(portMUX_TYPE);
( void ) memset( ( void * ) pxStreamBuffer, 0x00, erasable ); /*lint !e9087 memset() requires void *. */
pxStreamBuffer->pucBuffer = pucBuffer;

View File

@@ -47,15 +47,6 @@
#include "stack_macros.h"
#ifdef ESP_PLATFORM
#define taskCRITICAL_MUX &xTaskQueueMutex
#undef taskENTER_CRITICAL
#undef taskEXIT_CRITICAL
#undef taskENTER_CRITICAL_ISR
#undef taskEXIT_CRITICAL_ISR
#define taskENTER_CRITICAL( ) portENTER_CRITICAL( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL( ) portEXIT_CRITICAL( taskCRITICAL_MUX )
#define taskENTER_CRITICAL_ISR( ) portENTER_CRITICAL_ISR( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
#undef _REENT_INIT_PTR
#define _REENT_INIT_PTR esp_reent_init
extern void esp_vApplicationIdleHook(void);
@@ -395,7 +386,9 @@ PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Poi
PRIVILEGED_DATA static List_t xPendingReadyList[ configNUM_CORES ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
#ifdef ESP_PLATFORM
PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
/* Spinlock required for SMP critical sections. This lock protects all of the
* kernel's data structures such as various tasks lists, flags, and tick counts. */
PRIVILEGED_DATA static portMUX_TYPE xKernelLock = portMUX_INITIALIZER_UNLOCKED;
#endif // ESP_PLATFORM
#if ( INCLUDE_vTaskDelete == 1 )
@@ -1252,7 +1245,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
/* Ensure interrupts don't access the task lists while the lists are being
* updated. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
uxCurrentNumberOfTasks++;
@@ -1346,13 +1339,13 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
portSETUP_TCB( pxNewTCB );
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
if( xSchedulerRunning != pdFALSE )
{
/* If the created task is of a higher priority than the current task
* then it should run now. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
/* If the created task is of a higher priority than the current task
* then it should run now. */
@@ -1365,7 +1358,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
mtCOVERAGE_TEST_MARKER();
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
else
{
@@ -1425,7 +1418,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
BaseType_t core;
BaseType_t xFreeNow = 0;
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
core = xPortGetCoreID();
curTCB = pxCurrentTCB[core];
@@ -1490,13 +1483,13 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
{
/* SMP case of deleting a task running on a different core. Same issue
as a task deleting itself, but we need to send a yield to this task now
before we release xTaskQueueMutex.
before we release xKernelLock.
Specifically there is a case where the other core may already be spinning on
xTaskQueueMutex waiting to go into a blocked state. A check is added in
xKernelLock waiting to go into a blocked state. A check is added in
prvAddCurrentTaskToDelayedList() to prevent it from removing itself from
xTasksWaitingTermination list in this case (instead it will immediately
release xTaskQueueMutex again and be yielded before the FreeRTOS function
release xKernelLock again and be yielded before the FreeRTOS function
returns.) */
vPortYieldOtherCore( !core );
}
@@ -1513,7 +1506,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
traceTASK_DELETE( pxTCB );
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
if(xFreeNow == pdTRUE) {
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
@@ -1568,7 +1561,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -1628,7 +1621,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
}
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
#else
xAlreadyYielded = xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -1662,7 +1655,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
{
configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -1679,7 +1672,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToDelay );
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
#else
xAlreadyYielded = xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -1706,7 +1699,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
configASSERT( pxTCB );
taskENTER_CRITICAL(); //Need critical section incase either core context switches in between
taskENTER_CRITICAL( &xKernelLock ); //Need critical section incase either core context switches in between
if( pxTCB == pxCurrentTCB[xPortGetCoreID()])
{
/* The task calling this function is querying its own state. */
@@ -1790,7 +1783,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
eReturn = eReady;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return eReturn;
} /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
@@ -1805,14 +1798,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
TCB_t const * pxTCB;
UBaseType_t uxReturn;
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
/* If null is passed in here then it is the priority of the task
* that called uxTaskPriorityGet() that is being queried. */
pxTCB = prvGetTCBFromHandle( xTask );
uxReturn = pxTCB->uxPriority;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return uxReturn;
}
@@ -1845,14 +1838,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
* https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
portENTER_CRITICAL_ISR(&xTaskQueueMutex );
taskENTER_CRITICAL_ISR( &xKernelLock );
{
/* If null is passed in here then it is the priority of the calling
* task that is being queried. */
pxTCB = prvGetTCBFromHandle( xTask );
uxReturn = pxTCB->uxPriority;
}
portEXIT_CRITICAL_ISR(&xTaskQueueMutex);
taskEXIT_CRITICAL_ISR( &xKernelLock );
return uxReturn;
}
@@ -1881,7 +1874,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
mtCOVERAGE_TEST_MARKER();
}
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
/* If null is passed in here then it is the priority of the calling
* task that is being changed. */
@@ -2020,7 +2013,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
( void ) uxPriorityUsedOnEntry;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
#endif /* INCLUDE_vTaskPrioritySet */
@@ -2033,7 +2026,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
TCB_t * pxTCB;
TCB_t * curTCB;
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
/* If null is passed in here then it is the running task that is
* being suspended. */
@@ -2081,17 +2074,17 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
}
#endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
if( xSchedulerRunning != pdFALSE )
{
/* Reset the next expected unblock time in case it referred to the
* task that is now in the Suspended state. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
prvResetNextTaskUnblockTime();
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
else
{
@@ -2103,9 +2096,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
if( xSchedulerRunning != pdFALSE )
{
/* The current task has just been suspended. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
BaseType_t suspended = uxSchedulerSuspended[xPortGetCoreID()];
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
configASSERT( suspended == 0 );
(void)suspended;
@@ -2122,9 +2115,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
* NULL so when the next task is created pxCurrentTCB will
* be set to point to it no matter what its relative priority
* is. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
pxCurrentTCB[ xPortGetCoreID() ] = NULL;
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
else
{
@@ -2139,11 +2132,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
/* A task other than the currently running task was suspended,
* reset the next expected unblock time in case it referred to the
* task that is now in the Suspended state. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
prvResetNextTaskUnblockTime();
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
else
{
@@ -2211,7 +2204,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
/* It does not make sense to resume the calling task. */
configASSERT( xTaskToResume );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
/* The parameter cannot be NULL as it is impossible to resume the
* currently executing task. */
@@ -2249,7 +2242,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
#endif /* INCLUDE_vTaskSuspend */
@@ -2283,7 +2276,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
* https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
taskENTER_CRITICAL_ISR();
taskENTER_CRITICAL_ISR( &xKernelLock );
{
if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
{
@@ -2324,7 +2317,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL_ISR();
taskEXIT_CRITICAL_ISR( &xKernelLock );
return xYieldRequired;
}
@@ -2540,7 +2533,7 @@ void vTaskSuspendAll( void )
TickType_t xReturn;
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority > tskIDLE_PRIORITY )
{
xReturn = 0;
@@ -2567,7 +2560,7 @@ void vTaskSuspendAll( void )
{
xReturn = xNextTaskUnblockTime - xTickCount;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return xReturn;
}
@@ -2589,7 +2582,7 @@ BaseType_t xTaskResumeAll( void )
* removed task will have been added to the xPendingReadyList. Once the
* scheduler has been resumed it is safe to move all the pending ready
* tasks from this list into their appropriate ready list. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
#ifdef ESP_PLATFORM
/* Minor optimization. Core ID can't change while inside a critical section */
@@ -2691,7 +2684,7 @@ BaseType_t xTaskResumeAll( void )
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return xAlreadyYielded;
}
@@ -2838,7 +2831,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -2888,7 +2881,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
#endif
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -2908,7 +2901,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -2971,7 +2964,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
}
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -3011,7 +3004,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
{
#ifdef ESP_PLATFORM
/* For SMP, we require a critical section to access xTickCount */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
#endif
/* Correct the tick count value after a period during which the tick
* was suppressed. Note this does *not* call the tick hook function for
@@ -3020,7 +3013,7 @@ char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char
xTickCount += xTicksToJump;
traceINCREASE_TICK_COUNT( xTicksToJump );
#ifdef ESP_PLATFORM
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
#endif
}
@@ -3045,9 +3038,9 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
#ifdef ESP_PLATFORM
/* For SMP, we still require a critical section to access xPendedTicks even
* if the scheduler is disabled. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
xPendedTicks += xTicksToCatchUp;
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
#else // ESP_PLATFORM
xPendedTicks += xTicksToCatchUp;
#endif // ESP_PLATFORM
@@ -3067,7 +3060,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
configASSERT( pxTCB );
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -3087,7 +3080,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
* the event list too. Interrupts can touch the event list item,
* even though the scheduler is suspended, so a critical section
* is used. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
{
@@ -3103,7 +3096,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
/* Place the unblocked task into the appropriate ready list. */
prvAddTaskToReadyList( pxTCB );
@@ -3134,7 +3127,7 @@ BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
}
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -3170,7 +3163,7 @@ BaseType_t xTaskIncrementTick( void )
* - Other cores could be accessing them simultaneously
* - Unlike other ports, we call xTaskIncrementTick() without disabling nested
* interrupts, which in turn is disabled by the critical section. */
taskENTER_CRITICAL_ISR();
taskENTER_CRITICAL_ISR( &xKernelLock );
#endif // ESP_PLATFORM
if( uxSchedulerSuspended[ 0 ] == ( UBaseType_t ) pdFALSE )
@@ -3302,7 +3295,7 @@ BaseType_t xTaskIncrementTick( void )
TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */
#endif /* configUSE_TICK_HOOK */
/* Exit the critical section as we have finished accessing the kernel data structures. */
taskEXIT_CRITICAL_ISR();
taskEXIT_CRITICAL_ISR( &xKernelLock );
#endif // ESP_PLATFORM
#if ( configUSE_TICK_HOOK == 1 )
@@ -3342,7 +3335,7 @@ BaseType_t xTaskIncrementTick( void )
++xPendedTicks;
#ifdef ESP_PLATFORM
/* Exit the critical section as we have finished accessing the kernel data structures. */
taskEXIT_CRITICAL_ISR();
taskEXIT_CRITICAL_ISR( &xKernelLock );
#endif // ESP_PLATFORM
/* The tick hook gets called at regular intervals, even if the
@@ -3381,7 +3374,7 @@ BaseType_t xTaskIncrementTick( void )
* - Unlike other ports, we call xTaskIncrementTick() without disabling
* nested interrupts, which in turn is disabled by the critical
* section. */
taskENTER_CRITICAL_ISR();
taskENTER_CRITICAL_ISR( &xKernelLock );
/* A task being unblocked cannot cause an immediate context switch
* if preemption is turned off. */
@@ -3415,7 +3408,7 @@ BaseType_t xTaskIncrementTick( void )
#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
/* Exit the critical section as we have finished accessing the kernel data structures. */
taskEXIT_CRITICAL_ISR();
taskEXIT_CRITICAL_ISR( &xKernelLock );
#if ( configUSE_PREEMPTION == 1 )
{
@@ -3463,11 +3456,11 @@ BaseType_t xTaskIncrementTick( void )
/* Save the hook function in the TCB. A critical section is required as
* the value can be accessed from an interrupt. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
xTCB->pxTaskTag = pxHookFunction;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
#endif /* configUSE_APPLICATION_TASK_TAG */
@@ -3485,11 +3478,11 @@ BaseType_t xTaskIncrementTick( void )
/* Save the hook function in the TCB. A critical section is required as
* the value can be accessed from an interrupt. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
xReturn = pxTCB->pxTaskTag;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return xReturn;
}
@@ -3513,7 +3506,7 @@ BaseType_t xTaskIncrementTick( void )
/* Save the hook function in the TCB. A critical section is required as
* the value can be accessed from an interrupt. */
#ifdef ESP_PLATFORM
portENTER_CRITICAL_ISR(&xTaskQueueMutex);
taskENTER_CRITICAL_ISR( &xKernelLock );
#else
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#endif
@@ -3521,7 +3514,7 @@ BaseType_t xTaskIncrementTick( void )
xReturn = pxTCB->pxTaskTag;
}
#ifdef ESP_PLATFORM
portEXIT_CRITICAL_ISR(&xTaskQueueMutex);
taskEXIT_CRITICAL_ISR( &xKernelLock );
#else
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
#endif
@@ -3659,7 +3652,7 @@ void vTaskSwitchContext( void )
* - ISR dispatcher when return from an ISR (interrupts will already be disabled)
* - vTaskSuspend() which is not in a critical section
* Therefore, we enter a critical section ISR version to ensure safety */
taskENTER_CRITICAL_ISR();
taskENTER_CRITICAL_ISR( &xKernelLock );
#endif // ESP_PLATFORM
if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
{
@@ -3749,7 +3742,7 @@ void vTaskSwitchContext( void )
}
#ifdef ESP_PLATFORM
/* Exit the critical section previously entered */
taskEXIT_CRITICAL_ISR();
taskEXIT_CRITICAL_ISR( &xKernelLock );
#endif // ESP_PLATFORM
}
/*-----------------------------------------------------------*/
@@ -3758,7 +3751,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
const TickType_t xTicksToWait )
{
configASSERT( pxEventList );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
/* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
* SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
@@ -3770,7 +3763,7 @@ void vTaskPlaceOnEventList( List_t * const pxEventList,
vListInsert( pxEventList, &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ) );
prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait);
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
/*-----------------------------------------------------------*/
@@ -3779,7 +3772,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
const TickType_t xTicksToWait )
{
configASSERT( pxEventList );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
/* Store the item value in the event list item. It is safe to access the
* event list item here as interrupts won't access the event list item of a
@@ -3794,7 +3787,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
vListInsertEnd( pxEventList, &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ) );
prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait );
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
/*-----------------------------------------------------------*/
@@ -3802,7 +3795,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
configASSERT( pxEventList );
/* This function should not be called by application code hence the
@@ -3827,7 +3820,7 @@ void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
traceTASK_DELAY_UNTIL( );
prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait );
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
#endif /* configUSE_TIMERS */
@@ -3840,7 +3833,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
BaseType_t xTaskCanBeReady;
UBaseType_t i, uxTargetCPU;
taskENTER_CRITICAL_ISR();
taskENTER_CRITICAL_ISR( &xKernelLock );
/* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
* called from a critical section within an ISR. */
@@ -3862,7 +3855,7 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
}
else
{
taskEXIT_CRITICAL_ISR();
taskEXIT_CRITICAL_ISR( &xKernelLock );
return pdFALSE;
}
@@ -3932,22 +3925,22 @@ BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
}
#endif
taskEXIT_CRITICAL_ISR();
taskEXIT_CRITICAL_ISR( &xKernelLock );
return xReturn;
}
/*-----------------------------------------------------------*/
#ifdef ESP_PLATFORM
void vTaskTakeEventListLock( void )
void vTaskTakeKernelLock( void )
{
/* We call the tasks.c critical section macro to take xTaskQueueMutex */
taskENTER_CRITICAL();
/* We call the tasks.c critical section macro to take xKernelLock */
taskENTER_CRITICAL( &xKernelLock );
}
void vTaskReleaseEventListLock( void )
void vTaskReleaseKernelLock( void )
{
/* We call the tasks.c critical section macro to release xTaskQueueMutex */
taskEXIT_CRITICAL();
/* We call the tasks.c critical section macro to release xKernelLock */
taskEXIT_CRITICAL( &xKernelLock );
}
#endif // ESP_PLATFORM
@@ -3956,7 +3949,7 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
{
TCB_t * pxUnblockedTCB;
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
/* Store the new item value in the event list. */
listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
@@ -3984,19 +3977,19 @@ void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
/*-----------------------------------------------------------*/
void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
{
configASSERT( pxTimeOut );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
pxTimeOut->xOverflowCount = xNumOfOverflows;
pxTimeOut->xTimeOnEntering = xTickCount;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
/*-----------------------------------------------------------*/
@@ -4016,7 +4009,7 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
*/
#if ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) )
configASSERT( pxTimeOut );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
#endif // ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) )
/* For internal use only as it does not use a critical section. */
@@ -4024,7 +4017,7 @@ void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
pxTimeOut->xTimeOnEntering = xTickCount;
#if ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) )
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
#endif // ( ( ESP_PLATFORM == 1 ) && ( configNUM_CORES > 1 ) )
}
/*-----------------------------------------------------------*/
@@ -4037,7 +4030,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
configASSERT( pxTimeOut );
configASSERT( pxTicksToWait );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
/* Minor optimisation. The tick count cannot change in this block. */
const TickType_t xConstTickCount = xTickCount;
@@ -4087,7 +4080,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
xReturn = pdTRUE;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return xReturn;
}
@@ -4236,7 +4229,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
{
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -4264,7 +4257,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
}
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -4287,7 +4280,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
const UBaseType_t uxNonApplicationTasks = 1;
eSleepModeStatus eReturn = eStandardSleep;
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
if( listCURRENT_LIST_LENGTH( &xPendingReadyList[xPortGetCoreID()] ) != 0 )
{
/* A task was made ready while the scheduler was suspended. */
@@ -4313,7 +4306,7 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return eReturn;
}
@@ -4331,11 +4324,11 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
pxTCB = prvGetTCBFromHandle( xTaskToSet );
pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
pxTCB->pvThreadLocalStoragePointersDelCallback[ xIndex ] = xDelCallback;
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
}
@@ -4354,10 +4347,10 @@ static portTASK_FUNCTION( prvIdleTask, pvParameters )
if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
pxTCB = prvGetTCBFromHandle( xTaskToSet );
pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
}
#endif /* configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS */
@@ -4454,13 +4447,13 @@ static void prvCheckTasksWaitingTermination( void )
BaseType_t xListIsEmpty;
BaseType_t core = xPortGetCoreID();
/* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
/* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL( &xKernelLock )
* being called too often in the idle task. */
while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
{
TCB_t *pxTCB = NULL;
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
xListIsEmpty = listLIST_IS_EMPTY( &xTasksWaitingTermination );
if( xListIsEmpty == pdFALSE )
@@ -4487,7 +4480,7 @@ static void prvCheckTasksWaitingTermination( void )
}
}
}
taskEXIT_CRITICAL(); //Need to call deletion callbacks outside critical section
taskEXIT_CRITICAL( &xKernelLock ); //Need to call deletion callbacks outside critical section
if (pxTCB != NULL) { //Call deletion callbacks and free TCB memory
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
@@ -4569,7 +4562,7 @@ static void prvCheckTasksWaitingTermination( void )
if( eState == eSuspended )
{
#ifdef ESP_PLATFORM // IDF-3755
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -4580,7 +4573,7 @@ static void prvCheckTasksWaitingTermination( void )
}
}
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -4935,7 +4928,7 @@ static void prvResetNextTaskUnblockTime( void )
TCB_t * const pxMutexHolderTCB = pxMutexHolder;
BaseType_t xReturn = pdFALSE;
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
/* If the mutex was given back by an interrupt while the queue was
* locked then the mutex holder might now be NULL. _RB_ Is this still
* needed as interrupts can no longer use mutexes? */
@@ -5012,7 +5005,7 @@ static void prvResetNextTaskUnblockTime( void )
{
mtCOVERAGE_TEST_MARKER();
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return xReturn;
}
@@ -5027,7 +5020,7 @@ static void prvResetNextTaskUnblockTime( void )
TCB_t * const pxTCB = pxMutexHolder;
BaseType_t xReturn = pdFALSE;
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
if( pxMutexHolder != NULL )
{
/* A task can only have an inherited priority if it holds the mutex.
@@ -5094,7 +5087,7 @@ static void prvResetNextTaskUnblockTime( void )
{
mtCOVERAGE_TEST_MARKER();
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return xReturn;
}
@@ -5111,7 +5104,7 @@ static void prvResetNextTaskUnblockTime( void )
UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
if( pxMutexHolder != NULL )
{
/* If pxMutexHolder is not NULL then the holder must hold at least
@@ -5204,7 +5197,7 @@ static void prvResetNextTaskUnblockTime( void )
{
mtCOVERAGE_TEST_MARKER();
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
}
#endif /* configUSE_MUTEXES */
@@ -5539,13 +5532,13 @@ TickType_t uxTaskResetEventItemValue( void )
{
TickType_t uxReturn;
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
/* Reset the event list item to its normal value - so it can be used with
* queues and semaphores. */
listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return uxReturn;
}
@@ -5559,13 +5552,13 @@ TickType_t uxTaskResetEventItemValue( void )
/* If xSemaphoreCreateMutex() is called before any tasks have been created
* then pxCurrentTCB will be NULL. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
if( pxCurrentTCB[ xPortGetCoreID() ] != NULL )
{
( pxCurrentTCB[ xPortGetCoreID() ]->uxMutexesHeld )++;
}
curTCB = pxCurrentTCB[ xPortGetCoreID() ];
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return curTCB;
}
@@ -5593,7 +5586,7 @@ TickType_t uxTaskResetEventItemValue( void )
configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
/* Only block if the notification count is not already non-zero. */
if( pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue[ uxIndexToWait ] == 0UL )
@@ -5622,9 +5615,9 @@ TickType_t uxTaskResetEventItemValue( void )
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
traceTASK_NOTIFY_TAKE( uxIndexToWait );
ulReturn = pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue[ uxIndexToWait ];
@@ -5647,7 +5640,7 @@ TickType_t uxTaskResetEventItemValue( void )
pxCurrentTCB[xPortGetCoreID()]->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return ulReturn;
}
@@ -5679,7 +5672,7 @@ TickType_t uxTaskResetEventItemValue( void )
configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
/* Only block if a notification is not already pending. */
if( pxCurrentTCB[xPortGetCoreID()]->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
@@ -5713,9 +5706,9 @@ TickType_t uxTaskResetEventItemValue( void )
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
traceTASK_NOTIFY_WAIT( uxIndexToWait );
@@ -5745,7 +5738,7 @@ TickType_t uxTaskResetEventItemValue( void )
pxCurrentTCB[xPortGetCoreID()]->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return xReturn;
}
@@ -5769,7 +5762,7 @@ TickType_t uxTaskResetEventItemValue( void )
configASSERT( xTaskToNotify );
pxTCB = xTaskToNotify;
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
if( pulPreviousNotificationValue != NULL )
{
@@ -5868,7 +5861,7 @@ TickType_t uxTaskResetEventItemValue( void )
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return xReturn;
}
@@ -5912,7 +5905,7 @@ TickType_t uxTaskResetEventItemValue( void )
pxTCB = xTaskToNotify;
taskENTER_CRITICAL_ISR();
taskENTER_CRITICAL_ISR( &xKernelLock );
{
if( pulPreviousNotificationValue != NULL )
{
@@ -6006,7 +5999,7 @@ TickType_t uxTaskResetEventItemValue( void )
}
}
}
taskEXIT_CRITICAL_ISR();
taskEXIT_CRITICAL_ISR( &xKernelLock );
return xReturn;
}
@@ -6047,7 +6040,7 @@ TickType_t uxTaskResetEventItemValue( void )
pxTCB = xTaskToNotify;
taskENTER_CRITICAL_ISR();
taskENTER_CRITICAL_ISR( &xKernelLock );
{
ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;
@@ -6097,7 +6090,7 @@ TickType_t uxTaskResetEventItemValue( void )
}
}
}
taskEXIT_CRITICAL_ISR();
taskEXIT_CRITICAL_ISR( &xKernelLock );
}
#endif /* configUSE_TASK_NOTIFICATIONS */
@@ -6117,7 +6110,7 @@ TickType_t uxTaskResetEventItemValue( void )
* its notification state cleared. */
pxTCB = prvGetTCBFromHandle( xTask );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED )
{
@@ -6129,7 +6122,7 @@ TickType_t uxTaskResetEventItemValue( void )
xReturn = pdFAIL;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return xReturn;
}
@@ -6150,14 +6143,14 @@ TickType_t uxTaskResetEventItemValue( void )
* its notification state cleared. */
pxTCB = prvGetTCBFromHandle( xTask );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
{
/* Return the notification as it was before the bits were cleared,
* then clear the bit mask. */
ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ];
pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return ulReturn;
}
@@ -6169,9 +6162,9 @@ TickType_t uxTaskResetEventItemValue( void )
uint32_t ulTaskGetIdleRunTimeCounter( void )
{
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xKernelLock );
tskTCB *pxTCB = (tskTCB *)xIdleTaskHandle[xPortGetCoreID()];
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xKernelLock );
return pxTCB->ulRunTimeCounter;
}
@@ -6185,7 +6178,7 @@ static void prvAddCurrentTaskToDelayedList( const portBASE_TYPE xCoreID, const T
const TickType_t xConstTickCount = xTickCount;
if (configNUM_CORES > 1 && listIS_CONTAINED_WITHIN(&xTasksWaitingTermination, &( pxCurrentTCB[xCoreID]->xStateListItem))) {
/* vTaskDelete() has been called to delete this task. This would have happened from the other core while this task was spinning on xTaskQueueMutex,
/* vTaskDelete() has been called to delete this task. This would have happened from the other core while this task was spinning on xKernelLock,
so don't move the running task to the delayed list - as soon as this core re-enables interrupts this task will
be suspended permanently */
return;

View File

@@ -48,18 +48,6 @@
#error configUSE_TIMERS must be set to 1 to make the xTimerPendFunctionCall() function available.
#endif
#ifdef ESP_PLATFORM
#define taskCRITICAL_MUX &xTimerMux
#undef taskENTER_CRITICAL
#undef taskEXIT_CRITICAL
#undef taskENTER_CRITICAL_ISR
#undef taskEXIT_CRITICAL_ISR
#define taskENTER_CRITICAL( ) portENTER_CRITICAL( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL( ) portEXIT_CRITICAL( taskCRITICAL_MUX )
#define taskENTER_CRITICAL_ISR( ) portENTER_CRITICAL_ISR( taskCRITICAL_MUX )
#define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
#endif
/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
* because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
* for the header files above, but not in this file, in order to generate the
@@ -160,8 +148,9 @@
PRIVILEGED_DATA static TaskHandle_t xTimerTaskHandle = NULL;
#ifdef ESP_PLATFORM
/* Mux. We use a single mux for all the timers for now. ToDo: maybe increase granularity here? */
PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
/* Spinlock required in SMP when accessing the timers. For now we use a single lock
* Todo: Each timer could possible have its own lock for increased granularity. */
PRIVILEGED_DATA portMUX_TYPE xTimerLock = portMUX_INITIALIZER_UNLOCKED;
#endif // ESP_PLATFORM
/*lint -restore */
@@ -470,7 +459,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
Timer_t * pxTimer = xTimer;
configASSERT( xTimer );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xTimerLock );
{
if( uxAutoReload != pdFALSE )
{
@@ -481,7 +470,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
pxTimer->ucStatus &= ~tmrSTATUS_IS_AUTORELOAD;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );
}
/*-----------------------------------------------------------*/
@@ -491,7 +480,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
UBaseType_t uxReturn;
configASSERT( xTimer );
taskENTER_CRITICAL( );
taskENTER_CRITICAL( &xTimerLock );
{
if( ( pxTimer->ucStatus & tmrSTATUS_IS_AUTORELOAD ) == 0 )
{
@@ -504,7 +493,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
uxReturn = ( UBaseType_t ) pdTRUE;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );
return uxReturn;
}
@@ -616,7 +605,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
BaseType_t xTimerListsWereSwitched;
#ifdef ESP_PLATFORM
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xTimerLock );
#else
vTaskSuspendAll();
#endif // ESP_PLATFORM
@@ -634,7 +623,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
if( ( xListWasEmpty == pdFALSE ) && ( xNextExpireTime <= xTimeNow ) )
{
#ifdef ESP_PLATFORM
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -658,7 +647,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
vQueueWaitForMessageRestricted( xTimerQueue, ( xNextExpireTime - xTimeNow ), xListWasEmpty );
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );
#else
if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
@@ -680,7 +669,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
else
{
#ifdef ESP_PLATFORM // IDF-3755
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );
#else
( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
@@ -999,7 +988,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
/* Check that the list from which active timers are referenced, and the
* queue used to communicate with the timer service, have been
* initialised. */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xTimerLock );
{
if( xTimerQueue == NULL )
{
@@ -1041,7 +1030,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
mtCOVERAGE_TEST_MARKER();
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );
}
/*-----------------------------------------------------------*/
@@ -1053,7 +1042,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
configASSERT( xTimer );
/* Is the timer in the list of active timers? */
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xTimerLock );
{
if( ( pxTimer->ucStatus & tmrSTATUS_IS_ACTIVE ) == 0 )
{
@@ -1064,7 +1053,7 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
xReturn = pdTRUE;
}
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );
return xReturn;
} /*lint !e818 Can't be pointer to const due to the typedef. */
@@ -1077,11 +1066,11 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
configASSERT( xTimer );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xTimerLock );
{
pvReturn = pxTimer->pvTimerID;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );
return pvReturn;
}
@@ -1094,11 +1083,11 @@ PRIVILEGED_DATA portMUX_TYPE xTimerMux = portMUX_INITIALIZER_UNLOCKED;
configASSERT( xTimer );
taskENTER_CRITICAL();
taskENTER_CRITICAL( &xTimerLock );
{
pxTimer->pvTimerID = pvNewID;
}
taskEXIT_CRITICAL();
taskEXIT_CRITICAL( &xTimerLock );
}
/*-----------------------------------------------------------*/