Merge branch 'bugfix/revrt_19761' into 'master'

Revert "Merge branch 'feature/freertos_10.4.3_sync_various_functions' into 'master'"

See merge request espressif/esp-idf!20108
Ivan Grokhotkov
2022-09-12 22:12:02 +08:00
8 changed files with 277 additions and 316 deletions

View File

@@ -220,7 +220,7 @@ Notes:
 #if ( configUSE_QUEUE_SETS != 1 )
     #define traceQUEUE_SEND( pxQueue ) SYSVIEW_RecordU32x4(apiFastID_OFFSET + apiID_XQUEUEGENERICSEND, SEGGER_SYSVIEW_ShrinkId((U32)pxQueue), (U32)pvItemToQueue, xTicksToWait, xCopyPosition)
 #else
-    #define traceQUEUE_SEND( pxQueue ) SYSVIEW_RecordU32x4(apiFastID_OFFSET + apiID_XQUEUEGENERICSEND, SEGGER_SYSVIEW_ShrinkId((U32)pxQueue), 0, 0, 0)
+    #define traceQUEUE_SEND( pxQueue ) SYSVIEW_RecordU32x4(apiFastID_OFFSET + apiID_XQUEUEGENERICSEND, SEGGER_SYSVIEW_ShrinkId((U32)pxQueue), 0, 0, xCopyPosition)
 #endif
 #endif // CONFIG_FREERTOS_SMP
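Note: the change on the #else branch restores xCopyPosition as the last value recorded by traceQUEUE_SEND. Trace macros of this style expand textually at the call site inside the kernel, so they may reference locals of the enclosing function. A self-contained sketch of the mechanism (the recorder and all names below are invented for illustration, not SEGGER's API):

    #include <stdint.h>
    #include <stdio.h>

    static void record_u32x4( uint32_t id, uint32_t a, uint32_t b, uint32_t c, uint32_t d )
    {
        printf( "event %lu: %lu %lu %lu %lu\n", (unsigned long) id, (unsigned long) a,
                (unsigned long) b, (unsigned long) c, (unsigned long) d );
    }

    /* Expands at the call site, so xCopyPosition resolves to the caller's local. */
    #define traceEXAMPLE_SEND( pxQueue ) \
        record_u32x4( 1u, (uint32_t)(uintptr_t)(pxQueue), 0u, 0u, (uint32_t)xCopyPosition )

    static void generic_send( void * pxQueue, int xCopyPosition )
    {
        traceEXAMPLE_SEND( pxQueue ); /* compiles only because xCopyPosition is in scope */
    }

    int main( void )
    {
        generic_send( (void *) 0, 2 ); /* 2 ~ queueOVERWRITE */
        return 0;
    }

This is why the macro variant had to change together with prvNotifyQueueSetContainer() in queue.c below: once xCopyPosition is back in scope at the expansion points, the trace hook can log it again.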

View File

@@ -214,7 +214,9 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
 {
     EventBits_t uxOriginalBitValue, uxReturn;
     EventGroup_t * pxEventBits = xEventGroup;
+    #ifndef ESP_PLATFORM
     BaseType_t xAlreadyYielded;
+    #endif // ESP_PLATFORM
     BaseType_t xTimeoutOccurred = pdFALSE;
     configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
@@ -274,13 +276,15 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
     }
     #ifdef ESP_PLATFORM // IDF-3755
     taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-    xAlreadyYielded = pdFALSE;
     #else
     xAlreadyYielded = xTaskResumeAll();
     #endif // ESP_PLATFORM
     if( xTicksToWait != ( TickType_t ) 0 )
     {
+        #ifdef ESP_PLATFORM
+        portYIELD_WITHIN_API();
+        #else
         if( xAlreadyYielded == pdFALSE )
         {
             portYIELD_WITHIN_API();
@@ -289,6 +293,7 @@ EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
         {
             mtCOVERAGE_TEST_MARKER();
         }
+        #endif // ESP_PLATFORM
         /* The task blocked to wait for its required bits to be set - at this
          * point either the required bits were set or the block time expired. If
@@ -347,7 +352,11 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
 {
     EventGroup_t * pxEventBits = xEventGroup;
     EventBits_t uxReturn, uxControlBits = 0;
+    #ifdef ESP_PLATFORM
+    BaseType_t xWaitConditionMet;
+    #else
     BaseType_t xWaitConditionMet, xAlreadyYielded;
+    #endif // ESP_PLATFORM
     BaseType_t xTimeoutOccurred = pdFALSE;
     /* Check the user is not attempting to wait on the bits used by the kernel
@@ -435,13 +444,15 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
     }
     #ifdef ESP_PLATFORM // IDF-3755
     taskEXIT_CRITICAL( &( pxEventBits->xEventGroupLock ) );
-    xAlreadyYielded = pdFALSE;
     #else
     xAlreadyYielded = xTaskResumeAll();
     #endif // ESP_PLATFORM
     if( xTicksToWait != ( TickType_t ) 0 )
     {
+        #ifdef ESP_PLATFORM
+        portYIELD_WITHIN_API();
+        #else
         if( xAlreadyYielded == pdFALSE )
         {
             portYIELD_WITHIN_API();
@@ -450,6 +461,7 @@ EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
         {
             mtCOVERAGE_TEST_MARKER();
         }
+        #endif // ESP_PLATFORM
         /* The task blocked to wait for its required bits to be set - at this
          * point either the required bits were set or the block time expired. If
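Note: both xEventGroupSync() and xEventGroupWaitBits() get the same treatment here: on ESP_PLATFORM the restored code yields unconditionally after leaving the critical section instead of consulting xAlreadyYielded. A sketch of the reasoning, under the assumption (stated here, not in the diff) that IDF's taskENTER/EXIT_CRITICAL are spinlock-based and can never perform the context switch that upstream's xTaskResumeAll() sometimes performs:

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    /* Illustrative helper, not kernel source. */
    static void yield_after_timed_wait( BaseType_t xAlreadyYielded )
    {
    #ifdef ESP_PLATFORM
        ( void ) xAlreadyYielded;   /* a spinlock section cannot have yielded for us */
        portYIELD_WITHIN_API();
    #else
        if( xAlreadyYielded == pdFALSE )
        {
            portYIELD_WITHIN_API(); /* xTaskResumeAll() did not already switch */
        }
    #endif
    }

The same pattern reappears in the xTaskDelayUntil() and vTaskDelay() hunks of tasks.c further down.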

View File

@@ -14,7 +14,6 @@ extern "C" {
 #endif
 #define portBYTE_ALIGNMENT 16
-#define portTICK_TYPE_IS_ATOMIC 1
 /* Type definitions. */
 #define portCHAR uint8_t

View File

@@ -108,7 +108,6 @@ typedef uint32_t TickType_t;
 #define portSTACK_GROWTH (-1)
 #define portTICK_PERIOD_MS ((TickType_t) (1000 / configTICK_RATE_HZ))
 #define portBYTE_ALIGNMENT 16
-#define portTICK_TYPE_IS_ATOMIC 1
 #define portNOP() __asm volatile (" nop ")

View File

@@ -140,7 +140,6 @@ typedef uint32_t TickType_t;
 #define portSTACK_GROWTH ( -1 )
 #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ )
 #define portBYTE_ALIGNMENT 4
-#define portTICK_TYPE_IS_ATOMIC 1
 #define portNOP() XT_NOP()
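Note: this same one-line removal is applied across three port layers (the hunks above). With portTICK_TYPE_IS_ATOMIC no longer defined, upstream FreeRTOS.h falls back to wrapping tick-count accesses in real critical sections; paraphrased from the upstream header (exact text varies by version):

    #ifndef portTICK_TYPE_IS_ATOMIC
        #define portTICK_TYPE_IS_ATOMIC 0
    #endif

    #if ( portTICK_TYPE_IS_ATOMIC == 0 )
        /* Tick-type reads are not declared atomic: use real critical sections. */
        #define portTICK_TYPE_ENTER_CRITICAL()                   portENTER_CRITICAL()
        #define portTICK_TYPE_EXIT_CRITICAL()                    portEXIT_CRITICAL()
        #define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR()      portSET_INTERRUPT_MASK_FROM_ISR()
        #define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( ( x ) )
    #else
        /* Reads are atomic: the wrappers compile away. */
        #define portTICK_TYPE_ENTER_CRITICAL()
        #define portTICK_TYPE_EXIT_CRITICAL()
        #define portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR()      0
        #define portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( x ) ( void ) x
    #endif

This pairs with the xTaskGetTickCount() hunk in tasks.c below, where the restored IDF code drops the wrapper entirely and performs a plain 32-bit read, which IDF treats as atomic on its 32-bit targets.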

View File

@@ -220,7 +220,7 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue,
 * Checks to see if a queue is a member of a queue set, and if so, notifies
 * the queue set that the queue contains data.
 */
-static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
+static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
 #endif
 /*
@@ -362,7 +362,9 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
     * variable of type StaticQueue_t or StaticSemaphore_t equals the size of
     * the real queue and semaphore structures. */
     volatile size_t xSize = sizeof( StaticQueue_t );
-    configASSERT( xSize == sizeof( Queue_t ) );
+    /* This assertion cannot be branch covered in unit tests */
+    configASSERT( xSize == sizeof( Queue_t ) ); /* LCOV_EXCL_BR_LINE */
     ( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */
 }
 #endif /* configASSERT_DEFINED */
@@ -403,16 +405,24 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
                                    const UBaseType_t uxItemSize,
                                    const uint8_t ucQueueType )
 {
-    Queue_t * pxNewQueue;
+    Queue_t * pxNewQueue = NULL;
     size_t xQueueSizeInBytes;
     uint8_t * pucQueueStorage;
     configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
+    if( uxItemSize == ( UBaseType_t ) 0 )
+    {
+        /* There is not going to be a queue storage area. */
+        xQueueSizeInBytes = ( size_t ) 0;
+    }
+    else
+    {
         /* Allocate enough space to hold the maximum number of items that
          * can be in the queue at any time. It is valid for uxItemSize to be
          * zero in the case the queue is used as a semaphore. */
         xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+    }
     /* Check for multiplication overflow. */
     configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) );
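Note: the restored branch skips the multiplication entirely when uxItemSize is zero (the semaphore case), rather than relying on the overflow assert alone. A worked example of the sizing rule (standalone sketch, values illustrative):

    #include <stddef.h>
    #include <stdint.h>

    static size_t queue_storage_bytes( size_t uxQueueLength, size_t uxItemSize )
    {
        if( uxItemSize == 0 )
        {
            return 0; /* semaphore-style queue: no storage area at all */
        }
        return uxQueueLength * uxItemSize; /* e.g. 10 items of uint32_t -> 40 bytes */
    }

So a counting semaphore (a queue created with item size 0) allocates only the Queue_t bookkeeping, while a queue created with xQueueCreate( 10, sizeof( uint32_t ) ) also reserves 40 bytes of item storage.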
@@ -588,7 +598,10 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
     * calling task is the mutex holder, but not a good way of determining the
     * identity of the mutex holder, as the holder may change between the
     * following critical section exiting and the function returning. */
-    taskENTER_CRITICAL( &( pxSemaphore->xQueueLock ) );
+    #ifdef ESP_PLATFORM
+    Queue_t * const pxQueue = (Queue_t *)pxSemaphore;
+    #endif
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
         {
@@ -599,7 +612,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
             pxReturn = NULL;
         }
     }
-    taskEXIT_CRITICAL( &( pxSemaphore->xQueueLock ) );
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
     return pxReturn;
 } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */
@@ -737,7 +750,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
                                    const UBaseType_t uxInitialCount,
                                    StaticQueue_t * pxStaticQueue )
 {
-    QueueHandle_t xHandle;
+    QueueHandle_t xHandle = NULL;
     configASSERT( uxMaxCount != 0 );
     configASSERT( uxInitialCount <= uxMaxCount );
@@ -766,7 +779,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
 QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
                                              const UBaseType_t uxInitialCount )
 {
-    QueueHandle_t xHandle;
+    QueueHandle_t xHandle = NULL;
     configASSERT( uxMaxCount != 0 );
     configASSERT( uxInitialCount <= uxMaxCount );
@@ -831,7 +844,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
     #if ( configUSE_QUEUE_SETS == 1 )
     {
-        const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
+        UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
         xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
@@ -844,7 +857,7 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
             * in the queue has not changed. */
             mtCOVERAGE_TEST_MARKER();
         }
-        else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
+        else if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
         {
             /* The queue is a member of a queue set, and posting
              * to the queue set caused a higher priority task to
@@ -1066,7 +1079,6 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
     if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
     {
         const int8_t cTxLock = pxQueue->cTxLock;
-        const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
         traceQUEUE_SEND_FROM_ISR( pxQueue );
@@ -1085,14 +1097,7 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
         {
             if( pxQueue->pxQueueSetContainer != NULL )
             {
-                if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
-                {
-                    /* Do not notify the queue set as an existing item
-                     * was overwritten in the queue so the number of items
-                     * in the queue has not changed. */
-                    mtCOVERAGE_TEST_MARKER();
-                }
-                else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
+                if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
                 {
                     /* The queue is a member of a queue set, and posting
                      * to the queue set caused a higher priority task to
@@ -1165,9 +1170,6 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
             {
                 mtCOVERAGE_TEST_MARKER();
             }
-            /* Not used in this path. */
-            ( void ) uxPreviousMessagesWaiting;
         }
         #endif /* configUSE_QUEUE_SETS */
     }
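Note: with the parameterized notifier restored, xQueueGenericSendFromISR() no longer needs the special case that skipped notification when queueOVERWRITE replaced an existing item; the copy position is forwarded and the container itself performs an overwrite. A toy model of the two designs (plain C, invented names, not kernel code):

    #include <stdint.h>

    #define POS_SEND_TO_BACK 0
    #define POS_OVERWRITE    2

    typedef struct { int has_item; uintptr_t item; } tiny_container_t;

    /* Restored design: forwarding the position means an OVERWRITE send
     * replaces the pending notification instead of piling up a second one. */
    static void notify_container( tiny_container_t * set, uintptr_t member, int pos )
    {
        if( ( pos == POS_OVERWRITE ) || ( !set->has_item ) )
        {
            set->item = member;
            set->has_item = 1;
        }
    }

The reverted-away design reached the same invariant from the caller's side, by not notifying at all when an overwrite did not change the number of queued items.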
@@ -1265,7 +1267,7 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
     {
         if( pxQueue->pxQueueSetContainer != NULL )
         {
-            if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
+            if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
             {
                 /* The semaphore is a member of a queue set, and
                  * posting to the queue set caused a higher priority
@@ -1345,8 +1347,6 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
     {
         /* Increment the lock count so the task that unlocks the queue
          * knows that data was posted while it was locked. */
-        configASSERT( cTxLock != queueINT8_MAX );
         pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
     }
@@ -2007,8 +2007,6 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
     {
         /* Increment the lock count so the task that unlocks the queue
          * knows that data was removed while it was locked. */
-        configASSERT( cRxLock != queueINT8_MAX );
         pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
     }
@@ -2087,14 +2085,15 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
 {
     UBaseType_t uxReturn;
+    Queue_t * const pxQueue = ( Queue_t * ) xQueue;
     configASSERT( xQueue );
-    taskENTER_CRITICAL( &( ( ( Queue_t * ) xQueue )->xQueueLock ) );
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
     }
-    taskEXIT_CRITICAL( &( ( ( Queue_t * ) xQueue )->xQueueLock ) );
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
     return uxReturn;
 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
@@ -2354,7 +2353,7 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
     {
         if( pxQueue->pxQueueSetContainer != NULL )
         {
-            if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
+            if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
             {
                 /* The queue is a member of a queue set, and posting to
                  * the queue set caused a higher priority task to unblock.
@@ -2497,9 +2496,6 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
 {
     BaseType_t xReturn;
-    #ifndef ESP_PLATFORM
-    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
-    #endif
     {
         if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
         {
@@ -2510,9 +2506,6 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
             xReturn = pdFALSE;
         }
     }
-    #ifndef ESP_PLATFORM
-    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
-    #endif
     return xReturn;
 }
@@ -2984,8 +2977,11 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
                            QueueSetHandle_t xQueueSet )
 {
     BaseType_t xReturn;
-    taskENTER_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) );
+    #ifdef ESP_PLATFORM
+    Queue_t * pxQueue = (Queue_t * )xQueueOrSemaphore;
+    #endif
+    taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
     {
         if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
         {
@@ -3004,7 +3000,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
             xReturn = pdPASS;
         }
     }
-    taskEXIT_CRITICAL( &( ( ( Queue_t * ) xQueueOrSemaphore )->xQueueLock ) );
+    taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
     return xReturn;
 }
@@ -3034,12 +3030,15 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
     }
     else
     {
-        taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueueOrSemaphore )->xQueueLock ) );
+        #ifdef ESP_PLATFORM
+        Queue_t* pxQueue = (Queue_t*)pxQueueOrSemaphore;
+        #endif
+        taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
         {
             /* The queue is no longer contained in the set. */
             pxQueueOrSemaphore->pxQueueSetContainer = NULL;
         }
-        taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueueOrSemaphore )->xQueueLock ) );
+        taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
         xReturn = pdPASS;
     }
@@ -3078,16 +3077,20 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
 #if ( configUSE_QUEUE_SETS == 1 )
-    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
+    static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue,
+                                                  const BaseType_t xCopyPosition )
     {
         Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer;
         BaseType_t xReturn = pdFALSE;
         /* This function must be called form a critical section. */
-        configASSERT( pxQueueSetContainer );
+        /* The following line is not reachable in unit tests because every call
+         * to prvNotifyQueueSetContainer is preceded by a check that
+         * pxQueueSetContainer != NULL */
+        configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */
-        /* Acquire the Queue set's spinlock */
+        //Acquire the Queue set's spinlock
         taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
         configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
@@ -3096,10 +3099,10 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         {
             const int8_t cTxLock = pxQueueSetContainer->cTxLock;
-            traceQUEUE_SET_SEND( pxQueueSetContainer );
+            traceQUEUE_SEND( pxQueueSetContainer );
             /* The data copied is the handle of the queue that contains data. */
-            xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK );
+            xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
             if( cTxLock == queueUNLOCKED )
             {
@@ -3122,8 +3125,6 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         }
         else
         {
-            configASSERT( cTxLock != queueINT8_MAX );
             pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );
         }
     }
@@ -3132,7 +3133,7 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
         mtCOVERAGE_TEST_MARKER();
     }
-    /* Release the Queue set's spinlock */
+    //Release the Queue set's spinlock
     taskEXIT_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
     return xReturn;
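Note: taken together, the queue.c hunks restore prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) and the traceQUEUE_SEND name. From the application's point of view the queue-set API is unchanged; for reference, a minimal usage sketch (public FreeRTOS/ESP-IDF API; the length-1 queue written with xQueueOverwrite() is exactly the pattern the xCopyPosition forwarding exists to serve):

    #include "freertos/FreeRTOS.h"
    #include "freertos/queue.h"

    void queue_set_demo( void )
    {
        QueueSetHandle_t xSet = xQueueCreateSet( 2 );
        QueueHandle_t xQueue = xQueueCreate( 1, sizeof( uint32_t ) );
        uint32_t ulValue = 42;

        configASSERT( ( xSet != NULL ) && ( xQueue != NULL ) );
        xQueueAddToSet( xQueue, xSet );

        /* Length-1 "mailbox" write: takes the queueOVERWRITE path. */
        xQueueOverwrite( xQueue, &ulValue );

        /* A reader blocks on the set and then drains the ready member. */
        QueueSetMemberHandle_t xActive = xQueueSelectFromSet( xSet, portMAX_DELAY );
        if( xActive == ( QueueSetMemberHandle_t ) xQueue )
        {
            xQueueReceive( xQueue, &ulValue, 0 );
        }
    }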

View File

@@ -588,14 +588,15 @@ size_t xStreamBufferSend( StreamBufferHandle_t xStreamBuffer,
     size_t xReturn, xSpace = 0;
     size_t xRequiredSpace = xDataLengthBytes;
     TimeOut_t xTimeOut;
-    /* The maximum amount of space a stream buffer will ever report is its length
-     * minus 1. */
-    const size_t xMaxReportedSpace = pxStreamBuffer->xLength - ( size_t ) 1;
+    size_t xMaxReportedSpace = 0;
     configASSERT( pvTxData );
     configASSERT( pxStreamBuffer );
+    /* The maximum amount of space a stream buffer will ever report is its length
+     * minus 1. */
+    xMaxReportedSpace = pxStreamBuffer->xLength - ( size_t ) 1;
     /* This send function is used to write to both message buffers and stream
      * buffers. If this is a message buffer then the space needed must be
      * increased by the amount of bytes needed to store the length of the
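Note: the restored ordering is not just stylistic: the const initializer computed pxStreamBuffer->xLength before configASSERT( pxStreamBuffer ) had a chance to run, so a NULL handle was dereferenced before it was checked. A standalone sketch of the hazard (plain C, names invented):

    #include <assert.h>
    #include <stddef.h>

    typedef struct { size_t xLength; } buf_t;

    size_t max_reported_space( buf_t * pxStreamBuffer )
    {
        /* Reverted-away form: dereference happens in the initializer, before any check.
         * const size_t xMaxReportedSpace = pxStreamBuffer->xLength - 1; */

        /* Restored form: check the pointer first, then dereference. */
        size_t xMaxReportedSpace = 0;
        assert( pxStreamBuffer != NULL );
        xMaxReportedSpace = pxStreamBuffer->xLength - ( size_t ) 1;
        return xMaxReportedSpace;
    }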

View File

@@ -259,15 +259,6 @@ extern void esp_vApplicationIdleHook(void);
     #define taskCAN_RUN_ON_CORE( xCore, xCoreID ) ( pdTRUE )
 #endif /* configNUM_CORES > 1 */
-/* Check if a task is a currently running task. */
-#if ( configNUM_CORES > 1 )
-    #define taskIS_CURRENTLY_RUNNING( pxTCB ) ( ( ( pxTCB ) == pxCurrentTCB[ 0 ] ) || ( ( pxTCB ) == pxCurrentTCB[ 1 ] ) )
-    #define taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCoreID ) ( ( pxTCB ) == pxCurrentTCB[ ( xCoreID ) ] )
-#else
-    #define taskIS_CURRENTLY_RUNNING( pxTCB ) ( ( pxTCB ) == pxCurrentTCB[ 0 ] )
-    #define taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xCoreID ) taskIS_CURRENTLY_RUNNING( ( pxTCB ) )
-#endif /* configNUM_CORES > 1 */
 /*
  * Several functions take a TaskHandle_t parameter that can optionally be NULL,
  * where NULL is used to indicate that the handle of the currently executing
@@ -699,21 +690,21 @@ static BaseType_t prvCheckForYieldUsingPrioritySMP( UBaseType_t uxTaskPriority,
 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
-    TaskHandle_t xTaskCreateStaticPinnedToCore( TaskFunction_t pxTaskCode,
+    TaskHandle_t xTaskCreateStaticPinnedToCore( TaskFunction_t pvTaskCode,
                                                 const char * const pcName,
                                                 const uint32_t ulStackDepth,
                                                 void * const pvParameters,
                                                 UBaseType_t uxPriority,
-                                                StackType_t * const puxStackBuffer,
+                                                StackType_t * const pxStackBuffer,
                                                 StaticTask_t * const pxTaskBuffer,
                                                 const BaseType_t xCoreID )
     {
         TCB_t *pxNewTCB;
         TaskHandle_t xReturn;
-        configASSERT( portVALID_STACK_MEM( puxStackBuffer ) );
         configASSERT( portVALID_TCB_MEM(pxTaskBuffer) );
-        configASSERT( ( ( xCoreID >= 0 ) && ( xCoreID < configNUM_CORES ) ) || ( xCoreID == tskNO_AFFINITY ) );
+        configASSERT( portVALID_STACK_MEM(pxStackBuffer) );
+        configASSERT( (xCoreID>=0 && xCoreID<configNUM_CORES) || (xCoreID==tskNO_AFFINITY) );
         #if ( configASSERT_DEFINED == 1 )
         {
@@ -726,12 +717,13 @@ static BaseType_t prvCheckForYieldUsingPrioritySMP( UBaseType_t uxTaskPriority,
         }
         #endif /* configASSERT_DEFINED */
-        if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
+        if( ( pxTaskBuffer != NULL ) && ( pxStackBuffer != NULL ) )
         {
             /* The memory used for the task's TCB and stack are passed into this
              * function - use them. */
             pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
-            pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
+            pxNewTCB->pxStack = ( StackType_t * ) pxStackBuffer;
             #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
             {
@@ -741,7 +733,7 @@ static BaseType_t prvCheckForYieldUsingPrioritySMP( UBaseType_t uxTaskPriority,
             }
             #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
-            prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL, xCoreID );
+            prvInitialiseNewTask( pvTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL, xCoreID );
             prvAddNewTaskToReadyList( pxNewTCB );
         }
         else
@@ -856,12 +848,12 @@ static BaseType_t prvCheckForYieldUsingPrioritySMP( UBaseType_t uxTaskPriority,
 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
-    BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pxTaskCode,
+    BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pvTaskCode,
                                         const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
-                                        const configSTACK_DEPTH_TYPE usStackDepth,
+                                        const uint32_t usStackDepth,
                                         void * const pvParameters,
                                         UBaseType_t uxPriority,
-                                        TaskHandle_t * const pxCreatedTask,
+                                        TaskHandle_t * const pvCreatedTask,
                                         const BaseType_t xCoreID)
     {
         TCB_t * pxNewTCB;
@@ -933,7 +925,7 @@ static BaseType_t prvCheckForYieldUsingPrioritySMP( UBaseType_t uxTaskPriority,
         }
         #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
-        prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL, xCoreID );
+        prvInitialiseNewTask( pvTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pvCreatedTask, pxNewTCB, NULL, xCoreID );
         prvAddNewTaskToReadyList( pxNewTCB );
         xReturn = pdPASS;
     }
@@ -961,10 +953,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
     StackType_t * pxTopOfStack;
     UBaseType_t x;
-    #if ( configNUM_CORES == 1 )
-    {
+    #if (configNUM_CORES < 2)
         xCoreID = 0;
-    }
     #endif
     #if ( portUSING_MPU_WRAPPERS == 1 )
@@ -1373,17 +1363,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
 void vTaskDelete( TaskHandle_t xTaskToDelete )
 {
     TCB_t * pxTCB;
-    BaseType_t xFreeNow;
+    TCB_t * curTCB;
+    BaseType_t core;
+    BaseType_t xFreeNow = 0;
     taskENTER_CRITICAL( &xKernelLock );
     {
-        BaseType_t xCurCoreID;
-        #if ( configNUM_CORES > 1 )
-        xCurCoreID = xPortGetCoreID();
-        #else
-        xCurCoreID = 0;
-        ( void ) xCurCoreID;
-        #endif
+        core = xPortGetCoreID();
+        curTCB = pxCurrentTCB[core];
         /* If null is passed in here then it is the calling task that is
          * being deleted. */
@@ -1415,19 +1402,12 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         * not return. */
         uxTaskNumber++;
-        /*
-         * We cannot immediately a task that is
-         * - Currently running on either core
-         * - If the task is not currently running but is pinned to the other (due to FPU cleanup)
-         * Todo: Allow deletion of tasks pinned to other core (IDF-5803)
-         */
-        #if ( configNUM_CORES > 1 )
-        xFreeNow = ( taskIS_CURRENTLY_RUNNING( pxTCB ) || ( pxTCB->xCoreID == !xCurCoreID ) ) ? pdFALSE : pdTRUE;
-        #else
-        xFreeNow = ( taskIS_CURRENTLY_RUNNING( pxTCB ) ) ? pdFALSE : pdTRUE;
-        #endif /* configNUM_CORES > 1 */
-        if( xFreeNow == pdFALSE )
+        if( pxTCB == curTCB ||
+            /* in SMP, we also can't immediately delete the task active on the other core */
+            (configNUM_CORES > 1 && pxTCB == pxCurrentTCB[ !core ]) ||
+            /* ... and we can't delete a non-running task pinned to the other core, as
+               FPU cleanup has to happen on the same core */
+            (configNUM_CORES > 1 && pxTCB->xCoreID == (!core)) )
         {
             /* A task is deleting itself. This cannot complete within the
              * task itself, as a context switch to another task is required.
@@ -1441,19 +1421,14 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
              * check the xTasksWaitingTermination list. */
             ++uxDeletedTasksWaitingCleanUp;
-            /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
-             * portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
-            traceTASK_DELETE( pxTCB );
             /* The pre-delete hook is primarily for the Windows simulator,
              * in which Windows specific clean up operations are performed,
              * after which it is not possible to yield away from this task -
             * hence xYieldPending is used to latch that a context switch is
             * required. */
-            portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[ xCurCoreID ] );
+            portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[core] );
-            #if ( configNUM_CORES > 1 )
-            if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xCurCoreID ) )
+            if (configNUM_CORES > 1 && pxTCB == pxCurrentTCB[ !core ])
             {
                 /* SMP case of deleting a task running on a different core. Same issue
                 as a task deleting itself, but we need to send a yield to this task now
@@ -1465,19 +1440,20 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                 xTasksWaitingTermination list in this case (instead it will immediately
                 release xKernelLock again and be yielded before the FreeRTOS function
                 returns.) */
-                vPortYieldOtherCore( !xCurCoreID );
+                vPortYieldOtherCore( !core );
             }
-            #endif /* configNUM_CORES > 1 */
         }
         else
         {
             --uxCurrentNumberOfTasks;
-            traceTASK_DELETE( pxTCB );
+            xFreeNow = pdTRUE;
             /* Reset the next expected unblock time in case it referred to
              * the task that has just been deleted. */
             prvResetNextTaskUnblockTime();
         }
+        traceTASK_DELETE( pxTCB );
     }
     taskEXIT_CRITICAL( &xKernelLock );
@@ -1493,8 +1469,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
      * been deleted. */
     if( xSchedulerRunning != pdFALSE )
     {
-        taskENTER_CRITICAL( &xKernelLock );
-        if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xPortGetCoreID() ) )
+        if( pxTCB == curTCB )
         {
             configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
             portYIELD_WITHIN_API();
@@ -1503,7 +1478,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         {
             mtCOVERAGE_TEST_MARKER();
         }
-        taskEXIT_CRITICAL( &xKernelLock );
     }
 }
@@ -1525,7 +1499,11 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                            const TickType_t xTimeIncrement )
 {
     TickType_t xTimeToWake;
+    #ifdef ESP_PLATFORM
+    BaseType_t xShouldDelay = pdFALSE;
+    #else
     BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
+    #endif // ESP_PLATFORM
     configASSERT( pxPreviousWakeTime );
     configASSERT( ( xTimeIncrement > 0U ) );
@@ -1593,13 +1571,15 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     }
     #ifdef ESP_PLATFORM // IDF-3755
     taskEXIT_CRITICAL( &xKernelLock );
-    xAlreadyYielded = pdFALSE;
     #else
     xAlreadyYielded = xTaskResumeAll();
     #endif // ESP_PLATFORM
     /* Force a reschedule if xTaskResumeAll has not already done so, we may
      * have put ourselves to sleep. */
+    #ifdef ESP_PLATFORM
+    portYIELD_WITHIN_API();
+    #else
     if( xAlreadyYielded == pdFALSE )
     {
         portYIELD_WITHIN_API();
@@ -1608,7 +1588,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     {
         mtCOVERAGE_TEST_MARKER();
     }
+    #endif // ESP_PLATFORM
     return xShouldDelay;
 }
@@ -1619,8 +1599,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
 void vTaskDelay( const TickType_t xTicksToDelay )
 {
-    BaseType_t xAlreadyYielded = pdFALSE;
     /* A delay time of zero just forces a reschedule. */
     if( xTicksToDelay > ( TickType_t ) 0U )
     {
@@ -1644,7 +1622,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     }
     #ifdef ESP_PLATFORM // IDF-3755
     taskEXIT_CRITICAL( &xKernelLock );
-    xAlreadyYielded = pdFALSE;
     #else
     xAlreadyYielded = xTaskResumeAll();
     #endif // ESP_PLATFORM
@@ -1654,17 +1631,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         mtCOVERAGE_TEST_MARKER();
     }
-    /* Force a reschedule if xTaskResumeAll has not already done so, we may
-     * have put ourselves to sleep. */
+    /* Force a reschedule, we may have put ourselves to sleep. */
-    if( xAlreadyYielded == pdFALSE )
-    {
     portYIELD_WITHIN_API();
     }
-    else
-    {
-        mtCOVERAGE_TEST_MARKER();
-    }
-}
 #endif /* INCLUDE_vTaskDelay */
 /*-----------------------------------------------------------*/
@@ -1680,11 +1649,18 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     configASSERT( pxTCB );
     taskENTER_CRITICAL( &xKernelLock ); //Need critical section incase either core context switches in between
-    if( taskIS_CURRENTLY_RUNNING( pxTCB ) )
+    if( pxTCB == pxCurrentTCB[xPortGetCoreID()])
     {
         /* The task calling this function is querying its own state. */
         eReturn = eRunning;
     }
+    #if (configNUM_CORES > 1)
+    else if (pxTCB == pxCurrentTCB[!xPortGetCoreID()])
+    {
+        /* The task calling this function is querying its own state. */
+        eReturn = eRunning;
+    }
+    #endif
     else
     {
         pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
@@ -1871,7 +1847,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
              * priority than the calling task. */
             if( uxNewPriority > uxCurrentBasePriority )
             {
-                if( !taskIS_CURRENTLY_RUNNING( pxTCB ) )
+                if( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] )
                 {
                     /* The priority of a task other than the currently
                      * running task is being raised. Is the priority being
@@ -1892,22 +1868,13 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
                  * priority task able to run so no yield is required. */
                 }
             }
-            else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, 0 ) )
+            else if( pxTCB == pxCurrentTCB[ xPortGetCoreID() ] )
             {
                 /* Setting the priority of the running task down means
                  * there may now be another task of higher priority that
                  * is ready to execute. */
                 xYieldRequired = pdTRUE;
             }
-            #if ( configNUM_CORES > 1 )
-            else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, 1 ) )
-            {
-                /* Setting the priority of the running task on the other
-                 * core down means there may now be another task of
-                 * higher priority that is ready to execute. */
-                vPortYieldOtherCore( 1 );
-            }
-            #endif /* configNUM_CORES > 1 */
             else
             {
                 /* Setting the priority of any other task down does not
@@ -2006,6 +1973,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
 void vTaskSuspend( TaskHandle_t xTaskToSuspend )
 {
     TCB_t * pxTCB;
+    TCB_t * curTCB;
     taskENTER_CRITICAL( &xKernelLock );
     {
@@ -2037,6 +2005,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
         }
         vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
+        curTCB = pxCurrentTCB[ xPortGetCoreID() ];
         #if ( configUSE_TASK_NOTIFICATIONS == 1 )
         {
@@ -2053,24 +2022,35 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
             }
         }
         #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
+    }
+    taskEXIT_CRITICAL( &xKernelLock );
     if( xSchedulerRunning != pdFALSE )
     {
         /* Reset the next expected unblock time in case it referred to the
          * task that is now in the Suspended state. */
+        taskENTER_CRITICAL( &xKernelLock );
+        {
             prvResetNextTaskUnblockTime();
         }
+        taskEXIT_CRITICAL( &xKernelLock );
+    }
     else
     {
         mtCOVERAGE_TEST_MARKER();
     }
-    if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, xPortGetCoreID() ) )
+    if( pxTCB == curTCB )
     {
         if( xSchedulerRunning != pdFALSE )
         {
             /* The current task has just been suspended. */
-            configASSERT( uxSchedulerSuspended[ xPortGetCoreID() ] == 0 );
+            taskENTER_CRITICAL( &xKernelLock );
+            BaseType_t suspended = uxSchedulerSuspended[xPortGetCoreID()];
+            taskEXIT_CRITICAL( &xKernelLock );
+            configASSERT( suspended == 0 );
+            (void)suspended;
             portYIELD_WITHIN_API();
         }
         else
@@ -2084,7 +2064,9 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
              * NULL so when the next task is created pxCurrentTCB will
              * be set to point to it no matter what its relative priority
             * is. */
+            taskENTER_CRITICAL( &xKernelLock );
             pxCurrentTCB[ xPortGetCoreID() ] = NULL;
+            taskEXIT_CRITICAL( &xKernelLock );
         }
         else
         {
@@ -2092,31 +2074,24 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
             }
         }
     }
-    #if ( configNUM_CORES > 1 )
-    else if( taskIS_CURRENTLY_RUNNING_ON_CORE( pxTCB, !xPortGetCoreID() ) )
-    {
-        /* The other core's current task has just been suspended */
-        if( xSchedulerRunning != pdFALSE )
-        {
-            vPortYieldOtherCore( !xPortGetCoreID() );
-        }
     else
     {
-        /* The scheduler is not running, but the task that was pointed
-         * to by pxCurrentTCB[ otherCore ] has just been suspended.
-         * We simply set the pxCurrentTCB[ otherCore ] to NULL for now.
-         * Todo: Update vTaskSwitchContext() to be runnable on
-         * behalf of the other core. */
-        pxCurrentTCB[ !xPortGetCoreID() ] = NULL;
+        if( xSchedulerRunning != pdFALSE )
+        {
+            /* A task other than the currently running task was suspended,
+             * reset the next expected unblock time in case it referred to the
+             * task that is now in the Suspended state. */
+            taskENTER_CRITICAL( &xKernelLock );
+            {
+                prvResetNextTaskUnblockTime();
+            }
+            taskEXIT_CRITICAL( &xKernelLock );
         }
-    #endif /* configNUM_CORES > 1 */
         else
         {
             mtCOVERAGE_TEST_MARKER();
         }
     }
-    taskEXIT_CRITICAL( &xKernelLock );
 }
 #endif /* INCLUDE_vTaskSuspend */
@@ -2139,12 +2114,8 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
     {
         /* Has the task already been resumed from within an ISR? */
-        #if ( configNUM_CORES > 1 )
-        if( ( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 0 ], &( pxTCB->xEventListItem ) ) == pdFALSE )
-            && ( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 1 ], &( pxTCB->xEventListItem ) ) == pdFALSE ) )
-        #else
-        if( listIS_CONTAINED_WITHIN( &xPendingReadyList[ 0 ], &( pxTCB->xEventListItem ) ) == pdFALSE )
-        #endif
+        if( listIS_CONTAINED_WITHIN( &xPendingReadyList[xPortGetCoreID()], &( pxTCB->xEventListItem )) == pdFALSE &&
+            listIS_CONTAINED_WITHIN( &xPendingReadyList[!xPortGetCoreID()], &( pxTCB->xEventListItem )) == pdFALSE )
         {
             /* Is it in the suspended list because it is in the Suspended
              * state, or because is is blocked with no timeout? */
@@ -2186,7 +2157,7 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     {
         /* The parameter cannot be NULL as it is impossible to resume the
          * currently executing task. */
-        if( !taskIS_CURRENTLY_RUNNING( pxTCB ) && ( pxTCB != NULL ) )
+        if( ( pxTCB != pxCurrentTCB[xPortGetCoreID()] ) && ( pxTCB != NULL ) )
         {
             if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
             {
@@ -2261,7 +2232,6 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
     traceTASK_RESUME_FROM_ISR( pxTCB );
     /* Check the ready lists can be accessed. */
-    /* Known issue IDF-5856. We also need to check if the other core is suspended */
     if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
     {
         /* Ready lists can be accessed so move the task from the
@@ -2460,11 +2430,11 @@ void vTaskSuspendAll( void )
     * BaseType_t. Please read Richard Barry's reply in the following link to a
     * post in the FreeRTOS support forum before reporting this as a bug! -
     * https://goo.gl/wu4acr */
-    #if ( configNUM_CORES > 1 )
+    #ifdef ESP_PLATFORM
     /* For SMP, although each core has their own uxSchedulerSuspended, we still
-     * need enter a critical section when accessing. */
-    taskENTER_CRITICAL( &xKernelLock );
+     * need to disable interrupts or enter a critical section when accessing. */
+    unsigned state;
+    state = portSET_INTERRUPT_MASK_FROM_ISR();
     #endif
     /* portSOFRWARE_BARRIER() is only implemented for emulated/simulated ports that
@@ -2479,53 +2449,55 @@ void vTaskSuspendAll( void )
     * the above increment elsewhere. */
     portMEMORY_BARRIER();
-    #if ( configNUM_CORES > 1 )
-    taskEXIT_CRITICAL( &xKernelLock );
+    #ifdef ESP_PLATFORM
+    portCLEAR_INTERRUPT_MASK_FROM_ISR( state );
     #endif
 }
 /*----------------------------------------------------------*/
 #if ( configUSE_TICKLESS_IDLE != 0 )
+    #if ( configNUM_CORES > 1 )
+    static BaseType_t xHaveReadyTasks( void )
+    {
+        for (int i = tskIDLE_PRIORITY + 1; i < configMAX_PRIORITIES; ++i)
+        {
+            if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ i ] ) ) > 0 )
+            {
+                return pdTRUE;
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
+        return pdFALSE;
+    }
+    #endif // configNUM_CORES > 1
     static TickType_t prvGetExpectedIdleTime( void )
     {
         TickType_t xReturn;
-        UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
+        /* We need a critical section here as we are about to access kernel data structures */
         taskENTER_CRITICAL( &xKernelLock );
-        /* uxHigherPriorityReadyTasks takes care of the case where
-         * configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
-         * task that are in the Ready state, even though the idle task is
-         * running. */
-        #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
-        {
-            if( uxTopReadyPriority > tskIDLE_PRIORITY )
-            {
-                uxHigherPriorityReadyTasks = pdTRUE;
-            }
-        }
-        #else
-        {
-            const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;
-            /* When port optimised task selection is used the uxTopReadyPriority
-             * variable is used as a bit map. If bits other than the least
-             * significant bit are set then there are tasks that have a priority
-             * above the idle priority that are in the Ready state. This takes
-             * care of the case where the co-operative scheduler is in use. */
-            if( uxTopReadyPriority > uxLeastSignificantBit )
-            {
-                uxHigherPriorityReadyTasks = pdTRUE;
-            }
-        }
-        #endif /* if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */
         if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority > tskIDLE_PRIORITY )
        {
            xReturn = 0;
        }
+        #if configNUM_CORES > 1
+        /* This function is called from Idle task; in single core case this
+         * means that no higher priority tasks are ready to run, and we can
+         * enter sleep. In SMP case, there might be ready tasks waiting for
+         * the other CPU, so need to check all ready lists.
+         */
+        else if( xHaveReadyTasks() )
+        {
+            xReturn = 0;
+        }
+        #endif // configNUM_CORES > 1
        else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > configNUM_CORES )
        {
            /* There are other idle priority tasks in the ready state. If
@@ -2533,18 +2505,10 @@ void vTaskSuspendAll( void )
            * processed. */
            xReturn = 0;
        }
-        else if( uxHigherPriorityReadyTasks != pdFALSE )
-        {
-            /* There are tasks in the Ready state that have a priority above the
-             * idle priority. This path can only be reached if
-             * configUSE_PREEMPTION is 0. */
-            xReturn = 0;
-        }
        else
        {
            xReturn = xNextTaskUnblockTime - xTickCount;
        }
        taskEXIT_CRITICAL( &xKernelLock );
        return xReturn;
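Note: the restored prvGetExpectedIdleTime() replaces the upstream uxHigherPriorityReadyTasks bookkeeping with the xHaveReadyTasks() scan, because on SMP a ready task may be waiting for the other CPU even while this core idles. The decision ladder reduces to a small function; a worked sketch with illustrative numbers (standalone C, names invented):

    #include <stdint.h>

    typedef uint32_t tick_t;

    static tick_t expected_idle_ticks( tick_t tick_count,
                                       tick_t next_unblock_time,
                                       int higher_prio_or_other_core_ready,
                                       int extra_idle_prio_tasks_ready )
    {
        if( higher_prio_or_other_core_ready || extra_idle_prio_tasks_ready )
        {
            return 0; /* someone can run now: no tickless sleep */
        }
        /* e.g. next unblock at tick 1600, now at tick 1000 -> sleep 600 ticks */
        return next_unblock_time - tick_count;
    }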
@@ -2569,9 +2533,12 @@ BaseType_t xTaskResumeAll( void )
     * tasks from this list into their appropriate ready list. */
     taskENTER_CRITICAL( &xKernelLock );
     {
+        #ifdef ESP_PLATFORM
         /* Minor optimization. Core ID can't change while inside a critical section */
         BaseType_t xCoreID = xPortGetCoreID();
+        #else
+        BaseType_t xCoreID = 0;
+        #endif
         --uxSchedulerSuspended[ xCoreID ];
         if( uxSchedulerSuspended[ xCoreID ] == ( UBaseType_t ) pdFALSE )
@@ -2614,9 +2581,11 @@ BaseType_t xTaskResumeAll( void )
             * they should be processed now. This ensures the tick count does
             * not slip, and that any delayed tasks are resumed at the correct
             * time. */
+            #ifdef ESP_PLATFORM
            /* Core 0 is solely responsible for managing tick count, thus it
             * must be the only core to unwind the pended ticks */
            if ( xCoreID == 0 )
+            #endif
            {
                TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */
@@ -2674,12 +2643,7 @@ TickType_t xTaskGetTickCount( void )
 {
     TickType_t xTicks;
-    /* Critical section required if running on a 16 bit processor. */
-    portTICK_TYPE_ENTER_CRITICAL();
-    {
     xTicks = xTickCount;
-    }
-    portTICK_TYPE_EXIT_CRITICAL();
     return xTicks;
 }
@@ -2688,6 +2652,7 @@ TickType_t xTaskGetTickCount( void )
 TickType_t xTaskGetTickCountFromISR( void )
 {
     TickType_t xReturn;
+    UBaseType_t uxSavedInterruptStatus;
     /* RTOS ports that support interrupt nesting have the concept of a maximum
      * system call (or maximum API call) interrupt priority. Interrupts that are
@@ -2705,21 +2670,11 @@ TickType_t xTaskGetTickCountFromISR( void )
     * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
-    #if ( configNUM_CORES > 1 )
-    /* We need a critical section here as we are about to access kernel data structures */
-    taskENTER_CRITICAL_ISR( &xKernelLock );
-    #else
-    UBaseType_t uxSavedInterruptStatus;
     uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
-    #endif
     {
         xReturn = xTickCount;
     }
-    #if ( configNUM_CORES > 1 )
-    taskEXIT_CRITICAL_ISR( &xKernelLock );
-    #else
     portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
-    #endif
     return xReturn;
 }
@@ -3490,22 +3445,24 @@ BaseType_t xTaskIncrementTick( void )
 {
     TCB_t * pxTCB;
     TaskHookFunction_t xReturn;
+    #ifndef ESP_PLATFORM
+    UBaseType_t uxSavedInterruptStatus;
+    #endif
     /* If xTask is NULL then set the calling task's hook. */
     pxTCB = prvGetTCBFromHandle( xTask );
     /* Save the hook function in the TCB. A critical section is required as
      * the value can be accessed from an interrupt. */
-    #if ( configNUM_CORES > 1 )
+    #ifdef ESP_PLATFORM
     taskENTER_CRITICAL_ISR( &xKernelLock );
     #else
-    UBaseType_t uxSavedInterruptStatus;
     uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
     #endif
     {
         xReturn = pxTCB->pxTaskTag;
     }
-    #if ( configNUM_CORES > 1 )
+    #ifdef ESP_PLATFORM
     taskEXIT_CRITICAL_ISR( &xKernelLock );
     #else
     portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
     #endif
@@ -4097,7 +4054,6 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
         * around and gone past again. This passed since vTaskSetTimeout()
         * was called. */
         xReturn = pdTRUE;
-        *pxTicksToWait = ( TickType_t ) 0;
     }
     else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
     {
@@ -4108,7 +4064,7 @@ BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
     }
     else
     {
-        *pxTicksToWait = ( TickType_t ) 0;
+        *pxTicksToWait = 0;
         xReturn = pdTRUE;
     }
 }
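Note: both xTaskCheckForTimeOut() hunks touch how *pxTicksToWait is zeroed on expiry, but the public contract is unchanged: the function returns pdTRUE when the timeout has expired and otherwise updates *pxTicksToWait to the time remaining. The canonical calling pattern (public API) for reference:

    #include "freertos/FreeRTOS.h"
    #include "freertos/task.h"

    BaseType_t wait_for_condition( volatile int * pxFlag, TickType_t xTicksToWait )
    {
        TimeOut_t xTimeOut;
        vTaskSetTimeOutState( &xTimeOut ); /* record the starting tick */

        while( *pxFlag == 0 )
        {
            /* Updates xTicksToWait to the remaining block time. */
            if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
            {
                return pdFAIL; /* timed out */
            }
            vTaskDelay( 1 );
        }
        return pdPASS;
    }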
@@ -4545,6 +4501,7 @@ static void prvCheckTasksWaitingTermination( void )
     pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
     pxTaskStatus->pxStackBase = pxTCB->pxStack;
     pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;
+
     #if ( configTASKLIST_INCLUDE_COREID == 1 )
     pxTaskStatus->xCoreID = pxTCB->xCoreID;
     #endif /* configTASKLIST_INCLUDE_COREID */
@@ -4925,7 +4882,6 @@ static void prvResetNextTaskUnblockTime( void )
     BaseType_t xReturn;
     unsigned state;
-    /* Known issue. This should use critical sections. See IDF-5889 */
     state = portSET_INTERRUPT_MASK_FROM_ISR();
     if( xSchedulerRunning == pdFALSE )
     {
@@ -5560,17 +5516,13 @@ static void prvResetNextTaskUnblockTime( void )
 TickType_t uxTaskResetEventItemValue( void )
 {
     TickType_t uxReturn;
-    TCB_t *pxCurTCB;
     taskENTER_CRITICAL( &xKernelLock );
-    pxCurTCB = pxCurrentTCB[ xPortGetCoreID() ];
-    uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurTCB->xEventListItem ) );
+    uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
     /* Reset the event list item to its normal value - so it can be used with
      * queues and semaphores. */
-    listSET_LIST_ITEM_VALUE( &( pxCurTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+    listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
     taskEXIT_CRITICAL( &xKernelLock );
     return uxReturn;
@@ -6212,15 +6164,13 @@ static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
     const TickType_t xConstTickCount = xTickCount;
     BaseType_t xCurCoreID = xPortGetCoreID();
-    #if ( configNUM_CORES > 1 )
-    if( listIS_CONTAINED_WITHIN( &xTasksWaitingTermination, &( pxCurrentTCB[ xCurCoreID ]->xStateListItem ) ) == pdTRUE )
+    if( ( configNUM_CORES > 1 ) && listIS_CONTAINED_WITHIN( &xTasksWaitingTermination, &( pxCurrentTCB[ xCurCoreID ]->xStateListItem ) ) )
     {
         /* vTaskDelete() has been called to delete this task. This would have happened from the other core while this task was spinning on xTaskQueueMutex,
         so don't move the running task to the delayed list - as soon as this core re-enables interrupts this task will
-        be suspended permanently. Todo: IDF-5844. */
+        be suspended permanently */
         return;
     }
-    #endif
     #if ( INCLUDE_xTaskAbortDelay == 1 )
     {