Merge branch 'feature/freertos_10.4.3_remove_multicore_queue_locks' into 'master'

FreeRTOS(IDF): Remove/restore queue locks for multicore/single-core respectively (1)

Closes IDF-5740

See merge request espressif/esp-idf!19763
Darian
2022-11-10 19:15:31 +08:00
2 changed files with 660 additions and 477 deletions
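In short: the change introduces a queueUSE_LOCKS macro, derived from configNUM_CORES, to select between the vanilla queue-lock scheme (single core) and plain spinlock critical sections (SMP). A condensed sketch of the gating, paraphrased from the hunks below rather than quoted verbatim:

#if ( configNUM_CORES > 1 )
    #define queueUSE_LOCKS    0    /* SMP: queue spinlock critical sections only */
#else
    #define queueUSE_LOCKS    1    /* Single core: keep the vanilla queue locks */
#endif

#if ( queueUSE_LOCKS == 1 )
    /* cRxLock/cTxLock, prvLockQueue()/prvUnlockQueue() and the
     * prvIsQueueEmpty()/prvIsQueueFull() helpers are compiled in; blocking
     * paths suspend the scheduler and lock the queue before placing the
     * calling task on the event list. */
#else
    /* The lock counters and lock/unlock helpers are not built at all;
     * blocking paths place the calling task on the event list directly
     * inside taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ). */
#endif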


@@ -54,11 +54,21 @@
 * correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */

/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED    ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED    ( ( int8_t ) 0 )
#define queueINT8_MAX    ( ( int8_t ) 127 )

/* Vanilla FreeRTOS uses queue locks to ensure that vTaskPlaceOnEventList()
 * calls are deterministic (as queue locks use scheduler suspension instead of
 * critical sections). However, the SMP implementation is not deterministic
 * anyways, so use of queue locks can be dropped (replaced with a critical
 * section) in exchange for better queue performance. */
#if ( configNUM_CORES > 1 )
#define queueUSE_LOCKS 0
#define queueUNLOCKED ( ( int8_t ) 0 )
#else
#define queueUSE_LOCKS 1
/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED ( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED ( ( int8_t ) 0 )
#define queueINT8_MAX ( ( int8_t ) 127 )
#endif /* configNUM_CORES > 1 */
/* When the Queue_t structure is used to represent a base queue its pcHead and /* When the Queue_t structure is used to represent a base queue its pcHead and
* pcTail members are used as pointers into the queue storage area. When the * pcTail members are used as pointers into the queue storage area. When the
@@ -121,8 +131,10 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
UBaseType_t uxLength; /*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
UBaseType_t uxItemSize; /*< The size of each items that the queue will hold. */
#if ( queueUSE_LOCKS == 1 )
volatile int8_t cRxLock; /*< Stores the number of items received from the queue (removed from the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
volatile int8_t cTxLock; /*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked. Set to queueUNLOCKED when the queue is not locked. */
#endif
#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
@@ -136,9 +148,8 @@ typedef struct QueueDefinition /* The old naming convention is used to prevent b
UBaseType_t uxQueueNumber;
uint8_t ucQueueType;
#endif
#ifdef ESP_PLATFORM
portMUX_TYPE xQueueLock; /* Spinlock required for SMP critical sections */
#endif // ESP_PLATFORM
} xQUEUE;
/* The old xQUEUE name is maintained above then typedefed to the new Queue_t /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
@@ -171,12 +182,14 @@ typedef xQUEUE Queue_t;
 * The pcQueueName member of a structure being NULL is indicative of the
 * array position being vacant. */
PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];
#ifdef ESP_PLATFORM
/* Spinlock required in SMP when accessing the queue registry */
static portMUX_TYPE xQueueRegistryLock = portMUX_INITIALIZER_UNLOCKED;
#endif // ESP_PLATFORM
#endif /* configQUEUE_REGISTRY_SIZE */
#if ( queueUSE_LOCKS == 1 )
/* /*
* Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not * Unlocks a queue locked by a call to prvLockQueue. Locking a queue does not
* prevent an ISR from adding or removing items to the queue, but does prevent * prevent an ISR from adding or removing items to the queue, but does prevent
@@ -185,21 +198,22 @@ typedef xQUEUE Queue_t;
* to indicate that a task may require unblocking. When the queue in unlocked * to indicate that a task may require unblocking. When the queue in unlocked
* these lock counts are inspected, and the appropriate action taken. * these lock counts are inspected, and the appropriate action taken.
*/ */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION; static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
/* /*
* Uses a critical section to determine if there is any data in a queue. * Uses a critical section to determine if there is any data in a queue.
* *
* @return pdTRUE if the queue contains no items, otherwise pdFALSE. * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
*/ */
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
/* /*
* Uses a critical section to determine if there is any space in a queue. * Uses a critical section to determine if there is any space in a queue.
* *
* @return pdTRUE if there is no space, otherwise pdFALSE; * @return pdTRUE if there is no space, otherwise pdFALSE;
*/ */
static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION; static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
#endif /* queueUSE_LOCKS == 1 */
/* /*
* Copies an item into the queue, either at the front of the queue or the * Copies an item into the queue, either at the front of the queue or the
@@ -256,11 +270,13 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
#endif #endif
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
#if ( queueUSE_LOCKS == 1 )
/* /*
* Macro to mark a queue as locked. Locking a queue prevents an ISR from * Macro to mark a queue as locked. Locking a queue prevents an ISR from
* accessing the queue event lists. * accessing the queue event lists.
*/ */
#define prvLockQueue( pxQueue ) \ #define prvLockQueue( pxQueue ) \
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); \ taskENTER_CRITICAL( &( pxQueue->xQueueLock ) ); \
{ \ { \
if( ( pxQueue )->cRxLock == queueUNLOCKED ) \ if( ( pxQueue )->cRxLock == queueUNLOCKED ) \
@@ -273,6 +289,7 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
} \ } \
} \ } \
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ) taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) )
#endif /* queueUSE_LOCKS == 1 */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
@@ -282,12 +299,10 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
configASSERT( pxQueue );
#ifdef ESP_PLATFORM
if( xNewQueue == pdTRUE )
{
portMUX_INITIALIZE( &( pxQueue->xQueueLock ) );
}
#endif // ESP_PLATFORM
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
{
@@ -295,8 +310,12 @@ BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U; pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
pxQueue->pcWriteTo = pxQueue->pcHead; pxQueue->pcWriteTo = pxQueue->pcHead;
pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */ pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
#if ( queueUSE_LOCKS == 1 )
{
pxQueue->cRxLock = queueUNLOCKED; pxQueue->cRxLock = queueUNLOCKED;
pxQueue->cTxLock = queueUNLOCKED; pxQueue->cTxLock = queueUNLOCKED;
}
#endif /* queueUSE_LOCKS == 1 */
if( xNewQueue == pdFALSE ) if( xNewQueue == pdFALSE )
{ {
@@ -523,9 +542,10 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
/* In case this is a recursive mutex. */ /* In case this is a recursive mutex. */
pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0; pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;
#ifdef ESP_PLATFORM
/* Initialize the mutex's spinlock */
portMUX_INITIALIZE( &( pxNewQueue->xQueueLock ) ); portMUX_INITIALIZE( &( pxNewQueue->xQueueLock ) );
#endif // ESP_PLATFORM
traceCREATE_MUTEX( pxNewQueue ); traceCREATE_MUTEX( pxNewQueue );
/* Start with the semaphore in the expected state. */ /* Start with the semaphore in the expected state. */
@@ -955,17 +975,43 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
/* If queue locks ARE NOT being used:
* - At this point, the queue is full and entry time has been set
* - We simply check for a time out, block if not timed out, or
* return an error if we have timed out. */
#if ( queueUSE_LOCKS == 0 )
{
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
/* Not timed out yet. Block the current task. */
traceBLOCKING_ON_QUEUE_SEND( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
portYIELD_WITHIN_API();
}
else
{
/* We have timed out. Return an error. */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
traceQUEUE_SEND_FAILED( pxQueue );
return errQUEUE_FULL;
}
}
#endif /* queueUSE_LOCKS == 0 */
} }
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
/* If queue locks ARE being used:
* - At this point, the queue is full and entry time has been set
* - We follow the original procedure of locking the queue before
* attempting to block. */
#if ( queueUSE_LOCKS == 1 )
{
/* Interrupts and other tasks can send to and receive from the queue /* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */ * now the critical section has been exited. */
#ifdef ESP_PLATFORM /* IDF-3755 */
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
#else
vTaskSuspendAll(); vTaskSuspendAll();
#endif // ESP_PLATFORM
prvLockQueue( pxQueue ); prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */ /* Update the timeout state to see if it has expired yet. */
@@ -980,19 +1026,15 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
 * event list. It is possible that interrupts occurring now
 * remove this task from the event list again - but as the
 * scheduler is suspended the task will go onto the pending
 * ready list instead of the actual ready list. */
prvUnlockQueue( pxQueue );
/* Resuming the scheduler will move tasks from the pending
 * ready list into the ready list - so it is feasible that this
 * task is already in the ready list before it yields - in which
 * case the yield will not cause a context switch unless there
 * is also a higher priority task in the pending ready list. */
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
{
portYIELD_WITHIN_API();
}
@@ -1001,26 +1043,20 @@ BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
{ {
/* Try again. */ /* Try again. */
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll(); ( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
} }
} }
else else
{ {
/* The timeout has expired. */ /* The timeout has expired. */
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll(); ( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
traceQUEUE_SEND_FAILED( pxQueue ); traceQUEUE_SEND_FAILED( pxQueue );
return errQUEUE_FULL; return errQUEUE_FULL;
} }
}
#endif /* queueUSE_LOCKS == 1 */
} /*lint -restore */ } /*lint -restore */
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@@ -1059,13 +1095,21 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
 * read, instead return a flag to say whether a context switch is required or
 * not (i.e. has a task with a higher priority than us been woken by this
 * post). */
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#if ( configNUM_CORES > 1 )
{
taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
( void ) uxSavedInterruptStatus;
#else
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#endif
{
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{
#if ( queueUSE_LOCKS == 1 )
const int8_t cTxLock = pxQueue->cTxLock; const int8_t cTxLock = pxQueue->cTxLock;
#else
/* Queue locks not used, so we treat it as unlocked. */
const int8_t cTxLock = queueUNLOCKED;
#endif /* queueUSE_LOCKS == 1 */
const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting; const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
traceQUEUE_SEND_FROM_ISR( pxQueue ); traceQUEUE_SEND_FROM_ISR( pxQueue );
@@ -1172,11 +1216,17 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
#endif /* configUSE_QUEUE_SETS */ #endif /* configUSE_QUEUE_SETS */
} }
else else
{
#if ( queueUSE_LOCKS == 1 )
{ {
/* Increment the lock count so the task that unlocks the queue /* Increment the lock count so the task that unlocks the queue
* knows that data was posted while it was locked. */ * knows that data was posted while it was locked. */
configASSERT( cTxLock != queueINT8_MAX );
pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
} }
#endif /* queueUSE_LOCKS == 1 */
}
xReturn = pdPASS; xReturn = pdPASS;
} }
@@ -1185,10 +1235,12 @@ BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
xReturn = errQUEUE_FULL;
}
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
}
#if ( configNUM_CORES > 1 )
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
#else
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
#endif

return xReturn;
}
@@ -1234,10 +1286,13 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#if ( configNUM_CORES > 1 )
{
taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
( void ) uxSavedInterruptStatus;
#else
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#endif
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
/* When the queue is used to implement a semaphore no data is ever /* When the queue is used to implement a semaphore no data is ever
@@ -1245,7 +1300,12 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
* space'. */ * space'. */
if( uxMessagesWaiting < pxQueue->uxLength ) if( uxMessagesWaiting < pxQueue->uxLength )
{ {
#if ( queueUSE_LOCKS == 1 )
const int8_t cTxLock = pxQueue->cTxLock; const int8_t cTxLock = pxQueue->cTxLock;
#else
/* Queue locks not used, so we treat it as unlocked. */
const int8_t cTxLock = queueUNLOCKED;
#endif /* queueUSE_LOCKS == 1 */
traceQUEUE_GIVE_FROM_ISR( pxQueue ); traceQUEUE_GIVE_FROM_ISR( pxQueue );
@@ -1342,6 +1402,8 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
#endif /* configUSE_QUEUE_SETS */ #endif /* configUSE_QUEUE_SETS */
} }
else else
{
#if ( queueUSE_LOCKS == 1 )
{ {
/* Increment the lock count so the task that unlocks the queue /* Increment the lock count so the task that unlocks the queue
* knows that data was posted while it was locked. */ * knows that data was posted while it was locked. */
@@ -1349,6 +1411,8 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 ); pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
} }
#endif /* queueUSE_LOCKS == 1 */
}
xReturn = pdPASS; xReturn = pdPASS;
} }
@@ -1357,10 +1421,12 @@ BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue );
xReturn = errQUEUE_FULL;
}
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
}
#if ( configNUM_CORES > 1 )
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
#else
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
#endif

return xReturn;
}
@@ -1451,17 +1517,43 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
/* If queue locks ARE NOT being used:
* - At this point, the queue is empty and entry time has been set
* - We simply check for a time out, block if not timed out, or
* return an error if we have timed out. */
#if ( queueUSE_LOCKS == 0 )
{
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
/* Not timed out yet. Block the current task. */
traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
portYIELD_WITHIN_API();
}
else
{
/* We have timed out. Return an error. */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
}
#endif /* queueUSE_LOCKS == 0 */
} }
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
/* If queue locks ARE being used:
* - At this point, the queue is empty and entry time has been set
* - We follow the original procedure for locking the queue before
* attempting to block. */
#if ( queueUSE_LOCKS == 1 )
{
/* Interrupts and other tasks can send to and receive from the queue /* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */ * now the critical section has been exited. */
#ifdef ESP_PLATFORM /* IDF-3755 */
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
#else
vTaskSuspendAll(); vTaskSuspendAll();
#endif // ESP_PLATFORM
prvLockQueue( pxQueue ); prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */ /* Update the timeout state to see if it has expired yet. */
@@ -1474,32 +1566,22 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
if( xTaskResumeAll() == pdFALSE ) if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
{ {
portYIELD_WITHIN_API(); portYIELD_WITHIN_API();
} }
#ifndef ESP_PLATFORM
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
#endif // ESP_PLATFORM
} }
else else
{ {
/* The queue contains data again. Loop back to try and read the /* The queue contains data again. Loop back to try and read the
* data. */ * data. */
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll(); ( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
} }
} }
else else
@@ -1507,11 +1589,7 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
/* Timed out. If there is no data in the queue exit, otherwise loop /* Timed out. If there is no data in the queue exit, otherwise loop
* back and attempt to read the data. */ * back and attempt to read the data. */
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll(); ( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{ {
@@ -1523,6 +1601,8 @@ BaseType_t xQueueReceive( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
}
#endif /* queueUSE_LOCKS == 1 */
} /*lint -restore */ } /*lint -restore */
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@@ -1641,17 +1721,69 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
/* If queue locks ARE NOT being used:
* - At this point, the semaphore/mutex is empty/held and entry time
* has been set.
* - We simply check for a time out, inherit priority and block if
* not timed out, or return an error if we have timed out. */
#if ( queueUSE_LOCKS == 0 )
{
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
/* Not timed out yet. If this is a mutex, make the holder
* inherit our priority, then block the current task. */
traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
#if ( configUSE_MUTEXES == 1 )
{
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
{
xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
#endif /* if ( configUSE_MUTEXES == 1 ) */
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
portYIELD_WITHIN_API();
}
else
{
/* We have timed out. If this is a mutex, make the holder
* disinherit our priority, then return an error. */
#if ( configUSE_MUTEXES == 1 )
{
if( xInheritanceOccurred != pdFALSE )
{
UBaseType_t uxHighestWaitingPriority;
uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
}
}
#endif /* configUSE_MUTEXES */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
traceQUEUE_RECEIVE_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
}
#endif /* queueUSE_LOCKS == 0 */
} }
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
/* If queue locks ARE being used:
* - At this point, the semaphore/mutex is empty/held and entry time
* has been set.
* - We follow the original procedure for locking the queue, inheriting
* priority, then attempting to block. */
#if ( queueUSE_LOCKS == 1 )
{
/* Interrupts and other tasks can give to and take from the semaphore /* Interrupts and other tasks can give to and take from the semaphore
* now the critical section has been exited. */ * now the critical section has been exited. */
#ifdef ESP_PLATFORM /* IDF-3755 */
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
#else
vTaskSuspendAll(); vTaskSuspendAll();
#endif // ESP_PLATFORM
prvLockQueue( pxQueue ); prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */ /* Update the timeout state to see if it has expired yet. */
@@ -1684,43 +1816,29 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
if( xTaskResumeAll() == pdFALSE ) if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
{ {
portYIELD_WITHIN_API(); portYIELD_WITHIN_API();
} }
#ifndef ESP_PLATFORM
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
#endif // ESP_PLATFORM
} }
else else
{ {
/* There was no timeout and the semaphore count was not 0, so /* There was no timeout and the semaphore count was not 0, so
* attempt to take the semaphore again. */ * attempt to take the semaphore again. */
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll(); ( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
} }
} }
else else
{ {
/* Timed out. */ /* Timed out. */
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll(); ( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
/* If the semaphore count is 0 exit now as the timeout has /* If the semaphore count is 0 exit now as the timeout has
* expired. Otherwise return to attempt to take the semaphore that is * expired. Otherwise return to attempt to take the semaphore that is
@@ -1760,6 +1878,8 @@ BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
}
#endif /* queueUSE_LOCKS == 1 */
} /*lint -restore */ } /*lint -restore */
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@@ -1857,17 +1977,43 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
/* If queue locks ARE NOT being used:
* - At this point, the queue is empty and entry time has been set
* - We simply check for a time out, block if not timed out, or
* return an error if we have timed out. */
#if ( queueUSE_LOCKS == 0 )
{
/* Update the timeout state to see if it has expired yet. */
if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
{
/* Not timed out yet. Block the current task. */
traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
portYIELD_WITHIN_API();
}
else
{
/* We have timed out. Return an error. */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
traceQUEUE_PEEK_FAILED( pxQueue );
return errQUEUE_EMPTY;
}
}
#endif /* queueUSE_LOCKS == 0 */
} }
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
/* If queue locks ARE being used:
* - At this point, the queue is empty and entry time has been set
* - We follow the original procedure for locking the queue before
* attempting to block. */
#if ( queueUSE_LOCKS == 1 )
{
/* Interrupts and other tasks can send to and receive from the queue /* Interrupts and other tasks can send to and receive from the queue
* now the critical section has been exited. */ * now the critical section has been exited. */
#ifdef ESP_PLATFORM /* IDF-3755 */
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
#else
vTaskSuspendAll(); vTaskSuspendAll();
#endif // ESP_PLATFORM
prvLockQueue( pxQueue ); prvLockQueue( pxQueue );
/* Update the timeout state to see if it has expired yet. */ /* Update the timeout state to see if it has expired yet. */
@@ -1880,32 +2026,22 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
traceBLOCKING_ON_QUEUE_PEEK( pxQueue ); traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
if( xTaskResumeAll() == pdFALSE ) if( xTaskResumeAll() == pdFALSE )
#endif // ESP_PLATFORM
{ {
portYIELD_WITHIN_API(); portYIELD_WITHIN_API();
} }
#ifndef ESP_PLATFORM
else else
{ {
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
#endif // ESP_PLATFORM
} }
else else
{ {
/* There is data in the queue now, so don't enter the blocked /* There is data in the queue now, so don't enter the blocked
* state, instead return to try and obtain the data. */ * state, instead return to try and obtain the data. */
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll(); ( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
} }
} }
else else
@@ -1913,11 +2049,7 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
/* The timeout has expired. If there is still no data in the queue /* The timeout has expired. If there is still no data in the queue
* exit, otherwise go back and try to read the data again. */ * exit, otherwise go back and try to read the data again. */
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
#ifdef ESP_PLATFORM /* IDF-3755 */
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#else
( void ) xTaskResumeAll(); ( void ) xTaskResumeAll();
#endif // ESP_PLATFORM
if( prvIsQueueEmpty( pxQueue ) != pdFALSE ) if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
{ {
@@ -1929,6 +2061,8 @@ BaseType_t xQueuePeek( QueueHandle_t xQueue,
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
} }
}
#endif /* queueUSE_LOCKS == 1 */
} /*lint -restore */ } /*lint -restore */
} }
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
@@ -1960,16 +2094,24 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#if ( configNUM_CORES > 1 )
{
taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
( void ) uxSavedInterruptStatus;
#else
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#endif
{
const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

/* Cannot block in an ISR, so check there is data available. */
if( uxMessagesWaiting > ( UBaseType_t ) 0 )
{
#if ( queueUSE_LOCKS == 1 )
const int8_t cRxLock = pxQueue->cRxLock; const int8_t cRxLock = pxQueue->cRxLock;
#else
/* Queue locks not used, so we treat it as unlocked. */
const int8_t cRxLock = queueUNLOCKED;
#endif /* queueUSE_LOCKS == 1 */
traceQUEUE_RECEIVE_FROM_ISR( pxQueue ); traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
@@ -2008,6 +2150,8 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
} }
} }
else else
{
#if ( queueUSE_LOCKS == 1 )
{ {
/* Increment the lock count so the task that unlocks the queue /* Increment the lock count so the task that unlocks the queue
* knows that data was removed while it was locked. */ * knows that data was removed while it was locked. */
@@ -2015,6 +2159,8 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 ); pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
} }
#endif /* queueUSE_LOCKS == 1 */
}
xReturn = pdPASS; xReturn = pdPASS;
} }
@@ -2023,10 +2169,13 @@ BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
xReturn = pdFAIL;
traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
}
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
}
#if ( configNUM_CORES > 1 )
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
#else
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
#endif

return xReturn;
}
@@ -2060,8 +2209,12 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#if ( configNUM_CORES > 1 )
taskENTER_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
( void ) uxSavedInterruptStatus;
#else
uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
#endif
{
/* Cannot block in an ISR, so check there is data available. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
@@ -2082,8 +2235,12 @@ BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
}
}
#if ( configNUM_CORES > 1 )
taskEXIT_CRITICAL_ISR( &( pxQueue->xQueueLock ) );
#else
portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
#endif

return xReturn;
}
@@ -2338,8 +2495,9 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue,
}
/*-----------------------------------------------------------*/

#if ( queueUSE_LOCKS == 1 )
static void prvUnlockQueue( Queue_t * const pxQueue )
{
/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
/* The lock counts contains the number of extra data items placed or /* The lock counts contains the number of extra data items placed or
@@ -2455,11 +2613,14 @@ static void prvUnlockQueue( Queue_t * const pxQueue )
pxQueue->cRxLock = queueUNLOCKED; pxQueue->cRxLock = queueUNLOCKED;
} }
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) ); taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
} }
#endif /* queueUSE_LOCKS == 1 */
/*-----------------------------------------------------------*/

#if ( queueUSE_LOCKS == 1 )
static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
{
BaseType_t xReturn;

taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );
@@ -2476,7 +2637,8 @@ static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) ); taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );
return xReturn; return xReturn;
} }
#endif /* queueUSE_LOCKS == 1 */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue ) BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
@@ -2499,13 +2661,12 @@ BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
/*-----------------------------------------------------------*/

#if ( queueUSE_LOCKS == 1 )
static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
{
BaseType_t xReturn;
#ifndef ESP_PLATFORM
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
#endif
taskENTER_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );
{
if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
{
@@ -2516,12 +2677,11 @@ static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
xReturn = pdFALSE;
}
}
#ifndef ESP_PLATFORM
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#endif
taskEXIT_CRITICAL( &( ( ( Queue_t * ) pxQueue )->xQueueLock ) );

return xReturn;
}
#endif /* queueUSE_LOCKS == 1 */
/*-----------------------------------------------------------*/ /*-----------------------------------------------------------*/
BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue ) BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
@@ -2949,6 +3109,8 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
* so it should be called with the scheduler locked and not from a critical * so it should be called with the scheduler locked and not from a critical
* section. */ * section. */
#if ( queueUSE_LOCKS == 1 )
/* Only do anything if there are no messages in the queue. This function /* Only do anything if there are no messages in the queue. This function
* will not actually cause the task to block, just place it on a blocked * will not actually cause the task to block, just place it on a blocked
* list. It will not block until the scheduler is unlocked - at which * list. It will not block until the scheduler is unlocked - at which
@@ -2956,6 +3118,12 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
* the queue is locked, and the calling task blocks on the queue, then the * the queue is locked, and the calling task blocks on the queue, then the
* calling task will be immediately unblocked when the queue is unlocked. */ * calling task will be immediately unblocked when the queue is unlocked. */
prvLockQueue( pxQueue ); prvLockQueue( pxQueue );
#else
/* If queue locks are not used, we use a critical section instead
 * to ensure thread safety. */
taskENTER_CRITICAL( &( pxQueue->xQueueLock ) );
#endif /* queueUSE_LOCKS == 1 */
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U ) if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
{ {
@@ -2967,7 +3135,11 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
mtCOVERAGE_TEST_MARKER(); mtCOVERAGE_TEST_MARKER();
} }
#if ( queueUSE_LOCKS == 1 )
prvUnlockQueue( pxQueue ); prvUnlockQueue( pxQueue );
#else
taskEXIT_CRITICAL( &( pxQueue->xQueueLock ) );
#endif /* queueUSE_LOCKS == 1 */
} }
#endif /* configUSE_TIMERS */ #endif /* configUSE_TIMERS */
@@ -3095,15 +3267,19 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
/* This function must be called form a critical section. */ /* This function must be called form a critical section. */
configASSERT( pxQueueSetContainer ); configASSERT( pxQueueSetContainer );
/* Acquire the Queue set's spinlock */
taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ); configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
/* We need to also acquire the queue set's spinlock as well. */
taskENTER_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );
if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength ) if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
{ {
#if ( queueUSE_LOCKS == 1 )
const int8_t cTxLock = pxQueueSetContainer->cTxLock; const int8_t cTxLock = pxQueueSetContainer->cTxLock;
#else
/* Queue locks not used, so we treat it as unlocked. */
const int8_t cTxLock = queueUNLOCKED;
#endif /* queueUSE_LOCKS == 1 */
traceQUEUE_SET_SEND( pxQueueSetContainer ); traceQUEUE_SET_SEND( pxQueueSetContainer );
@@ -3130,18 +3306,22 @@ BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
} }
} }
else else
{
#if ( queueUSE_LOCKS == 1 )
{ {
configASSERT( cTxLock != queueINT8_MAX ); configASSERT( cTxLock != queueINT8_MAX );
pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 ); pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );
} }
#endif /* queueUSE_LOCKS == 1 */
}
}
else
{
mtCOVERAGE_TEST_MARKER();
}

/* Release the previously acquired queue set's spinlock. */
taskEXIT_CRITICAL( &( pxQueueSetContainer->xQueueLock ) );

return xReturn;


@@ -97,6 +97,9 @@ entries:
event_groups: xEventGroupSetBits (default)
event_groups: xEventGroupSync (default)
event_groups: vEventGroupDelete (default)
if FREERTOS_UNICORE = y:
    queue: prvUnlockQueue (default)
    queue: prvIsQueueEmpty (default)
queue: prvIsQueueFull (default)
queue: prvInitialiseNewQueue (default)
queue: prvInitialiseMutex (default)